diff --git a/SabreTools.Library/Data/Enums.cs b/SabreTools.Library/Data/Enums.cs
index 66d6e29c..da489b8e 100644
--- a/SabreTools.Library/Data/Enums.cs
+++ b/SabreTools.Library/Data/Enums.cs
@@ -225,50 +225,6 @@
kDummy = 0x19,
}
- /// <summary>
- /// Zip open type
- /// </summary>
- /// https://raw.githubusercontent.com/gjefferyes/RomVault/5a93500001f0d068f32cf77a048950717507f733/ROMVault2/SupportedFiles/ZipEnums.cs
- public enum ZipOpenType
- {
- Closed,
- OpenRead,
- OpenWrite
- }
-
- /// <summary>
- /// Zip testing type
- /// </summary>
- /// https://raw.githubusercontent.com/gjefferyes/RomVault/5a93500001f0d068f32cf77a048950717507f733/ROMVault2/SupportedFiles/ZipEnums.cs
- public enum ZipReturn
- {
- ZipGood,
- ZipFileLocked,
- ZipFileCountError,
- ZipSignatureError,
- ZipExtraDataOnEndOfZip,
- ZipUnsupportedCompression,
- ZipLocalFileHeaderError,
- ZipCentralDirError,
- ZipEndOfCentralDirectoryError,
- Zip64EndOfCentralDirError,
- Zip64EndOfCentralDirectoryLocatorError,
- ZipReadingFromOutputFile,
- ZipWritingToInputFile,
- ZipErrorGettingDataStream,
- ZipCRCDecodeError,
- ZipDecodeError,
- ZipFileNameToLong,
- ZipFileAlreadyOpen,
- ZipCannotFastOpen,
- ZipErrorOpeningFile,
- ZipErrorFileNotFound,
- ZipErrorReadingFile,
- ZipErrorTimeStamp,
- ZipErrorRollBackFile,
- ZipUntested
- }
-
#endregion
#region DatFile related
diff --git a/SabreTools.Library/External/Compress/File/File.cs b/SabreTools.Library/External/Compress/File/File.cs
new file mode 100644
index 00000000..6cd580af
--- /dev/null
+++ b/SabreTools.Library/External/Compress/File/File.cs
@@ -0,0 +1,214 @@
+using System;
+using System.IO;
+using Compress.Utils;
+using Path = RVIO.Path;
+using FileInfo = RVIO.FileInfo;
+using FileStream = RVIO.FileStream;
+
+namespace Compress.File
+{
+ public class File : ICompress
+ {
+ private FileInfo _fileInfo;
+ private Stream _inStream;
+ private byte[] _crc;
+
+ public string ZipFilename => _fileInfo?.FullName ?? "";
+
+ public long TimeStamp => _fileInfo?.LastWriteTime ?? 0;
+
+ public ZipOpenType ZipOpen { get; private set; }
+
+
+ public ZipStatus ZipStatus { get; private set; }
+
+ public int LocalFilesCount()
+ {
+ return 1;
+ }
+
+ public string Filename(int i)
+ {
+ return Path.GetFileName(ZipFilename);
+ }
+
+ public bool IsDirectory(int i)
+ {
+ return RVIO.Directory.Exists(ZipFilename);
+ }
+
+ public ulong UncompressedSize(int i)
+ {
+ return _fileInfo != null ? (ulong)_fileInfo.Length : 0;
+ }
+
+ public ulong? LocalHeader(int i)
+ {
+ return 0;
+ }
+
+ public ZipReturn FileStatus(int i)
+ {
+ return ZipReturn.ZipGood;
+ }
+
+ public byte[] CRC32(int i)
+ {
+ return _crc;
+ }
+
+ public ZipReturn ZipFileCreate(string newFilename)
+ {
+ if (ZipOpen != ZipOpenType.Closed)
+ {
+ return ZipReturn.ZipFileAlreadyOpen;
+ }
+
+ DirUtil.CreateDirForFile(newFilename);
+ _fileInfo = new FileInfo(newFilename);
+
+ int errorCode = FileStream.OpenFileWrite(newFilename, out _inStream);
+ if (errorCode != 0)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ ZipOpen = ZipOpenType.OpenWrite;
+ return ZipReturn.ZipGood;
+ }
+
+
+ public void ZipFileClose()
+ {
+ if (ZipOpen == ZipOpenType.Closed)
+ {
+ return;
+ }
+
+ if (ZipOpen == ZipOpenType.OpenRead)
+ {
+ if (_inStream != null)
+ {
+ _inStream.Close();
+ _inStream.Dispose();
+ }
+ ZipOpen = ZipOpenType.Closed;
+ return;
+ }
+
+ _inStream.Flush();
+ _inStream.Close();
+ _inStream.Dispose();
+ _fileInfo = new FileInfo(_fileInfo.FullName);
+ ZipOpen = ZipOpenType.Closed;
+ }
+
+
+ public ZipReturn ZipFileOpen(string newFilename, long timestamp, bool readHeaders)
+ {
+ ZipFileClose();
+ ZipStatus = ZipStatus.None;
+ _fileInfo = null;
+
+ try
+ {
+ if (!RVIO.File.Exists(newFilename))
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorFileNotFound;
+ }
+ _fileInfo = new FileInfo(newFilename);
+ if (timestamp != -1 && _fileInfo.LastWriteTime != timestamp)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorTimeStamp;
+ }
+ int errorCode = FileStream.OpenFileRead(newFilename, out _inStream);
+ if (errorCode != 0)
+ {
+ ZipFileClose();
+ if (errorCode == 32)
+ {
+ return ZipReturn.ZipFileLocked;
+ }
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ }
+ catch (PathTooLongException)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipFileNameToLong;
+ }
+ catch (IOException)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ ZipOpen = ZipOpenType.OpenRead;
+
+ if (!readHeaders)
+ {
+ return ZipReturn.ZipGood;
+ }
+
+
+ //return ZipFileReadHeaders();
+ return ZipReturn.ZipGood;
+ }
+
+
+ public ZipReturn ZipFileOpen(Stream inStream)
+ {
+ ZipFileClose();
+ ZipStatus = ZipStatus.None;
+ _fileInfo = null;
+ _inStream = inStream;
+ ZipOpen = ZipOpenType.OpenRead;
+
+ //return ZipFileReadHeaders();
+ return ZipReturn.ZipGood;
+ }
+
+
+
+ public void ZipFileAddDirectory()
+ {
+ throw new NotImplementedException();
+ }
+
+ public ZipReturn ZipFileCloseWriteStream(byte[] crc32)
+ {
+ _crc = crc32;
+ return ZipReturn.ZipGood;
+ }
+
+ public void ZipFileCloseFailed()
+ {
+ throw new NotImplementedException();
+ }
+
+ public ZipReturn ZipFileOpenReadStream(int index, out Stream stream, out ulong streamSize)
+ {
+ _inStream.Position = 0;
+ stream = _inStream;
+ streamSize = (ulong)_fileInfo.Length;
+ return ZipReturn.ZipGood;
+ }
+
+ public ZipReturn ZipFileOpenWriteStream(bool raw, bool trrntzip, string filename, ulong uncompressedSize, ushort compressionMethod, out Stream stream)
+ {
+ _inStream.Position = 0;
+ stream = _inStream;
+ return ZipReturn.ZipGood;
+ }
+
+ public ZipReturn ZipFileCloseReadStream()
+ {
+ return ZipReturn.ZipGood;
+ }
+
+
+
+
+ }
+}
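
For reviewers, a minimal sketch of driving the new `Compress.File.File` wrapper through its archive-style surface. It assumes the `ZipReturn`/`ZipOpenType`/`ZipStatus` enums removed from Enums.cs above now live in the `Compress` namespace (their new location is not shown in this part of the diff); `LooseFileExample`, `DumpEntry`, and `romPath` are illustrative names, not part of the PR.

```csharp
using System;
using System.IO;
using Compress;

public static class LooseFileExample
{
    // Treats a single loose file like a one-entry archive via the new wrapper.
    public static void DumpEntry(string romPath)
    {
        ICompress wrapper = new Compress.File.File();

        ZipReturn ret = wrapper.ZipFileOpen(romPath, -1, true);
        if (ret != ZipReturn.ZipGood)
        {
            Console.WriteLine($"Open failed: {ret}");
            return;
        }

        // The wrapper always reports exactly one local file.
        for (int i = 0; i < wrapper.LocalFilesCount(); i++)
        {
            Console.WriteLine($"{wrapper.Filename(i)}: {wrapper.UncompressedSize(i)} bytes");
        }

        if (wrapper.ZipFileOpenReadStream(0, out Stream data, out ulong size) == ZipReturn.ZipGood)
        {
            // "data" is the underlying file stream rewound to position 0; consume it here.
        }

        wrapper.ZipFileCloseReadStream();
        wrapper.ZipFileClose();
    }
}
```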
diff --git a/SabreTools.Library/External/Compress/ICompress.cs b/SabreTools.Library/External/Compress/ICompress.cs
new file mode 100644
index 00000000..5e5c147c
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ICompress.cs
@@ -0,0 +1,40 @@
+using System.IO;
+
+namespace Compress
+{
+ public interface ICompress
+ {
+ int LocalFilesCount();
+
+ string Filename(int i);
+ ulong? LocalHeader(int i);
+ ulong UncompressedSize(int i);
+ byte[] CRC32(int i);
+
+ bool IsDirectory(int i);
+
+ ZipOpenType ZipOpen { get; }
+
+ ZipReturn ZipFileOpen(string newFilename, long timestamp = -1, bool readHeaders = true);
+
+ ZipReturn ZipFileOpen(Stream inStream);
+ void ZipFileClose();
+
+ ZipReturn ZipFileOpenReadStream(int index, out Stream stream, out ulong streamSize);
+ ZipReturn ZipFileOpenWriteStream(bool raw, bool trrntzip, string filename, ulong uncompressedSize, ushort compressionMethod, out Stream stream);
+ ZipReturn ZipFileCloseReadStream();
+
+
+ ZipStatus ZipStatus { get; }
+
+ string ZipFilename { get; }
+ long TimeStamp { get; }
+
+ void ZipFileAddDirectory();
+
+ ZipReturn ZipFileCreate(string newFilename);
+ ZipReturn ZipFileCloseWriteStream(byte[] crc32);
+ void ZipFileCloseFailed();
+
+ }
+}
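
The interface above is what lets callers stay agnostic of the container type. A small, hypothetical helper written against only the members declared here (`ArchiveCopy`/`CopyEntry` are not part of the PR):

```csharp
using System.IO;
using Compress;

public static class ArchiveCopy
{
    // Copies one entry out of any ICompress implementation into "destination",
    // using only the read-stream members declared by the interface.
    public static ZipReturn CopyEntry(ICompress source, int index, Stream destination)
    {
        ZipReturn ret = source.ZipFileOpenReadStream(index, out Stream entry, out ulong size);
        if (ret != ZipReturn.ZipGood)
            return ret;

        byte[] buffer = new byte[81920];
        ulong remaining = size;
        while (remaining > 0)
        {
            int toRead = (int)System.Math.Min((ulong)buffer.Length, remaining);
            int read = entry.Read(buffer, 0, toRead);
            if (read <= 0)
                break;

            destination.Write(buffer, 0, read);
            remaining -= (ulong)read;
        }

        return source.ZipFileCloseReadStream();
    }
}
```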
diff --git a/SabreTools.Library/External/Compress/SevenZip/Common/ICoder.cs b/SabreTools.Library/External/Compress/SevenZip/Common/ICoder.cs
new file mode 100644
index 00000000..2677e52f
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Common/ICoder.cs
@@ -0,0 +1,152 @@
+using System;
+
+namespace Compress.SevenZip.Common
+{
+ /// <summary>
+ /// The exception that is thrown when an error in the input stream occurs during decoding.
+ /// </summary>
+ public class DataErrorException : Exception
+ {
+ public DataErrorException() : base("Data Error") { }
+ }
+
+ /// <summary>
+ /// The exception that is thrown when the value of an argument is outside the allowable range.
+ /// </summary>
+ internal class InvalidParamException : Exception
+ {
+ public InvalidParamException() : base("Invalid Parameter") { }
+ }
+
+ public interface ICodeProgress
+ {
+ /// <summary>
+ /// Callback progress.
+ /// </summary>
+ /// <param name="inSize">
+ /// input size. -1 if unknown.
+ /// </param>
+ /// <param name="outSize">
+ /// output size. -1 if unknown.
+ /// </param>
+ void SetProgress(Int64 inSize, Int64 outSize);
+ };
+
+ internal interface ICoder
+ {
+ /// <summary>
+ /// Codes streams.
+ /// </summary>
+ /// <param name="inStream">
+ /// input Stream.
+ /// </param>
+ /// <param name="outStream">
+ /// output Stream.
+ /// </param>
+ /// <param name="inSize">
+ /// input Size. -1 if unknown.
+ /// </param>
+ /// <param name="outSize">
+ /// output Size. -1 if unknown.
+ /// </param>
+ /// <param name="progress">
+ /// callback progress reference.
+ /// </param>
+ void Code(System.IO.Stream inStream, System.IO.Stream outStream,
+ Int64 inSize, Int64 outSize, ICodeProgress progress);
+ };
+
+ /*
+ public interface ICoder2
+ {
+ void Code(ISequentialInStream []inStreams,
+ const UInt64 []inSizes,
+ ISequentialOutStream []outStreams,
+ UInt64 []outSizes,
+ ICodeProgress progress);
+ };
+ */
+
+ /// <summary>
+ /// Provides the fields that represent property identifiers for compressing.
+ /// </summary>
+ internal enum CoderPropID
+ {
+ /// <summary>
+ /// Specifies default property.
+ /// </summary>
+ DefaultProp = 0,
+ /// <summary>
+ /// Specifies size of dictionary.
+ /// </summary>
+ DictionarySize,
+ /// <summary>
+ /// Specifies size of memory for PPM*.
+ /// </summary>
+ UsedMemorySize,
+ /// <summary>
+ /// Specifies order for PPM methods.
+ /// </summary>
+ Order,
+ /// <summary>
+ /// Specifies Block Size.
+ /// </summary>
+ BlockSize,
+ /// <summary>
+ /// Specifies number of position state bits for LZMA (0 - x - 4).
+ /// </summary>
+ PosStateBits,
+ /// <summary>
+ /// Specifies number of literal context bits for LZMA (0 - x - 8).
+ /// </summary>
+ LitContextBits,
+ /// <summary>
+ /// Specifies number of literal position bits for LZMA (0 - x - 4).
+ /// </summary>
+ LitPosBits,
+ /// <summary>
+ /// Specifies number of fast bytes for LZ*.
+ /// </summary>
+ NumFastBytes,
+ /// <summary>
+ /// Specifies match finder. LZMA: "BT2", "BT4" or "BT4B".
+ /// </summary>
+ MatchFinder,
+ /// <summary>
+ /// Specifies the number of match finder cycles.
+ /// </summary>
+ MatchFinderCycles,
+ /// <summary>
+ /// Specifies number of passes.
+ /// </summary>
+ NumPasses,
+ /// <summary>
+ /// Specifies number of algorithm.
+ /// </summary>
+ Algorithm,
+ /// <summary>
+ /// Specifies the number of threads.
+ /// </summary>
+ NumThreads,
+ /// <summary>
+ /// Specifies mode with end marker.
+ /// </summary>
+ EndMarker
+ };
+
+
+ internal interface ISetCoderProperties
+ {
+ void SetCoderProperties(CoderPropID[] propIDs, object[] properties);
+ };
+
+ internal interface IWriteCoderProperties
+ {
+ void WriteCoderProperties(System.IO.Stream outStream);
+ }
+
+ internal interface ISetDecoderProperties
+ {
+ void SetDecoderProperties(byte[] properties);
+ }
+}
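
ICodeProgress is the only public hook in this file; coders call SetProgress periodically while encoding or decoding. A throwaway reporter under that assumption might look like this (`ConsoleProgress` is an illustrative name):

```csharp
using System;
using Compress.SevenZip.Common;

// Minimal ICodeProgress implementation: a coder invokes SetProgress with the
// byte counts processed so far, passing -1 for whichever size is unknown.
public class ConsoleProgress : ICodeProgress
{
    public void SetProgress(long inSize, long outSize)
    {
        string inText = inSize < 0 ? "?" : inSize.ToString();
        string outText = outSize < 0 ? "?" : outSize.ToString();
        Console.Write($"\rin: {inText} bytes  out: {outText} bytes");
    }
}
```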
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/BZip2Constants.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/BZip2Constants.cs
new file mode 100644
index 00000000..c945d571
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/BZip2Constants.cs
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2001,2004-2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This package is based on the work done by Keiron Liddle, Aftex Software
+ * to whom the Ant project is very grateful for his
+ * great code.
+ */
+
+namespace Compress.SevenZip.Compress.BZip2
+{
+ /**
+ * Base class for both the compress and decompress classes.
+ * Holds common arrays, and static data.
+ *
+ * @author Keiron Liddle
+ */
+ internal class BZip2Constants
+ {
+
+ public const int baseBlockSize = 100000;
+ public const int MAX_ALPHA_SIZE = 258;
+ public const int MAX_CODE_LEN = 23;
+ public const int RUNA = 0;
+ public const int RUNB = 1;
+ public const int N_GROUPS = 6;
+ public const int G_SIZE = 50;
+ public const int N_ITERS = 4;
+ public const int MAX_SELECTORS = (2 + (900000 / G_SIZE));
+ public const int NUM_OVERSHOOT_BYTES = 20;
+
+ public static int[] rNums = {
+ 619, 720, 127, 481, 931, 816, 813, 233, 566, 247,
+ 985, 724, 205, 454, 863, 491, 741, 242, 949, 214,
+ 733, 859, 335, 708, 621, 574, 73, 654, 730, 472,
+ 419, 436, 278, 496, 867, 210, 399, 680, 480, 51,
+ 878, 465, 811, 169, 869, 675, 611, 697, 867, 561,
+ 862, 687, 507, 283, 482, 129, 807, 591, 733, 623,
+ 150, 238, 59, 379, 684, 877, 625, 169, 643, 105,
+ 170, 607, 520, 932, 727, 476, 693, 425, 174, 647,
+ 73, 122, 335, 530, 442, 853, 695, 249, 445, 515,
+ 909, 545, 703, 919, 874, 474, 882, 500, 594, 612,
+ 641, 801, 220, 162, 819, 984, 589, 513, 495, 799,
+ 161, 604, 958, 533, 221, 400, 386, 867, 600, 782,
+ 382, 596, 414, 171, 516, 375, 682, 485, 911, 276,
+ 98, 553, 163, 354, 666, 933, 424, 341, 533, 870,
+ 227, 730, 475, 186, 263, 647, 537, 686, 600, 224,
+ 469, 68, 770, 919, 190, 373, 294, 822, 808, 206,
+ 184, 943, 795, 384, 383, 461, 404, 758, 839, 887,
+ 715, 67, 618, 276, 204, 918, 873, 777, 604, 560,
+ 951, 160, 578, 722, 79, 804, 96, 409, 713, 940,
+ 652, 934, 970, 447, 318, 353, 859, 672, 112, 785,
+ 645, 863, 803, 350, 139, 93, 354, 99, 820, 908,
+ 609, 772, 154, 274, 580, 184, 79, 626, 630, 742,
+ 653, 282, 762, 623, 680, 81, 927, 626, 789, 125,
+ 411, 521, 938, 300, 821, 78, 343, 175, 128, 250,
+ 170, 774, 972, 275, 999, 639, 495, 78, 352, 126,
+ 857, 956, 358, 619, 580, 124, 737, 594, 701, 612,
+ 669, 112, 134, 694, 363, 992, 809, 743, 168, 974,
+ 944, 375, 748, 52, 600, 747, 642, 182, 862, 81,
+ 344, 805, 988, 739, 511, 655, 814, 334, 249, 515,
+ 897, 955, 664, 981, 649, 113, 974, 459, 893, 228,
+ 433, 837, 553, 268, 926, 240, 102, 654, 459, 51,
+ 686, 754, 806, 760, 493, 403, 415, 394, 687, 700,
+ 946, 670, 656, 610, 738, 392, 760, 799, 887, 653,
+ 978, 321, 576, 617, 626, 502, 894, 679, 243, 440,
+ 680, 879, 194, 572, 640, 724, 926, 56, 204, 700,
+ 707, 151, 457, 449, 797, 195, 791, 558, 945, 679,
+ 297, 59, 87, 824, 713, 663, 412, 693, 342, 606,
+ 134, 108, 571, 364, 631, 212, 174, 643, 304, 329,
+ 343, 97, 430, 751, 497, 314, 983, 374, 822, 928,
+ 140, 206, 73, 263, 980, 736, 876, 478, 430, 305,
+ 170, 514, 364, 692, 829, 82, 855, 953, 676, 246,
+ 369, 970, 294, 750, 807, 827, 150, 790, 288, 923,
+ 804, 378, 215, 828, 592, 281, 565, 555, 710, 82,
+ 896, 831, 547, 261, 524, 462, 293, 465, 502, 56,
+ 661, 821, 976, 991, 658, 869, 905, 758, 745, 193,
+ 768, 550, 608, 933, 378, 286, 215, 979, 792, 961,
+ 61, 688, 793, 644, 986, 403, 106, 366, 905, 644,
+ 372, 567, 466, 434, 645, 210, 389, 550, 919, 135,
+ 780, 773, 635, 389, 707, 100, 626, 958, 165, 504,
+ 920, 176, 193, 713, 857, 265, 203, 50, 668, 108,
+ 645, 990, 626, 197, 510, 357, 358, 850, 858, 364,
+ 936, 638
+ };
+ }
+}
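
A quick sanity check on the constants: the "BZh1".."BZh9" level digit scales the working buffers by baseBlockSize, which is how the CBZip2InputStream/CBZip2OutputStream classes below size their ll8/tt/block arrays. This sketch assumes it runs inside the same assembly, since BZip2Constants is internal; `BZip2Sizes` is an illustrative name.

```csharp
using System;
using Compress.SevenZip.Compress.BZip2;

internal static class BZip2Sizes
{
    // Prints the element counts allocated for each compression level digit.
    internal static void Print()
    {
        for (int level = 1; level <= 9; level++)
        {
            int n = BZip2Constants.baseBlockSize * level;
            Console.WriteLine($"BZh{level}: {n} block entries");
        }
    }
}
```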
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/CBZip2InputStream.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/CBZip2InputStream.cs
new file mode 100644
index 00000000..da697686
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/CBZip2InputStream.cs
@@ -0,0 +1,1133 @@
+using System;
+using System.IO;
+
+/*
+ * Copyright 2001,2004-2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This package is based on the work done by Keiron Liddle, Aftex Software
+ * to whom the Ant project is very grateful for his
+ * great code.
+ */
+
+namespace Compress.SevenZip.Compress.BZip2
+{
+ /**
+ * An input stream that decompresses from the BZip2 format (with the file
+ * header chars) to be read as any other stream.
+ *
+ * @author Keiron Liddle
+ *
+ * NB: note this class has been modified to read the leading BZ from the
+ * start of the BZIP2 stream to make it compatible with other PGP programs.
+ */
+ internal class CBZip2InputStream : Stream
+ {
+ private static void Cadvise()
+ {
+ //System.out.Println("CRC Error");
+ //throw new CCoruptionError();
+ }
+
+ private static void BadBGLengths()
+ {
+ Cadvise();
+ }
+
+ private static void BitStreamEOF()
+ {
+ Cadvise();
+ }
+
+ private static void CompressedStreamEOF()
+ {
+ Cadvise();
+ }
+
+ private void MakeMaps()
+ {
+ int i;
+ nInUse = 0;
+ for (i = 0; i < 256; i++)
+ {
+ if (inUse[i])
+ {
+ seqToUnseq[nInUse] = (char)i;
+ unseqToSeq[i] = (char)nInUse;
+ nInUse++;
+ }
+ }
+ }
+
+ /*
+ index of the last char in the block, so
+ the block size == last + 1.
+ */
+ private int last;
+
+ /*
+ index in zptr[] of original string after sorting.
+ */
+ private int origPtr;
+
+ /*
+ always: in the range 0 .. 9.
+ The current block size is 100000 * this number.
+ */
+ private int blockSize100k;
+
+ private bool blockRandomised;
+
+ private int bsBuff;
+ private int bsLive;
+ private CRC mCrc = new CRC();
+
+ private bool[] inUse = new bool[256];
+ private int nInUse;
+
+ private char[] seqToUnseq = new char[256];
+ private char[] unseqToSeq = new char[256];
+
+ private char[] selector = new char[BZip2Constants.MAX_SELECTORS];
+ private char[] selectorMtf = new char[BZip2Constants.MAX_SELECTORS];
+
+ private int[] tt;
+ private char[] ll8;
+
+ /*
+ freq table collected to save a pass over the data
+ during decompression.
+ */
+ private int[] unzftab = new int[256];
+
+ private int[][] limit = InitIntArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE);
+ private int[][] basev = InitIntArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE);
+ private int[][] perm = InitIntArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE);
+ private int[] minLens = new int[BZip2Constants.N_GROUPS];
+
+ private Stream bsStream;
+ private bool leaveOpen;
+
+ private bool streamEnd = false;
+
+ private int currentChar = -1;
+
+ private const int START_BLOCK_STATE = 1;
+ private const int RAND_PART_A_STATE = 2;
+ private const int RAND_PART_B_STATE = 3;
+ private const int RAND_PART_C_STATE = 4;
+ private const int NO_RAND_PART_A_STATE = 5;
+ private const int NO_RAND_PART_B_STATE = 6;
+ private const int NO_RAND_PART_C_STATE = 7;
+
+ private int currentState = START_BLOCK_STATE;
+
+ private int storedBlockCRC, storedCombinedCRC;
+ private int computedBlockCRC, computedCombinedCRC;
+ private bool decompressConcatenated;
+
+ int i2, count, chPrev, ch2;
+ int i, tPos;
+ int rNToGo = 0;
+ int rTPos = 0;
+ int j2;
+ char z;
+
+ private long position = 0;
+
+ public CBZip2InputStream(Stream zStream, bool decompressConcatenated)
+ {
+ this.decompressConcatenated = decompressConcatenated;
+ ll8 = null;
+ tt = null;
+ BsSetStream(zStream);
+ Initialize(true);
+ InitBlock();
+ SetupBlock();
+ }
+
+ internal static int[][] InitIntArray(int n1, int n2)
+ {
+ int[][] a = new int[n1][];
+ for (int k = 0; k < n1; ++k)
+ {
+ a[k] = new int[n2];
+ }
+ return a;
+ }
+
+ internal static char[][] InitCharArray(int n1, int n2)
+ {
+ char[][] a = new char[n1][];
+ for (int k = 0; k < n1; ++k)
+ {
+ a[k] = new char[n2];
+ }
+ return a;
+ }
+
+ public override int ReadByte()
+ {
+ if (streamEnd)
+ {
+ return -1;
+ }
+ else
+ {
+ int retChar = currentChar;
+ switch (currentState)
+ {
+ case START_BLOCK_STATE:
+ break;
+ case RAND_PART_A_STATE:
+ break;
+ case RAND_PART_B_STATE:
+ SetupRandPartB();
+ break;
+ case RAND_PART_C_STATE:
+ SetupRandPartC();
+ break;
+ case NO_RAND_PART_A_STATE:
+ break;
+ case NO_RAND_PART_B_STATE:
+ SetupNoRandPartB();
+ break;
+ case NO_RAND_PART_C_STATE:
+ SetupNoRandPartC();
+ break;
+ default:
+ break;
+ }
+ return retChar;
+ }
+ }
+
+ private bool Initialize(bool isFirstStream)
+ {
+ int magic0 = bsStream.ReadByte();
+ int magic1 = bsStream.ReadByte();
+ int magic2 = bsStream.ReadByte();
+ if (magic0 == -1 && !isFirstStream)
+ {
+ return false;
+ }
+ if (magic0 != 'B' || magic1 != 'Z' || magic2 != 'h')
+ {
+ throw new IOException("Not a BZIP2 marked stream");
+ }
+ int magic3 = bsStream.ReadByte();
+ if (magic3 < '1' || magic3 > '9')
+ {
+ BsFinishedWithStream();
+ streamEnd = true;
+ return false;
+ }
+
+ SetDecompressStructureSizes(magic3 - '0');
+ this.bsLive = 0;
+ computedCombinedCRC = 0;
+ return true;
+ }
+
+ private void InitBlock()
+ {
+ char magic1, magic2, magic3, magic4;
+ char magic5, magic6;
+
+ while (true)
+ {
+ magic1 = BsGetUChar();
+ magic2 = BsGetUChar();
+ magic3 = BsGetUChar();
+ magic4 = BsGetUChar();
+ magic5 = BsGetUChar();
+ magic6 = BsGetUChar();
+ if (magic1 != 0x17 || magic2 != 0x72 || magic3 != 0x45
+ || magic4 != 0x38 || magic5 != 0x50 || magic6 != 0x90)
+ {
+ break;
+ }
+
+ if (Complete())
+ return;
+ }
+
+ if (magic1 != 0x31 || magic2 != 0x41 || magic3 != 0x59
+ || magic4 != 0x26 || magic5 != 0x53 || magic6 != 0x59)
+ {
+ BadBlockHeader();
+ streamEnd = true;
+ return;
+ }
+
+ storedBlockCRC = BsGetInt32();
+
+ if (BsR(1) == 1)
+ {
+ blockRandomised = true;
+ }
+ else
+ {
+ blockRandomised = false;
+ }
+
+ // currBlockNo++;
+ GetAndMoveToFrontDecode();
+
+ mCrc.InitialiseCRC();
+ currentState = START_BLOCK_STATE;
+ }
+
+ private void EndBlock()
+ {
+ computedBlockCRC = mCrc.GetFinalCRC();
+ /* A bad CRC is considered a fatal error. */
+ if (storedBlockCRC != computedBlockCRC)
+ {
+ CrcError();
+ }
+
+ computedCombinedCRC = (computedCombinedCRC << 1)
+ | (int)(((uint)computedCombinedCRC) >> 31);
+ computedCombinedCRC ^= computedBlockCRC;
+ }
+
+ private bool Complete()
+ {
+ storedCombinedCRC = BsGetInt32();
+ if (storedCombinedCRC != computedCombinedCRC)
+ {
+ CrcError();
+ }
+
+ bool complete = !decompressConcatenated || !Initialize(false);
+ if (complete)
+ {
+ BsFinishedWithStream();
+ streamEnd = true;
+ }
+
+ // Look for the next .bz2 stream if decompressing
+ // concatenated files.
+ return complete;
+ }
+
+ private static void BlockOverrun()
+ {
+ Cadvise();
+ }
+
+ private static void BadBlockHeader()
+ {
+ Cadvise();
+ }
+
+ private static void CrcError()
+ {
+ Cadvise();
+ }
+
+ private void BsFinishedWithStream()
+ {
+ try
+ {
+ if (this.bsStream != null)
+ {
+ if (!leaveOpen)
+ this.bsStream.Dispose();
+ this.bsStream = null;
+ }
+ }
+ catch
+ {
+ //ignore
+ }
+ }
+
+ private void BsSetStream(Stream f)
+ {
+ bsStream = f;
+ bsLive = 0;
+ bsBuff = 0;
+ }
+
+ private int BsR(int n)
+ {
+ int v;
+ while (bsLive < n)
+ {
+ int zzi;
+ int thech = '\0';
+ try
+ {
+ thech = (char)bsStream.ReadByte();
+ }
+ catch (IOException)
+ {
+ CompressedStreamEOF();
+ }
+ if (thech == '\uffff')
+ {
+ CompressedStreamEOF();
+ }
+ zzi = thech;
+ bsBuff = (bsBuff << 8) | (zzi & 0xff);
+ bsLive += 8;
+ }
+
+ v = (bsBuff >> (bsLive - n)) & ((1 << n) - 1);
+ bsLive -= n;
+ return v;
+ }
+
+ private char BsGetUChar()
+ {
+ return (char)BsR(8);
+ }
+
+ private int BsGetint()
+ {
+ int u = 0;
+ u = (u << 8) | BsR(8);
+ u = (u << 8) | BsR(8);
+ u = (u << 8) | BsR(8);
+ u = (u << 8) | BsR(8);
+ return u;
+ }
+
+ private int BsGetIntVS(int numBits)
+ {
+ return (int)BsR(numBits);
+ }
+
+ private int BsGetInt32()
+ {
+ return (int)BsGetint();
+ }
+
+ private void HbCreateDecodeTables(int[] limit, int[] basev,
+ int[] perm, char[] length,
+ int minLen, int maxLen, int alphaSize)
+ {
+ int pp, i, j, vec;
+
+ pp = 0;
+ for (i = minLen; i <= maxLen; i++)
+ {
+ for (j = 0; j < alphaSize; j++)
+ {
+ if (length[j] == i)
+ {
+ perm[pp] = j;
+ pp++;
+ }
+ }
+ }
+
+ for (i = 0; i < BZip2Constants.MAX_CODE_LEN; i++)
+ {
+ basev[i] = 0;
+ }
+ for (i = 0; i < alphaSize; i++)
+ {
+ basev[length[i] + 1]++;
+ }
+
+ for (i = 1; i < BZip2Constants.MAX_CODE_LEN; i++)
+ {
+ basev[i] += basev[i - 1];
+ }
+
+ for (i = 0; i < BZip2Constants.MAX_CODE_LEN; i++)
+ {
+ limit[i] = 0;
+ }
+ vec = 0;
+
+ for (i = minLen; i <= maxLen; i++)
+ {
+ vec += (basev[i + 1] - basev[i]);
+ limit[i] = vec - 1;
+ vec <<= 1;
+ }
+ for (i = minLen + 1; i <= maxLen; i++)
+ {
+ basev[i] = ((limit[i - 1] + 1) << 1) - basev[i];
+ }
+ }
+
+ private void RecvDecodingTables()
+ {
+ char[][] len = InitCharArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE);
+ int i, j, t, nGroups, nSelectors, alphaSize;
+ int minLen, maxLen;
+ bool[] inUse16 = new bool[16];
+
+ /* Receive the mapping table */
+ for (i = 0; i < 16; i++)
+ {
+ if (BsR(1) == 1)
+ {
+ inUse16[i] = true;
+ }
+ else
+ {
+ inUse16[i] = false;
+ }
+ }
+
+ for (i = 0; i < 256; i++)
+ {
+ inUse[i] = false;
+ }
+
+ for (i = 0; i < 16; i++)
+ {
+ if (inUse16[i])
+ {
+ for (j = 0; j < 16; j++)
+ {
+ if (BsR(1) == 1)
+ {
+ inUse[i * 16 + j] = true;
+ }
+ }
+ }
+ }
+
+ MakeMaps();
+ alphaSize = nInUse + 2;
+
+ /* Now the selectors */
+ nGroups = BsR(3);
+ nSelectors = BsR(15);
+ for (i = 0; i < nSelectors; i++)
+ {
+ j = 0;
+ while (BsR(1) == 1)
+ {
+ j++;
+ }
+ selectorMtf[i] = (char)j;
+ }
+
+ /* Undo the MTF values for the selectors. */
+ {
+ char[] pos = new char[BZip2Constants.N_GROUPS];
+ char tmp, v;
+ for (v = '\0'; v < nGroups; v++)
+ {
+ pos[v] = v;
+ }
+
+ for (i = 0; i < nSelectors; i++)
+ {
+ v = selectorMtf[i];
+ tmp = pos[v];
+ while (v > 0)
+ {
+ pos[v] = pos[v - 1];
+ v--;
+ }
+ pos[0] = tmp;
+ selector[i] = tmp;
+ }
+ }
+
+ /* Now the coding tables */
+ for (t = 0; t < nGroups; t++)
+ {
+ int curr = BsR(5);
+ for (i = 0; i < alphaSize; i++)
+ {
+ while (BsR(1) == 1)
+ {
+ if (BsR(1) == 0)
+ {
+ curr++;
+ }
+ else
+ {
+ curr--;
+ }
+ }
+ len[t][i] = (char)curr;
+ }
+ }
+
+ /* Create the Huffman decoding tables */
+ for (t = 0; t < nGroups; t++)
+ {
+ minLen = 32;
+ maxLen = 0;
+ for (i = 0; i < alphaSize; i++)
+ {
+ if (len[t][i] > maxLen)
+ {
+ maxLen = len[t][i];
+ }
+ if (len[t][i] < minLen)
+ {
+ minLen = len[t][i];
+ }
+ }
+ HbCreateDecodeTables(limit[t], basev[t], perm[t], len[t], minLen,
+ maxLen, alphaSize);
+ minLens[t] = minLen;
+ }
+ }
+
+ private void GetAndMoveToFrontDecode()
+ {
+ char[] yy = new char[256];
+ int i, j, nextSym, limitLast;
+ int EOB, groupNo, groupPos;
+
+ limitLast = BZip2Constants.baseBlockSize * blockSize100k;
+ origPtr = BsGetIntVS(24);
+
+ RecvDecodingTables();
+ EOB = nInUse + 1;
+ groupNo = -1;
+ groupPos = 0;
+
+ /*
+ Setting up the unzftab entries here is not strictly
+ necessary, but it does save having to do it later
+ in a separate pass, and so saves a block's worth of
+ cache misses.
+ */
+ for (i = 0; i <= 255; i++)
+ {
+ unzftab[i] = 0;
+ }
+
+ for (i = 0; i <= 255; i++)
+ {
+ yy[i] = (char)i;
+ }
+
+ last = -1;
+
+ {
+ int zt, zn, zvec, zj;
+ if (groupPos == 0)
+ {
+ groupNo++;
+ groupPos = BZip2Constants.G_SIZE;
+ }
+ groupPos--;
+ zt = selector[groupNo];
+ zn = minLens[zt];
+ zvec = BsR(zn);
+ while (zvec > limit[zt][zn])
+ {
+ zn++;
+ {
+ {
+ while (bsLive < 1)
+ {
+ int zzi;
+ char thech = '\0';
+ try
+ {
+ thech = (char)bsStream.ReadByte();
+ }
+ catch (IOException)
+ {
+ CompressedStreamEOF();
+ }
+ if (thech == '\uffff')
+ {
+ CompressedStreamEOF();
+ }
+ zzi = thech;
+ bsBuff = (bsBuff << 8) | (zzi & 0xff);
+ bsLive += 8;
+ }
+ }
+ zj = (bsBuff >> (bsLive - 1)) & 1;
+ bsLive--;
+ }
+ zvec = (zvec << 1) | zj;
+ }
+ nextSym = perm[zt][zvec - basev[zt][zn]];
+ }
+
+ while (true)
+ {
+
+ if (nextSym == EOB)
+ {
+ break;
+ }
+
+ if (nextSym == BZip2Constants.RUNA || nextSym == BZip2Constants.RUNB)
+ {
+ char ch;
+ int s = -1;
+ int N = 1;
+ do
+ {
+ if (nextSym == BZip2Constants.RUNA)
+ {
+ s = s + (0 + 1) * N;
+ }
+ else if (nextSym == BZip2Constants.RUNB)
+ {
+ s = s + (1 + 1) * N;
+ }
+ N = N * 2;
+ {
+ int zt, zn, zvec, zj;
+ if (groupPos == 0)
+ {
+ groupNo++;
+ groupPos = BZip2Constants.G_SIZE;
+ }
+ groupPos--;
+ zt = selector[groupNo];
+ zn = minLens[zt];
+ zvec = BsR(zn);
+ while (zvec > limit[zt][zn])
+ {
+ zn++;
+ {
+ {
+ while (bsLive < 1)
+ {
+ int zzi;
+ char thech = '\0';
+ try
+ {
+ thech = (char)bsStream.ReadByte();
+ }
+ catch (IOException)
+ {
+ CompressedStreamEOF();
+ }
+ if (thech == '\uffff')
+ {
+ CompressedStreamEOF();
+ }
+ zzi = thech;
+ bsBuff = (bsBuff << 8) | (zzi & 0xff);
+ bsLive += 8;
+ }
+ }
+ zj = (bsBuff >> (bsLive - 1)) & 1;
+ bsLive--;
+ }
+ zvec = (zvec << 1) | zj;
+ }
+ nextSym = perm[zt][zvec - basev[zt][zn]];
+ }
+ } while (nextSym == BZip2Constants.RUNA || nextSym == BZip2Constants.RUNB);
+
+ s++;
+ ch = seqToUnseq[yy[0]];
+ unzftab[ch] += s;
+
+ while (s > 0)
+ {
+ last++;
+ ll8[last] = ch;
+ s--;
+ }
+
+ if (last >= limitLast)
+ {
+ BlockOverrun();
+ }
+ continue;
+ }
+ else
+ {
+ char tmp;
+ last++;
+ if (last >= limitLast)
+ {
+ BlockOverrun();
+ }
+
+ tmp = yy[nextSym - 1];
+ unzftab[seqToUnseq[tmp]]++;
+ ll8[last] = seqToUnseq[tmp];
+
+ /*
+ This loop is hammered during decompression,
+ hence the unrolling.
+
+ for (j = nextSym-1; j > 0; j--) yy[j] = yy[j-1];
+ */
+
+ j = nextSym - 1;
+ for (; j > 3; j -= 4)
+ {
+ yy[j] = yy[j - 1];
+ yy[j - 1] = yy[j - 2];
+ yy[j - 2] = yy[j - 3];
+ yy[j - 3] = yy[j - 4];
+ }
+ for (; j > 0; j--)
+ {
+ yy[j] = yy[j - 1];
+ }
+
+ yy[0] = tmp;
+ {
+ int zt, zn, zvec, zj;
+ if (groupPos == 0)
+ {
+ groupNo++;
+ groupPos = BZip2Constants.G_SIZE;
+ }
+ groupPos--;
+ zt = selector[groupNo];
+ zn = minLens[zt];
+ zvec = BsR(zn);
+ while (zvec > limit[zt][zn])
+ {
+ zn++;
+ {
+ {
+ while (bsLive < 1)
+ {
+ int zzi;
+ char thech = '\0';
+ try
+ {
+ thech = (char)bsStream.ReadByte();
+ }
+ catch (IOException)
+ {
+ CompressedStreamEOF();
+ }
+ zzi = thech;
+ bsBuff = (bsBuff << 8) | (zzi & 0xff);
+ bsLive += 8;
+ }
+ }
+ zj = (bsBuff >> (bsLive - 1)) & 1;
+ bsLive--;
+ }
+ zvec = (zvec << 1) | zj;
+ }
+ nextSym = perm[zt][zvec - basev[zt][zn]];
+ }
+ continue;
+ }
+ }
+ }
+
+ private void SetupBlock()
+ {
+ int[] cftab = new int[257];
+ char ch;
+
+ cftab[0] = 0;
+ for (i = 1; i <= 256; i++)
+ {
+ cftab[i] = unzftab[i - 1];
+ }
+ for (i = 1; i <= 256; i++)
+ {
+ cftab[i] += cftab[i - 1];
+ }
+
+ for (i = 0; i <= last; i++)
+ {
+ ch = (char)ll8[i];
+ tt[cftab[ch]] = i;
+ cftab[ch]++;
+ }
+ cftab = null;
+
+ tPos = tt[origPtr];
+
+ count = 0;
+ i2 = 0;
+ ch2 = 256; /* not a char and not EOF */
+
+ if (blockRandomised)
+ {
+ rNToGo = 0;
+ rTPos = 0;
+ SetupRandPartA();
+ }
+ else
+ {
+ SetupNoRandPartA();
+ }
+ }
+
+ private void SetupRandPartA()
+ {
+ if (i2 <= last)
+ {
+ chPrev = ch2;
+ ch2 = ll8[tPos];
+ tPos = tt[tPos];
+ if (rNToGo == 0)
+ {
+ rNToGo = BZip2Constants.rNums[rTPos];
+ rTPos++;
+ if (rTPos == 512)
+ {
+ rTPos = 0;
+ }
+ }
+ rNToGo--;
+ ch2 ^= (int)((rNToGo == 1) ? 1 : 0);
+ i2++;
+
+ currentChar = ch2;
+ currentState = RAND_PART_B_STATE;
+ mCrc.UpdateCRC(ch2);
+ }
+ else
+ {
+ EndBlock();
+ InitBlock();
+ SetupBlock();
+ }
+ }
+
+ private void SetupNoRandPartA()
+ {
+ if (i2 <= last)
+ {
+ chPrev = ch2;
+ ch2 = ll8[tPos];
+ tPos = tt[tPos];
+ i2++;
+
+ currentChar = ch2;
+ currentState = NO_RAND_PART_B_STATE;
+ mCrc.UpdateCRC(ch2);
+ }
+ else
+ {
+ EndBlock();
+ InitBlock();
+ SetupBlock();
+ }
+ }
+
+ private void SetupRandPartB()
+ {
+ if (ch2 != chPrev)
+ {
+ currentState = RAND_PART_A_STATE;
+ count = 1;
+ SetupRandPartA();
+ }
+ else
+ {
+ count++;
+ if (count >= 4)
+ {
+ z = ll8[tPos];
+ tPos = tt[tPos];
+ if (rNToGo == 0)
+ {
+ rNToGo = BZip2Constants.rNums[rTPos];
+ rTPos++;
+ if (rTPos == 512)
+ {
+ rTPos = 0;
+ }
+ }
+ rNToGo--;
+ z ^= (char)((rNToGo == 1) ? 1 : 0);
+ j2 = 0;
+ currentState = RAND_PART_C_STATE;
+ SetupRandPartC();
+ }
+ else
+ {
+ currentState = RAND_PART_A_STATE;
+ SetupRandPartA();
+ }
+ }
+ }
+
+ private void SetupRandPartC()
+ {
+ if (j2 < (int)z)
+ {
+ currentChar = ch2;
+ mCrc.UpdateCRC(ch2);
+ j2++;
+ }
+ else
+ {
+ currentState = RAND_PART_A_STATE;
+ i2++;
+ count = 0;
+ SetupRandPartA();
+ }
+ }
+
+ private void SetupNoRandPartB()
+ {
+ if (ch2 != chPrev)
+ {
+ currentState = NO_RAND_PART_A_STATE;
+ count = 1;
+ SetupNoRandPartA();
+ }
+ else
+ {
+ count++;
+ if (count >= 4)
+ {
+ z = ll8[tPos];
+ tPos = tt[tPos];
+ currentState = NO_RAND_PART_C_STATE;
+ j2 = 0;
+ SetupNoRandPartC();
+ }
+ else
+ {
+ currentState = NO_RAND_PART_A_STATE;
+ SetupNoRandPartA();
+ }
+ }
+ }
+
+ private void SetupNoRandPartC()
+ {
+ if (j2 < (int)z)
+ {
+ currentChar = ch2;
+ mCrc.UpdateCRC(ch2);
+ j2++;
+ }
+ else
+ {
+ currentState = NO_RAND_PART_A_STATE;
+ i2++;
+ count = 0;
+ SetupNoRandPartA();
+ }
+ }
+
+ private void SetDecompressStructureSizes(int newSize100k)
+ {
+ if (!(0 <= newSize100k && newSize100k <= 9 && 0 <= blockSize100k
+ && blockSize100k <= 9))
+ {
+ // throw new IOException("Invalid block size");
+ }
+
+ blockSize100k = newSize100k;
+
+ if (newSize100k == 0)
+ {
+ return;
+ }
+
+ int n = BZip2Constants.baseBlockSize * newSize100k;
+ ll8 = new char[n];
+ tt = new int[n];
+ }
+
+ public override void Flush()
+ {
+ }
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ int k;
+ for (k = 0; k < count; ++k)
+ {
+ int c = ReadByte();
+ if (c == -1)
+ break;
+ buffer[k + offset] = (byte)c;
+ position += 1;
+ }
+ return k;
+ }
+
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ if (origin != SeekOrigin.Current)
+ throw new NotImplementedException();
+
+ for (int k = 0; k < offset; ++k)
+ {
+ int c = ReadByte();
+ if (c == -1)
+ break;
+ position += 1;
+ }
+ return offset;
+ }
+
+ public override void SetLength(long value)
+ {
+ }
+
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ }
+
+ public override bool CanRead
+ {
+ get
+ {
+ return true;
+ }
+ }
+
+ public override bool CanSeek
+ {
+ get
+ {
+ return false;
+ }
+ }
+
+ public override bool CanWrite
+ {
+ get
+ {
+ return false;
+ }
+ }
+
+ public override long Length
+ {
+ get
+ {
+ return 0;
+ }
+ }
+
+ public override long Position
+ {
+ get
+ {
+ return position;
+ }
+ set
+ {
+ }
+ }
+ }
+}
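
For orientation, a minimal decompression sketch over the reader above. CBZip2InputStream is internal, so this assumes same-assembly use; the input stream must already start with the "BZh" header that the constructor checks for. `BZip2Decode`/`Decompress` are illustrative names, not part of the PR.

```csharp
using System.IO;
using Compress.SevenZip.Compress.BZip2;

internal static class BZip2Decode
{
    // Streams a .bz2 payload through CBZip2InputStream into "destination".
    internal static void Decompress(Stream compressed, Stream destination)
    {
        using (var bz2 = new CBZip2InputStream(compressed, decompressConcatenated: false))
        {
            byte[] buffer = new byte[81920];
            int read;
            while ((read = bz2.Read(buffer, 0, buffer.Length)) > 0)
            {
                destination.Write(buffer, 0, read);
            }
        }
    }
}
```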
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/CBZip2OutputStream.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/CBZip2OutputStream.cs
new file mode 100644
index 00000000..24b763d7
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/CBZip2OutputStream.cs
@@ -0,0 +1,1985 @@
+using System.IO;
+
+/*
+ * Copyright 2001,2004-2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This package is based on the work done by Keiron Liddle, Aftex Software
+ * to whom the Ant project is very grateful for his
+ * great code.
+ */
+
+namespace Compress.SevenZip.Compress.BZip2
+{
+ /**
+ * An output stream that compresses into the BZip2 format (with the file
+ * header chars) into another stream.
+ *
+ * @author Keiron Liddle
+ *
+ * TODO: Update to BZip2 1.0.1
+ * NB: note this class has been modified to add a leading BZ to the
+ * start of the BZIP2 stream to make it compatible with other PGP programs.
+ */
+ internal class CBZip2OutputStream : Stream
+ {
+ protected const int SETMASK = (1 << 21);
+ protected const int CLEARMASK = (~SETMASK);
+ protected const int GREATER_ICOST = 15;
+ protected const int LESSER_ICOST = 0;
+ protected const int SMALL_THRESH = 20;
+ protected const int DEPTH_THRESH = 10;
+
+ /*
+ If you are ever unlucky/improbable enough
+ to get a stack overflow whilst sorting,
+ increase the following constant and try
+ again. In practice I have never seen the
+ stack go above 27 elems, so the following
+ limit seems very generous.
+ */
+ protected const int QSORT_STACK_SIZE = 1000;
+ private bool finished;
+
+ private static void Panic()
+ {
+ //System.out.Println("panic");
+ //throw new CError();
+ }
+
+ private void MakeMaps()
+ {
+ int i;
+ nInUse = 0;
+ for (i = 0; i < 256; i++)
+ {
+ if (inUse[i])
+ {
+ seqToUnseq[nInUse] = (char)i;
+ unseqToSeq[i] = (char)nInUse;
+ nInUse++;
+ }
+ }
+ }
+
+ protected static void HbMakeCodeLengths(char[] len, int[] freq,
+ int alphaSize, int maxLen)
+ {
+ /*
+ Nodes and heap entries run from 1. Entry 0
+ for both the heap and nodes is a sentinel.
+ */
+ int nNodes, nHeap, n1, n2, i, j, k;
+ bool tooLong;
+
+ int[] heap = new int[BZip2Constants.MAX_ALPHA_SIZE + 2];
+ int[] weight = new int[BZip2Constants.MAX_ALPHA_SIZE * 2];
+ int[] parent = new int[BZip2Constants.MAX_ALPHA_SIZE * 2];
+
+ for (i = 0; i < alphaSize; i++)
+ {
+ weight[i + 1] = (freq[i] == 0 ? 1 : freq[i]) << 8;
+ }
+
+ while (true)
+ {
+ nNodes = alphaSize;
+ nHeap = 0;
+
+ heap[0] = 0;
+ weight[0] = 0;
+ parent[0] = -2;
+
+ for (i = 1; i <= alphaSize; i++)
+ {
+ parent[i] = -1;
+ nHeap++;
+ heap[nHeap] = i;
+ {
+ int zz, tmp;
+ zz = nHeap;
+ tmp = heap[zz];
+ while (weight[tmp] < weight[heap[zz >> 1]])
+ {
+ heap[zz] = heap[zz >> 1];
+ zz >>= 1;
+ }
+ heap[zz] = tmp;
+ }
+ }
+ if (!(nHeap < (BZip2Constants.MAX_ALPHA_SIZE + 2)))
+ {
+ Panic();
+ }
+
+ while (nHeap > 1)
+ {
+ n1 = heap[1];
+ heap[1] = heap[nHeap];
+ nHeap--;
+ {
+ int zz = 0, yy = 0, tmp = 0;
+ zz = 1;
+ tmp = heap[zz];
+ while (true)
+ {
+ yy = zz << 1;
+ if (yy > nHeap)
+ {
+ break;
+ }
+ if (yy < nHeap
+ && weight[heap[yy + 1]] < weight[heap[yy]])
+ {
+ yy++;
+ }
+ if (weight[tmp] < weight[heap[yy]])
+ {
+ break;
+ }
+ heap[zz] = heap[yy];
+ zz = yy;
+ }
+ heap[zz] = tmp;
+ }
+ n2 = heap[1];
+ heap[1] = heap[nHeap];
+ nHeap--;
+ {
+ int zz = 0, yy = 0, tmp = 0;
+ zz = 1;
+ tmp = heap[zz];
+ while (true)
+ {
+ yy = zz << 1;
+ if (yy > nHeap)
+ {
+ break;
+ }
+ if (yy < nHeap
+ && weight[heap[yy + 1]] < weight[heap[yy]])
+ {
+ yy++;
+ }
+ if (weight[tmp] < weight[heap[yy]])
+ {
+ break;
+ }
+ heap[zz] = heap[yy];
+ zz = yy;
+ }
+ heap[zz] = tmp;
+ }
+ nNodes++;
+ parent[n1] = parent[n2] = nNodes;
+
+ weight[nNodes] = (int)((uint)((weight[n1] & 0xffffff00)
+ + (weight[n2] & 0xffffff00))
+ | (uint)(1 + (((weight[n1] & 0x000000ff) >
+ (weight[n2] & 0x000000ff)) ?
+ (weight[n1] & 0x000000ff) :
+ (weight[n2] & 0x000000ff))));
+
+ parent[nNodes] = -1;
+ nHeap++;
+ heap[nHeap] = nNodes;
+ {
+ int zz = 0, tmp = 0;
+ zz = nHeap;
+ tmp = heap[zz];
+ while (weight[tmp] < weight[heap[zz >> 1]])
+ {
+ heap[zz] = heap[zz >> 1];
+ zz >>= 1;
+ }
+ heap[zz] = tmp;
+ }
+ }
+ if (!(nNodes < (BZip2Constants.MAX_ALPHA_SIZE * 2)))
+ {
+ Panic();
+ }
+
+ tooLong = false;
+ for (i = 1; i <= alphaSize; i++)
+ {
+ j = 0;
+ k = i;
+ while (parent[k] >= 0)
+ {
+ k = parent[k];
+ j++;
+ }
+ len[i - 1] = (char)j;
+ if (j > maxLen)
+ {
+ tooLong = true;
+ }
+ }
+
+ if (!tooLong)
+ {
+ break;
+ }
+
+ for (i = 1; i < alphaSize; i++)
+ {
+ j = weight[i] >> 8;
+ j = 1 + (j / 2);
+ weight[i] = j << 8;
+ }
+ }
+ }
+
+ /*
+ index of the last char in the block, so
+ the block size == last + 1.
+ */
+ int last;
+
+ /*
+ index in zptr[] of original string after sorting.
+ */
+ int origPtr;
+
+ /*
+ always: in the range 0 .. 9.
+ The current block size is 100000 * this number.
+ */
+ int blockSize100k;
+
+ bool blockRandomised;
+
+ int bytesOut;
+ int bsBuff;
+ int bsLive;
+ CRC mCrc = new CRC();
+
+ private bool[] inUse = new bool[256];
+ private int nInUse;
+
+ private char[] seqToUnseq = new char[256];
+ private char[] unseqToSeq = new char[256];
+
+ private char[] selector = new char[BZip2Constants.MAX_SELECTORS];
+ private char[] selectorMtf = new char[BZip2Constants.MAX_SELECTORS];
+
+ private char[] block;
+ private int[] quadrant;
+ private int[] zptr;
+ private short[] szptr;
+ private int[] ftab;
+
+ private int nMTF;
+
+ private int[] mtfFreq = new int[BZip2Constants.MAX_ALPHA_SIZE];
+
+ /*
+ * Used when sorting. If too many long comparisons
+ * happen, we stop sorting, randomise the block
+ * slightly, and try again.
+ */
+ private int workFactor;
+ private int workDone;
+ private int workLimit;
+ private bool firstAttempt;
+ private int nBlocksRandomised;
+
+ private int currentChar = -1;
+ private int runLength = 0;
+
+ public CBZip2OutputStream(Stream inStream)
+ : this(inStream, 9, false)
+ {
+ }
+
+ public CBZip2OutputStream(Stream inStream, bool leaveOpen)
+ : this(inStream, 9, leaveOpen)
+ {
+ }
+
+ public CBZip2OutputStream(Stream inStream, int inBlockSize, bool leaveOpen)
+ {
+ block = null;
+ quadrant = null;
+ zptr = null;
+ ftab = null;
+
+ inStream.WriteByte((byte)'B');
+ inStream.WriteByte((byte)'Z');
+
+ BsSetStream(inStream, leaveOpen);
+
+ workFactor = 50;
+ if (inBlockSize > 9)
+ {
+ inBlockSize = 9;
+ }
+ if (inBlockSize < 1)
+ {
+ inBlockSize = 1;
+ }
+ blockSize100k = inBlockSize;
+ AllocateCompressStructures();
+ Initialize();
+ InitBlock();
+ }
+
+ /**
+ *
+ * modified by Oliver Merkel, 010128
+ *
+ */
+ public override void WriteByte(byte bv)
+ {
+ int b = (256 + bv) % 256;
+ if (currentChar != -1)
+ {
+ if (currentChar == b)
+ {
+ runLength++;
+ if (runLength > 254)
+ {
+ WriteRun();
+ currentChar = -1;
+ runLength = 0;
+ }
+ }
+ else
+ {
+ WriteRun();
+ runLength = 1;
+ currentChar = b;
+ }
+ }
+ else
+ {
+ currentChar = b;
+ runLength++;
+ }
+ }
+
+ private void WriteRun()
+ {
+ if (last < allowableBlockSize)
+ {
+ inUse[currentChar] = true;
+ for (int i = 0; i < runLength; i++)
+ {
+ mCrc.UpdateCRC((char)currentChar);
+ }
+ switch (runLength)
+ {
+ case 1:
+ last++;
+ block[last + 1] = (char)currentChar;
+ break;
+ case 2:
+ last++;
+ block[last + 1] = (char)currentChar;
+ last++;
+ block[last + 1] = (char)currentChar;
+ break;
+ case 3:
+ last++;
+ block[last + 1] = (char)currentChar;
+ last++;
+ block[last + 1] = (char)currentChar;
+ last++;
+ block[last + 1] = (char)currentChar;
+ break;
+ default:
+ inUse[runLength - 4] = true;
+ last++;
+ block[last + 1] = (char)currentChar;
+ last++;
+ block[last + 1] = (char)currentChar;
+ last++;
+ block[last + 1] = (char)currentChar;
+ last++;
+ block[last + 1] = (char)currentChar;
+ last++;
+ block[last + 1] = (char)(runLength - 4);
+ break;
+ }
+ }
+ else
+ {
+ EndBlock();
+ InitBlock();
+ WriteRun();
+ }
+ }
+
+ bool disposed = false;
+
+ protected override void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ if (disposed)
+ {
+ return;
+ }
+
+ Finish();
+
+ disposed = true;
+ base.Dispose();
+ if (!leaveOpen)
+ bsStream.Dispose();
+ bsStream = null;
+ }
+ }
+
+ public void Finish()
+ {
+ if (finished)
+ {
+ return;
+ }
+
+ if (runLength > 0)
+ {
+ WriteRun();
+ }
+ currentChar = -1;
+ EndBlock();
+ EndCompression();
+ finished = true;
+ Flush();
+ }
+
+ public override void Flush()
+ {
+ bsStream.Flush();
+ }
+
+ private int blockCRC, combinedCRC;
+
+ private void Initialize()
+ {
+ bytesOut = 0;
+ nBlocksRandomised = 0;
+
+ /* Write `magic' bytes h indicating file-format == huffmanised,
+ followed by a digit indicating blockSize100k.
+ */
+ BsPutUChar('h');
+ BsPutUChar('0' + blockSize100k);
+
+ combinedCRC = 0;
+ }
+
+ private int allowableBlockSize;
+
+ private void InitBlock()
+ {
+ // blockNo++;
+ mCrc.InitialiseCRC();
+ last = -1;
+ // ch = 0;
+
+ for (int i = 0; i < 256; i++)
+ {
+ inUse[i] = false;
+ }
+
+ /* 20 is just a paranoia constant */
+ allowableBlockSize = BZip2Constants.baseBlockSize * blockSize100k - 20;
+ }
+
+ private void EndBlock()
+ {
+ blockCRC = mCrc.GetFinalCRC();
+ combinedCRC = (combinedCRC << 1) | (int)(((uint)combinedCRC) >> 31);
+ combinedCRC ^= blockCRC;
+
+ /* sort the block and establish posn of original string */
+ DoReversibleTransformation();
+
+ /*
+ A 6-byte block header, the value chosen arbitrarily
+ as 0x314159265359 :-). A 32 bit value does not really
+ give a strong enough guarantee that the value will not
+ appear by chance in the compressed datastream. Worst-case
+ probability of this event, for a 900k block, is about
+ 2.0e-3 for 32 bits, 1.0e-5 for 40 bits and 4.0e-8 for 48 bits.
+ For a compressed file of size 100Gb -- about 100000 blocks --
+ only a 48-bit marker will do. NB: normal compression/
+ decompression do *not* rely on these statistical properties.
+ They are only important when trying to recover blocks from
+ damaged files.
+ */
+ BsPutUChar(0x31);
+ BsPutUChar(0x41);
+ BsPutUChar(0x59);
+ BsPutUChar(0x26);
+ BsPutUChar(0x53);
+ BsPutUChar(0x59);
+
+ /* Now the block's CRC, so it is in a known place. */
+ BsPutint(blockCRC);
+
+ /* Now a single bit indicating randomisation. */
+ if (blockRandomised)
+ {
+ BsW(1, 1);
+ nBlocksRandomised++;
+ }
+ else
+ {
+ BsW(1, 0);
+ }
+
+ /* Finally, block's contents proper. */
+ MoveToFrontCodeAndSend();
+ }
+
+ private void EndCompression()
+ {
+ /*
+ Now another magic 48-bit number, 0x177245385090, to
+ indicate the end of the last block. (Sqrt(pi), if
+ you want to know. I did want to use e, but it contains
+ too much repetition -- 27 18 28 18 28 46 -- for me
+ to feel statistically comfortable. Call me paranoid.)
+ */
+ BsPutUChar(0x17);
+ BsPutUChar(0x72);
+ BsPutUChar(0x45);
+ BsPutUChar(0x38);
+ BsPutUChar(0x50);
+ BsPutUChar(0x90);
+
+ BsPutint(combinedCRC);
+
+ BsFinishedWithStream();
+ }
+
+ private void HbAssignCodes(int[] code, char[] length, int minLen,
+ int maxLen, int alphaSize)
+ {
+ int n, vec, i;
+
+ vec = 0;
+ for (n = minLen; n <= maxLen; n++)
+ {
+ for (i = 0; i < alphaSize; i++)
+ {
+ if (length[i] == n)
+ {
+ code[i] = vec;
+ vec++;
+ }
+ };
+ vec <<= 1;
+ }
+ }
+
+ private void BsSetStream(Stream f, bool leaveOpen)
+ {
+ bsStream = f;
+ bsLive = 0;
+ bsBuff = 0;
+ bytesOut = 0;
+ this.leaveOpen = leaveOpen;
+ }
+
+ private void BsFinishedWithStream()
+ {
+ while (bsLive > 0)
+ {
+ int ch = (bsBuff >> 24);
+ try
+ {
+ bsStream.WriteByte((byte)ch); // write 8-bit
+ }
+ catch (IOException e)
+ {
+ throw e;
+ }
+ bsBuff <<= 8;
+ bsLive -= 8;
+ bytesOut++;
+ }
+ }
+
+ private void BsW(int n, int v)
+ {
+ while (bsLive >= 8)
+ {
+ int ch = (bsBuff >> 24);
+ try
+ {
+ bsStream.WriteByte((byte)ch); // write 8-bit
+ }
+ catch (IOException e)
+ {
+ throw e;
+ }
+ bsBuff <<= 8;
+ bsLive -= 8;
+ bytesOut++;
+ }
+ bsBuff |= (v << (32 - bsLive - n));
+ bsLive += n;
+ }
+
+ private void BsPutUChar(int c)
+ {
+ BsW(8, c);
+ }
+
+ private void BsPutint(int u)
+ {
+ BsW(8, (u >> 24) & 0xff);
+ BsW(8, (u >> 16) & 0xff);
+ BsW(8, (u >> 8) & 0xff);
+ BsW(8, u & 0xff);
+ }
+
+ private void BsPutIntVS(int numBits, int c)
+ {
+ BsW(numBits, c);
+ }
+
+ private void SendMTFValues()
+ {
+ char[][] len = CBZip2InputStream.InitCharArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE);
+
+ int v, t, i, j, gs, ge, totc, bt, bc, iter;
+ int nSelectors = 0, alphaSize, minLen, maxLen, selCtr;
+ int nGroups;//, nBytes;
+
+ alphaSize = nInUse + 2;
+ for (t = 0; t < BZip2Constants.N_GROUPS; t++)
+ {
+ for (v = 0; v < alphaSize; v++)
+ {
+ len[t][v] = (char)GREATER_ICOST;
+ }
+ }
+
+ /* Decide how many coding tables to use */
+ if (nMTF <= 0)
+ {
+ Panic();
+ }
+
+ if (nMTF < 200)
+ {
+ nGroups = 2;
+ }
+ else if (nMTF < 600)
+ {
+ nGroups = 3;
+ }
+ else if (nMTF < 1200)
+ {
+ nGroups = 4;
+ }
+ else if (nMTF < 2400)
+ {
+ nGroups = 5;
+ }
+ else
+ {
+ nGroups = 6;
+ }
+
+ /* Generate an initial set of coding tables */
+ {
+ int nPart, remF, tFreq, aFreq;
+
+ nPart = nGroups;
+ remF = nMTF;
+ gs = 0;
+ while (nPart > 0)
+ {
+ tFreq = remF / nPart;
+ ge = gs - 1;
+ aFreq = 0;
+ while (aFreq < tFreq && ge < alphaSize - 1)
+ {
+ ge++;
+ aFreq += mtfFreq[ge];
+ }
+
+ if (ge > gs && nPart != nGroups && nPart != 1
+ && ((nGroups - nPart) % 2 == 1))
+ {
+ aFreq -= mtfFreq[ge];
+ ge--;
+ }
+
+ for (v = 0; v < alphaSize; v++)
+ {
+ if (v >= gs && v <= ge)
+ {
+ len[nPart - 1][v] = (char)LESSER_ICOST;
+ }
+ else
+ {
+ len[nPart - 1][v] = (char)GREATER_ICOST;
+ }
+ }
+
+ nPart--;
+ gs = ge + 1;
+ remF -= aFreq;
+ }
+ }
+
+ int[][] rfreq = CBZip2InputStream.InitIntArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE);
+ int[] fave = new int[BZip2Constants.N_GROUPS];
+ short[] cost = new short[BZip2Constants.N_GROUPS];
+ /*
+ Iterate up to N_ITERS times to improve the tables.
+ */
+ for (iter = 0; iter < BZip2Constants.N_ITERS; iter++)
+ {
+ for (t = 0; t < nGroups; t++)
+ {
+ fave[t] = 0;
+ }
+
+ for (t = 0; t < nGroups; t++)
+ {
+ for (v = 0; v < alphaSize; v++)
+ {
+ rfreq[t][v] = 0;
+ }
+ }
+
+ nSelectors = 0;
+ totc = 0;
+ gs = 0;
+ while (true)
+ {
+
+ /* Set group start & end marks. */
+ if (gs >= nMTF)
+ {
+ break;
+ }
+ ge = gs + BZip2Constants.G_SIZE - 1;
+ if (ge >= nMTF)
+ {
+ ge = nMTF - 1;
+ }
+
+ /*
+ Calculate the cost of this group as coded
+ by each of the coding tables.
+ */
+ for (t = 0; t < nGroups; t++)
+ {
+ cost[t] = 0;
+ }
+
+ if (nGroups == 6)
+ {
+ short cost0, cost1, cost2, cost3, cost4, cost5;
+ cost0 = cost1 = cost2 = cost3 = cost4 = cost5 = 0;
+ for (i = gs; i <= ge; i++)
+ {
+ short icv = szptr[i];
+ cost0 += (short)len[0][icv];
+ cost1 += (short)len[1][icv];
+ cost2 += (short)len[2][icv];
+ cost3 += (short)len[3][icv];
+ cost4 += (short)len[4][icv];
+ cost5 += (short)len[5][icv];
+ }
+ cost[0] = cost0;
+ cost[1] = cost1;
+ cost[2] = cost2;
+ cost[3] = cost3;
+ cost[4] = cost4;
+ cost[5] = cost5;
+ }
+ else
+ {
+ for (i = gs; i <= ge; i++)
+ {
+ short icv = szptr[i];
+ for (t = 0; t < nGroups; t++)
+ {
+ cost[t] += (short)len[t][icv];
+ }
+ }
+ }
+
+ /*
+ Find the coding table which is best for this group,
+ and record its identity in the selector table.
+ */
+ bc = 999999999;
+ bt = -1;
+ for (t = 0; t < nGroups; t++)
+ {
+ if (cost[t] < bc)
+ {
+ bc = cost[t];
+ bt = t;
+ }
+ };
+ totc += bc;
+ fave[bt]++;
+ selector[nSelectors] = (char)bt;
+ nSelectors++;
+
+ /*
+ Increment the symbol frequencies for the selected table.
+ */
+ for (i = gs; i <= ge; i++)
+ {
+ rfreq[bt][szptr[i]]++;
+ }
+
+ gs = ge + 1;
+ }
+
+ /*
+ Recompute the tables based on the accumulated frequencies.
+ */
+ for (t = 0; t < nGroups; t++)
+ {
+ HbMakeCodeLengths(len[t], rfreq[t], alphaSize, 20);
+ }
+ }
+
+ rfreq = null;
+ fave = null;
+ cost = null;
+
+ if (!(nGroups < 8))
+ {
+ Panic();
+ }
+ if (!(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZip2Constants.G_SIZE))))
+ {
+ Panic();
+ }
+
+
+ /* Compute MTF values for the selectors. */
+ {
+ char[] pos = new char[BZip2Constants.N_GROUPS];
+ char ll_i, tmp2, tmp;
+ for (i = 0; i < nGroups; i++)
+ {
+ pos[i] = (char)i;
+ }
+ for (i = 0; i < nSelectors; i++)
+ {
+ ll_i = selector[i];
+ j = 0;
+ tmp = pos[j];
+ while (ll_i != tmp)
+ {
+ j++;
+ tmp2 = tmp;
+ tmp = pos[j];
+ pos[j] = tmp2;
+ }
+ pos[0] = tmp;
+ selectorMtf[i] = (char)j;
+ }
+ }
+
+ int[][] code = CBZip2InputStream.InitIntArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE);
+
+ /* Assign actual codes for the tables. */
+ for (t = 0; t < nGroups; t++)
+ {
+ minLen = 32;
+ maxLen = 0;
+ for (i = 0; i < alphaSize; i++)
+ {
+ if (len[t][i] > maxLen)
+ {
+ maxLen = len[t][i];
+ }
+ if (len[t][i] < minLen)
+ {
+ minLen = len[t][i];
+ }
+ }
+ if (maxLen > 20)
+ {
+ Panic();
+ }
+ if (minLen < 1)
+ {
+ Panic();
+ }
+ HbAssignCodes(code[t], len[t], minLen, maxLen, alphaSize);
+ }
+
+ /* Transmit the mapping table. */
+ {
+ bool[] inUse16 = new bool[16];
+ for (i = 0; i < 16; i++)
+ {
+ inUse16[i] = false;
+ for (j = 0; j < 16; j++)
+ {
+ if (inUse[i * 16 + j])
+ {
+ inUse16[i] = true;
+ }
+ }
+ }
+
+ //nBytes = bytesOut;
+ for (i = 0; i < 16; i++)
+ {
+ if (inUse16[i])
+ {
+ BsW(1, 1);
+ }
+ else
+ {
+ BsW(1, 0);
+ }
+ }
+
+ for (i = 0; i < 16; i++)
+ {
+ if (inUse16[i])
+ {
+ for (j = 0; j < 16; j++)
+ {
+ if (inUse[i * 16 + j])
+ {
+ BsW(1, 1);
+ }
+ else
+ {
+ BsW(1, 0);
+ }
+ }
+ }
+ }
+
+ }
+
+ /* Now the selectors. */
+ //nBytes = bytesOut;
+ BsW(3, nGroups);
+ BsW(15, nSelectors);
+ for (i = 0; i < nSelectors; i++)
+ {
+ for (j = 0; j < selectorMtf[i]; j++)
+ {
+ BsW(1, 1);
+ }
+ BsW(1, 0);
+ }
+
+ /* Now the coding tables. */
+ //nBytes = bytesOut;
+
+ for (t = 0; t < nGroups; t++)
+ {
+ int curr = len[t][0];
+ BsW(5, curr);
+ for (i = 0; i < alphaSize; i++)
+ {
+ while (curr < len[t][i])
+ {
+ BsW(2, 2);
+ curr++; /* 10 */
+ }
+ while (curr > len[t][i])
+ {
+ BsW(2, 3);
+ curr--; /* 11 */
+ }
+ BsW(1, 0);
+ }
+ }
+
+ /* And finally, the block data proper */
+ //nBytes = bytesOut;
+ selCtr = 0;
+ gs = 0;
+ while (true)
+ {
+ if (gs >= nMTF)
+ {
+ break;
+ }
+ ge = gs + BZip2Constants.G_SIZE - 1;
+ if (ge >= nMTF)
+ {
+ ge = nMTF - 1;
+ }
+ for (i = gs; i <= ge; i++)
+ {
+ BsW(len[selector[selCtr]][szptr[i]],
+ code[selector[selCtr]][szptr[i]]);
+ }
+
+ gs = ge + 1;
+ selCtr++;
+ }
+ if (!(selCtr == nSelectors))
+ {
+ Panic();
+ }
+ }
+
+ private void MoveToFrontCodeAndSend()
+ {
+ BsPutIntVS(24, origPtr);
+ GenerateMTFValues();
+ SendMTFValues();
+ }
+
+ private Stream bsStream;
+ private bool leaveOpen;
+
+ private void SimpleSort(int lo, int hi, int d)
+ {
+ int i, j, h, bigN, hp;
+ int v;
+
+ bigN = hi - lo + 1;
+ if (bigN < 2)
+ {
+ return;
+ }
+
+ hp = 0;
+ while (incs[hp] < bigN)
+ {
+ hp++;
+ }
+ hp--;
+
+ for (; hp >= 0; hp--)
+ {
+ h = incs[hp];
+
+ i = lo + h;
+ while (true)
+ {
+ /* copy 1 */
+ if (i > hi)
+ {
+ break;
+ }
+ v = zptr[i];
+ j = i;
+ while (FullGtU(zptr[j - h] + d, v + d))
+ {
+ zptr[j] = zptr[j - h];
+ j = j - h;
+ if (j <= (lo + h - 1))
+ {
+ break;
+ }
+ }
+ zptr[j] = v;
+ i++;
+
+ /* copy 2 */
+ if (i > hi)
+ {
+ break;
+ }
+ v = zptr[i];
+ j = i;
+ while (FullGtU(zptr[j - h] + d, v + d))
+ {
+ zptr[j] = zptr[j - h];
+ j = j - h;
+ if (j <= (lo + h - 1))
+ {
+ break;
+ }
+ }
+ zptr[j] = v;
+ i++;
+
+ /* copy 3 */
+ if (i > hi)
+ {
+ break;
+ }
+ v = zptr[i];
+ j = i;
+ while (FullGtU(zptr[j - h] + d, v + d))
+ {
+ zptr[j] = zptr[j - h];
+ j = j - h;
+ if (j <= (lo + h - 1))
+ {
+ break;
+ }
+ }
+ zptr[j] = v;
+ i++;
+
+ if (workDone > workLimit && firstAttempt)
+ {
+ return;
+ }
+ }
+ }
+ }
+
+ private void Vswap(int p1, int p2, int n)
+ {
+ int temp = 0;
+ while (n > 0)
+ {
+ temp = zptr[p1];
+ zptr[p1] = zptr[p2];
+ zptr[p2] = temp;
+ p1++;
+ p2++;
+ n--;
+ }
+ }
+
+ private char Med3(char a, char b, char c)
+ {
+ char t;
+ if (a > b)
+ {
+ t = a;
+ a = b;
+ b = t;
+ }
+ if (b > c)
+ {
+ t = b;
+ b = c;
+ c = t;
+ }
+ if (a > b)
+ {
+ b = a;
+ }
+ return b;
+ }
+
+ internal class StackElem
+ {
+ internal int ll;
+ internal int hh;
+ internal int dd;
+ }
+
+ private void QSort3(int loSt, int hiSt, int dSt)
+ {
+ int unLo, unHi, ltLo, gtHi, med, n, m;
+ int sp, lo, hi, d;
+ StackElem[] stack = new StackElem[QSORT_STACK_SIZE];
+ for (int count = 0; count < QSORT_STACK_SIZE; count++)
+ {
+ stack[count] = new StackElem();
+ }
+
+ sp = 0;
+
+ stack[sp].ll = loSt;
+ stack[sp].hh = hiSt;
+ stack[sp].dd = dSt;
+ sp++;
+
+ while (sp > 0)
+ {
+ if (sp >= QSORT_STACK_SIZE)
+ {
+ Panic();
+ }
+
+ sp--;
+ lo = stack[sp].ll;
+ hi = stack[sp].hh;
+ d = stack[sp].dd;
+
+ if (hi - lo < SMALL_THRESH || d > DEPTH_THRESH)
+ {
+ SimpleSort(lo, hi, d);
+ if (workDone > workLimit && firstAttempt)
+ {
+ return;
+ }
+ continue;
+ }
+
+ med = Med3(block[zptr[lo] + d + 1],
+ block[zptr[hi] + d + 1],
+ block[zptr[(lo + hi) >> 1] + d + 1]);
+
+ unLo = ltLo = lo;
+ unHi = gtHi = hi;
+
+ while (true)
+ {
+ while (true)
+ {
+ if (unLo > unHi)
+ {
+ break;
+ }
+ n = ((int)block[zptr[unLo] + d + 1]) - med;
+ if (n == 0)
+ {
+ int temp = 0;
+ temp = zptr[unLo];
+ zptr[unLo] = zptr[ltLo];
+ zptr[ltLo] = temp;
+ ltLo++;
+ unLo++;
+ continue;
+ };
+ if (n > 0)
+ {
+ break;
+ }
+ unLo++;
+ }
+ while (true)
+ {
+ if (unLo > unHi)
+ {
+ break;
+ }
+ n = ((int)block[zptr[unHi] + d + 1]) - med;
+ if (n == 0)
+ {
+ int temp = 0;
+ temp = zptr[unHi];
+ zptr[unHi] = zptr[gtHi];
+ zptr[gtHi] = temp;
+ gtHi--;
+ unHi--;
+ continue;
+ };
+ if (n < 0)
+ {
+ break;
+ }
+ unHi--;
+ }
+ if (unLo > unHi)
+ {
+ break;
+ }
+ int tempx = zptr[unLo];
+ zptr[unLo] = zptr[unHi];
+ zptr[unHi] = tempx;
+ unLo++;
+ unHi--;
+ }
+
+ if (gtHi < ltLo)
+ {
+ stack[sp].ll = lo;
+ stack[sp].hh = hi;
+ stack[sp].dd = d + 1;
+ sp++;
+ continue;
+ }
+
+ n = ((ltLo - lo) < (unLo - ltLo)) ? (ltLo - lo) : (unLo - ltLo);
+ Vswap(lo, unLo - n, n);
+ m = ((hi - gtHi) < (gtHi - unHi)) ? (hi - gtHi) : (gtHi - unHi);
+ Vswap(unLo, hi - m + 1, m);
+
+ n = lo + unLo - ltLo - 1;
+ m = hi - (gtHi - unHi) + 1;
+
+ stack[sp].ll = lo;
+ stack[sp].hh = n;
+ stack[sp].dd = d;
+ sp++;
+
+ stack[sp].ll = n + 1;
+ stack[sp].hh = m - 1;
+ stack[sp].dd = d + 1;
+ sp++;
+
+ stack[sp].ll = m;
+ stack[sp].hh = hi;
+ stack[sp].dd = d;
+ sp++;
+ }
+ }
+
+ private void MainSort()
+ {
+ int i, j, ss, sb;
+ int[] runningOrder = new int[256];
+ int[] copy = new int[256];
+ bool[] bigDone = new bool[256];
+ int c1, c2;
+ int numQSorted;
+
+ /*
+ In the various block-sized structures, live data runs
+ from 0 to last+NUM_OVERSHOOT_BYTES inclusive. First,
+ set up the overshoot area for block.
+ */
+
+ // if (verbosity >= 4) fprintf ( stderr, " sort initialise ...\n" );
+ for (i = 0; i < BZip2Constants.NUM_OVERSHOOT_BYTES; i++)
+ {
+ block[last + i + 2] = block[(i % (last + 1)) + 1];
+ }
+ for (i = 0; i <= last + BZip2Constants.NUM_OVERSHOOT_BYTES; i++)
+ {
+ quadrant[i] = 0;
+ }
+
+ block[0] = (char)(block[last + 1]);
+
+ if (last < 4000)
+ {
+ /*
+ Use SimpleSort(), since the full sorting mechanism
+ has quite a large constant overhead.
+ */
+ for (i = 0; i <= last; i++)
+ {
+ zptr[i] = i;
+ }
+ firstAttempt = false;
+ workDone = workLimit = 0;
+ SimpleSort(0, last, 0);
+ }
+ else
+ {
+ numQSorted = 0;
+ for (i = 0; i <= 255; i++)
+ {
+ bigDone[i] = false;
+ }
+
+ for (i = 0; i <= 65536; i++)
+ {
+ ftab[i] = 0;
+ }
+
+ c1 = block[0];
+ for (i = 0; i <= last; i++)
+ {
+ c2 = block[i + 1];
+ ftab[(c1 << 8) + c2]++;
+ c1 = c2;
+ }
+
+ for (i = 1; i <= 65536; i++)
+ {
+ ftab[i] += ftab[i - 1];
+ }
+
+ c1 = block[1];
+ for (i = 0; i < last; i++)
+ {
+ c2 = block[i + 2];
+ j = (c1 << 8) + c2;
+ c1 = c2;
+ ftab[j]--;
+ zptr[ftab[j]] = i;
+ }
+
+ j = ((block[last + 1]) << 8) + (block[1]);
+ ftab[j]--;
+ zptr[ftab[j]] = last;
+
+ /*
+ Now ftab contains the first loc of every small bucket.
+ Calculate the running order, from smallest to largest
+ big bucket.
+ */
+
+ for (i = 0; i <= 255; i++)
+ {
+ runningOrder[i] = i;
+ }
+
+ {
+ int vv;
+ int h = 1;
+ do
+ {
+ h = 3 * h + 1;
+ }
+ while (h <= 256);
+ do
+ {
+ h = h / 3;
+ for (i = h; i <= 255; i++)
+ {
+ vv = runningOrder[i];
+ j = i;
+ while ((ftab[((runningOrder[j - h]) + 1) << 8]
+ - ftab[(runningOrder[j - h]) << 8]) >
+ (ftab[((vv) + 1) << 8] - ftab[(vv) << 8]))
+ {
+ runningOrder[j] = runningOrder[j - h];
+ j = j - h;
+ if (j <= (h - 1))
+ {
+ break;
+ }
+ }
+ runningOrder[j] = vv;
+ }
+ } while (h != 1);
+ }
+
+ /*
+ The main sorting loop.
+ */
+ for (i = 0; i <= 255; i++)
+ {
+
+ /*
+ Process big buckets, starting with the least full.
+ */
+ ss = runningOrder[i];
+
+ /*
+ Complete the big bucket [ss] by quicksorting
+ any unsorted small buckets [ss, j]. Hopefully
+ previous pointer-scanning phases have already
+ completed many of the small buckets [ss, j], so
+ we don't have to sort them at all.
+ */
+ for (j = 0; j <= 255; j++)
+ {
+ sb = (ss << 8) + j;
+ if (!((ftab[sb] & SETMASK) == SETMASK))
+ {
+ int lo = ftab[sb] & CLEARMASK;
+ int hi = (ftab[sb + 1] & CLEARMASK) - 1;
+ if (hi > lo)
+ {
+ QSort3(lo, hi, 2);
+ numQSorted += (hi - lo + 1);
+ if (workDone > workLimit && firstAttempt)
+ {
+ return;
+ }
+ }
+ ftab[sb] |= SETMASK;
+ }
+ }
+
+ /*
+ The ss big bucket is now done. Record this fact,
+ and update the quadrant descriptors. Remember to
+ update quadrants in the overshoot area too, if
+ necessary. The "if (i < 255)" test merely skips
+ this updating for the last bucket processed, since
+ updating for the last bucket is pointless.
+ */
+ bigDone[ss] = true;
+
+ if (i < 255)
+ {
+ int bbStart = ftab[ss << 8] & CLEARMASK;
+ int bbSize = (ftab[(ss + 1) << 8] & CLEARMASK) - bbStart;
+ int shifts = 0;
+
+ while ((bbSize >> shifts) > 65534)
+ {
+ shifts++;
+ }
+
+ for (j = 0; j < bbSize; j++)
+ {
+ int a2update = zptr[bbStart + j];
+ int qVal = (j >> shifts);
+ quadrant[a2update] = qVal;
+ if (a2update < BZip2Constants.NUM_OVERSHOOT_BYTES)
+ {
+ quadrant[a2update + last + 1] = qVal;
+ }
+ }
+
+ if (!(((bbSize - 1) >> shifts) <= 65535))
+ {
+ Panic();
+ }
+ }
+
+ /*
+ Now scan this big bucket so as to synthesise the
+ sorted order for small buckets [t, ss] for all t != ss.
+ */
+ for (j = 0; j <= 255; j++)
+ {
+ copy[j] = ftab[(j << 8) + ss] & CLEARMASK;
+ }
+
+ for (j = ftab[ss << 8] & CLEARMASK;
+ j < (ftab[(ss + 1) << 8] & CLEARMASK); j++)
+ {
+ c1 = block[zptr[j]];
+ if (!bigDone[c1])
+ {
+ zptr[copy[c1]] = zptr[j] == 0 ? last : zptr[j] - 1;
+ copy[c1]++;
+ }
+ }
+
+ for (j = 0; j <= 255; j++)
+ {
+ ftab[(j << 8) + ss] |= SETMASK;
+ }
+ }
+ }
+ }
+
+ private void RandomiseBlock()
+ {
+ int i;
+ int rNToGo = 0;
+ int rTPos = 0;
+ for (i = 0; i < 256; i++)
+ {
+ inUse[i] = false;
+ }
+
+ for (i = 0; i <= last; i++)
+ {
+ if (rNToGo == 0)
+ {
+ rNToGo = (char)BZip2Constants.rNums[rTPos];
+ rTPos++;
+ if (rTPos == 512)
+ {
+ rTPos = 0;
+ }
+ }
+ rNToGo--;
+ block[i + 1] ^= (char)((rNToGo == 1) ? 1 : 0);
+ // handle 16 bit signed numbers
+ block[i + 1] &= (char)0xFF;
+
+ inUse[block[i + 1]] = true;
+ }
+ }
+
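+        // The "reversible transformation" is the Burrows-Wheeler transform:
+        // the rotations of the block are sorted (the resulting order lands in
+        // zptr) and origPtr records where the unrotated block appears in that
+        // order. If the first sorting pass exceeds its work budget
+        // (workDone > workLimit), the block is sparsely randomised and sorted
+        // again, which is what blockRandomised records.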
+ private void DoReversibleTransformation()
+ {
+ int i;
+
+ workLimit = workFactor * last;
+ workDone = 0;
+ blockRandomised = false;
+ firstAttempt = true;
+
+ MainSort();
+
+ if (workDone > workLimit && firstAttempt)
+ {
+ RandomiseBlock();
+ workLimit = workDone = 0;
+ blockRandomised = true;
+ firstAttempt = false;
+ MainSort();
+ }
+
+ origPtr = -1;
+ for (i = 0; i <= last; i++)
+ {
+ if (zptr[i] == 0)
+ {
+ origPtr = i;
+ break;
+ }
+ };
+
+ if (origPtr == -1)
+ {
+ Panic();
+ }
+ }
+
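+        // Lexicographic "greater than" comparison of the rotations starting at
+        // i1 and i2. After the first few literal bytes, the quadrant values
+        // assigned by MainSort are compared as well, so ties across long
+        // repetitive stretches can be resolved earlier; workDone counts the
+        // effort so that a degenerate block can be detected and retried with
+        // randomisation.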
+ private bool FullGtU(int i1, int i2)
+ {
+ int k;
+ char c1, c2;
+ int s1, s2;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2)
+ {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2)
+ {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2)
+ {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2)
+ {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2)
+ {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2)
+ {
+ return (c1 > c2);
+ }
+ i1++;
+ i2++;
+
+ k = last + 1;
+
+ do
+ {
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2)
+ {
+ return (c1 > c2);
+ }
+ s1 = quadrant[i1];
+ s2 = quadrant[i2];
+ if (s1 != s2)
+ {
+ return (s1 > s2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2)
+ {
+ return (c1 > c2);
+ }
+ s1 = quadrant[i1];
+ s2 = quadrant[i2];
+ if (s1 != s2)
+ {
+ return (s1 > s2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2)
+ {
+ return (c1 > c2);
+ }
+ s1 = quadrant[i1];
+ s2 = quadrant[i2];
+ if (s1 != s2)
+ {
+ return (s1 > s2);
+ }
+ i1++;
+ i2++;
+
+ c1 = block[i1 + 1];
+ c2 = block[i2 + 1];
+ if (c1 != c2)
+ {
+ return (c1 > c2);
+ }
+ s1 = quadrant[i1];
+ s2 = quadrant[i2];
+ if (s1 != s2)
+ {
+ return (s1 > s2);
+ }
+ i1++;
+ i2++;
+
+ if (i1 > last)
+ {
+ i1 -= last;
+ i1--;
+ };
+ if (i2 > last)
+ {
+ i2 -= last;
+ i2--;
+ };
+
+ k -= 4;
+ workDone++;
+ } while (k >= 0);
+
+ return false;
+ }
+
+ /*
+ Knuth's increments seem to work better
+ than Incerpi-Sedgewick here. Possibly
+ because the number of elems to sort is
+ usually small, typically <= 20.
+ */
+ private int[] incs = { 1, 4, 13, 40, 121, 364, 1093, 3280,
+ 9841, 29524, 88573, 265720,
+ 797161, 2391484 };
+
+ private void AllocateCompressStructures()
+ {
+ int n = BZip2Constants.baseBlockSize * blockSize100k;
+ block = new char[(n + 1 + BZip2Constants.NUM_OVERSHOOT_BYTES)];
+ quadrant = new int[(n + BZip2Constants.NUM_OVERSHOOT_BYTES)];
+ zptr = new int[n];
+ ftab = new int[65537];
+
+ if (block == null || quadrant == null || zptr == null
+ || ftab == null)
+ {
+ //int totalDraw = (n + 1 + NUM_OVERSHOOT_BYTES) + (n + NUM_OVERSHOOT_BYTES) + n + 65537;
+ //compressOutOfMemory ( totalDraw, n );
+ }
+
+ /*
+ The back end needs a place to store the MTF values
+ whilst it calculates the coding tables. We could
+ put them in the zptr array. However, these values
+ will fit in a short, so we overlay szptr at the
+ start of zptr, in the hope of reducing the number
+ of cache misses induced by the multiple traversals
+ of the MTF values when calculating coding tables.
+ Seems to improve compression speed by about 1%.
+ */
+            // Note: in this port szptr is allocated separately below rather
+            // than being overlaid on zptr as described above.
+            // szptr = zptr;
+
+            szptr = new short[2 * n];
+ }
+
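+        // Applies the move-to-front transform to the BWT output and run-length
+        // codes runs of zeroes using the RUNA/RUNB symbols (a bijective base-2
+        // code, least significant digit first); e.g. a run of 5 zeroes is
+        // emitted as RUNA, RUNB (1 + 2*2 = 5). The symbols go to szptr and
+        // their frequencies to mtfFreq.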
+ private void GenerateMTFValues()
+ {
+ char[] yy = new char[256];
+ int i, j;
+ char tmp;
+ char tmp2;
+ int zPend;
+ int wr;
+ int EOB;
+
+ MakeMaps();
+ EOB = nInUse + 1;
+
+ for (i = 0; i <= EOB; i++)
+ {
+ mtfFreq[i] = 0;
+ }
+
+ wr = 0;
+ zPend = 0;
+ for (i = 0; i < nInUse; i++)
+ {
+ yy[i] = (char)i;
+ }
+
+
+ for (i = 0; i <= last; i++)
+ {
+ char ll_i;
+
+ ll_i = unseqToSeq[block[zptr[i]]];
+
+ j = 0;
+ tmp = yy[j];
+ while (ll_i != tmp)
+ {
+ j++;
+ tmp2 = tmp;
+ tmp = yy[j];
+ yy[j] = tmp2;
+ };
+ yy[0] = tmp;
+
+ if (j == 0)
+ {
+ zPend++;
+ }
+ else
+ {
+ if (zPend > 0)
+ {
+ zPend--;
+ while (true)
+ {
+ switch (zPend % 2)
+ {
+ case 0:
+ szptr[wr] = (short)BZip2Constants.RUNA;
+ wr++;
+ mtfFreq[BZip2Constants.RUNA]++;
+ break;
+ case 1:
+ szptr[wr] = (short)BZip2Constants.RUNB;
+ wr++;
+ mtfFreq[BZip2Constants.RUNB]++;
+ break;
+ };
+ if (zPend < 2)
+ {
+ break;
+ }
+ zPend = (zPend - 2) / 2;
+ };
+ zPend = 0;
+ }
+ szptr[wr] = (short)(j + 1);
+ wr++;
+ mtfFreq[j + 1]++;
+ }
+ }
+
+ if (zPend > 0)
+ {
+ zPend--;
+ while (true)
+ {
+ switch (zPend % 2)
+ {
+ case 0:
+ szptr[wr] = (short)BZip2Constants.RUNA;
+ wr++;
+ mtfFreq[BZip2Constants.RUNA]++;
+ break;
+ case 1:
+ szptr[wr] = (short)BZip2Constants.RUNB;
+ wr++;
+ mtfFreq[BZip2Constants.RUNB]++;
+ break;
+ }
+ if (zPend < 2)
+ {
+ break;
+ }
+ zPend = (zPend - 2) / 2;
+ }
+ }
+
+ szptr[wr] = (short)EOB;
+ wr++;
+ mtfFreq[EOB]++;
+
+ nMTF = wr;
+ }
+
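+        // This stream is write-only (CanRead and CanSeek return false below),
+        // so Read, Seek and SetLength are intentionally inert stubs; Write
+        // simply feeds every byte through WriteByte.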
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ return 0;
+ }
+
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ return 0;
+ }
+
+ public override void SetLength(long value)
+ {
+ }
+
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ for (int k = 0; k < count; ++k)
+ {
+ WriteByte(buffer[k + offset]);
+ }
+ }
+
+ public override bool CanRead
+ {
+ get
+ {
+ return false;
+ }
+ }
+
+ public override bool CanSeek
+ {
+ get
+ {
+ return false;
+ }
+ }
+
+ public override bool CanWrite
+ {
+ get
+ {
+ return true;
+ }
+ }
+
+ public override long Length
+ {
+ get
+ {
+ return 0;
+ }
+ }
+
+ public override long Position
+ {
+ get
+ {
+ return 0;
+ }
+ set
+ {
+ }
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/CRC.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/CRC.cs
new file mode 100644
index 00000000..e6457051
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/BZip2/CRC.cs
@@ -0,0 +1,138 @@
+
+/*
+ * Copyright 2001,2004-2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This package is based on the work done by Keiron Liddle, Aftex Software,
+ * to whom the Ant project is very grateful for his
+ * great code.
+ */
+
+namespace Compress.SevenZip.Compress.BZip2
+{
+ /**
+      * A simple class to hold and calculate the CRC for sanity checking
+ * of the data.
+ *
+ * @author Keiron Liddle
+ */
+ internal class CRC
+ {
+ private static readonly int[] crc32Table = {
+ unchecked((int)0x00000000), unchecked((int)0x04c11db7), unchecked((int)0x09823b6e), unchecked((int)0x0d4326d9),
+ unchecked((int)0x130476dc), unchecked((int)0x17c56b6b), unchecked((int)0x1a864db2), unchecked((int)0x1e475005),
+ unchecked((int)0x2608edb8), unchecked((int)0x22c9f00f), unchecked((int)0x2f8ad6d6), unchecked((int)0x2b4bcb61),
+ unchecked((int)0x350c9b64), unchecked((int)0x31cd86d3), unchecked((int)0x3c8ea00a), unchecked((int)0x384fbdbd),
+ unchecked((int)0x4c11db70), unchecked((int)0x48d0c6c7), unchecked((int)0x4593e01e), unchecked((int)0x4152fda9),
+ unchecked((int)0x5f15adac), unchecked((int)0x5bd4b01b), unchecked((int)0x569796c2), unchecked((int)0x52568b75),
+ unchecked((int)0x6a1936c8), unchecked((int)0x6ed82b7f), unchecked((int)0x639b0da6), unchecked((int)0x675a1011),
+ unchecked((int)0x791d4014), unchecked((int)0x7ddc5da3), unchecked((int)0x709f7b7a), unchecked((int)0x745e66cd),
+ unchecked((int)0x9823b6e0), unchecked((int)0x9ce2ab57), unchecked((int)0x91a18d8e), unchecked((int)0x95609039),
+ unchecked((int)0x8b27c03c), unchecked((int)0x8fe6dd8b), unchecked((int)0x82a5fb52), unchecked((int)0x8664e6e5),
+ unchecked((int)0xbe2b5b58), unchecked((int)0xbaea46ef), unchecked((int)0xb7a96036), unchecked((int)0xb3687d81),
+ unchecked((int)0xad2f2d84), unchecked((int)0xa9ee3033), unchecked((int)0xa4ad16ea), unchecked((int)0xa06c0b5d),
+ unchecked((int)0xd4326d90), unchecked((int)0xd0f37027), unchecked((int)0xddb056fe), unchecked((int)0xd9714b49),
+ unchecked((int)0xc7361b4c), unchecked((int)0xc3f706fb), unchecked((int)0xceb42022), unchecked((int)0xca753d95),
+ unchecked((int)0xf23a8028), unchecked((int)0xf6fb9d9f), unchecked((int)0xfbb8bb46), unchecked((int)0xff79a6f1),
+ unchecked((int)0xe13ef6f4), unchecked((int)0xe5ffeb43), unchecked((int)0xe8bccd9a), unchecked((int)0xec7dd02d),
+ unchecked((int)0x34867077), unchecked((int)0x30476dc0), unchecked((int)0x3d044b19), unchecked((int)0x39c556ae),
+ unchecked((int)0x278206ab), unchecked((int)0x23431b1c), unchecked((int)0x2e003dc5), unchecked((int)0x2ac12072),
+ unchecked((int)0x128e9dcf), unchecked((int)0x164f8078), unchecked((int)0x1b0ca6a1), unchecked((int)0x1fcdbb16),
+ unchecked((int)0x018aeb13), unchecked((int)0x054bf6a4), unchecked((int)0x0808d07d), unchecked((int)0x0cc9cdca),
+ unchecked((int)0x7897ab07), unchecked((int)0x7c56b6b0), unchecked((int)0x71159069), unchecked((int)0x75d48dde),
+ unchecked((int)0x6b93dddb), unchecked((int)0x6f52c06c), unchecked((int)0x6211e6b5), unchecked((int)0x66d0fb02),
+ unchecked((int)0x5e9f46bf), unchecked((int)0x5a5e5b08), unchecked((int)0x571d7dd1), unchecked((int)0x53dc6066),
+ unchecked((int)0x4d9b3063), unchecked((int)0x495a2dd4), unchecked((int)0x44190b0d), unchecked((int)0x40d816ba),
+ unchecked((int)0xaca5c697), unchecked((int)0xa864db20), unchecked((int)0xa527fdf9), unchecked((int)0xa1e6e04e),
+ unchecked((int)0xbfa1b04b), unchecked((int)0xbb60adfc), unchecked((int)0xb6238b25), unchecked((int)0xb2e29692),
+ unchecked((int)0x8aad2b2f), unchecked((int)0x8e6c3698), unchecked((int)0x832f1041), unchecked((int)0x87ee0df6),
+ unchecked((int)0x99a95df3), unchecked((int)0x9d684044), unchecked((int)0x902b669d), unchecked((int)0x94ea7b2a),
+ unchecked((int)0xe0b41de7), unchecked((int)0xe4750050), unchecked((int)0xe9362689), unchecked((int)0xedf73b3e),
+ unchecked((int)0xf3b06b3b), unchecked((int)0xf771768c), unchecked((int)0xfa325055), unchecked((int)0xfef34de2),
+ unchecked((int)0xc6bcf05f), unchecked((int)0xc27dede8), unchecked((int)0xcf3ecb31), unchecked((int)0xcbffd686),
+ unchecked((int)0xd5b88683), unchecked((int)0xd1799b34), unchecked((int)0xdc3abded), unchecked((int)0xd8fba05a),
+ unchecked((int)0x690ce0ee), unchecked((int)0x6dcdfd59), unchecked((int)0x608edb80), unchecked((int)0x644fc637),
+ unchecked((int)0x7a089632), unchecked((int)0x7ec98b85), unchecked((int)0x738aad5c), unchecked((int)0x774bb0eb),
+ unchecked((int)0x4f040d56), unchecked((int)0x4bc510e1), unchecked((int)0x46863638), unchecked((int)0x42472b8f),
+ unchecked((int)0x5c007b8a), unchecked((int)0x58c1663d), unchecked((int)0x558240e4), unchecked((int)0x51435d53),
+ unchecked((int)0x251d3b9e), unchecked((int)0x21dc2629), unchecked((int)0x2c9f00f0), unchecked((int)0x285e1d47),
+ unchecked((int)0x36194d42), unchecked((int)0x32d850f5), unchecked((int)0x3f9b762c), unchecked((int)0x3b5a6b9b),
+ unchecked((int)0x0315d626), unchecked((int)0x07d4cb91), unchecked((int)0x0a97ed48), unchecked((int)0x0e56f0ff),
+ unchecked((int)0x1011a0fa), unchecked((int)0x14d0bd4d), unchecked((int)0x19939b94), unchecked((int)0x1d528623),
+ unchecked((int)0xf12f560e), unchecked((int)0xf5ee4bb9), unchecked((int)0xf8ad6d60), unchecked((int)0xfc6c70d7),
+ unchecked((int)0xe22b20d2), unchecked((int)0xe6ea3d65), unchecked((int)0xeba91bbc), unchecked((int)0xef68060b),
+ unchecked((int)0xd727bbb6), unchecked((int)0xd3e6a601), unchecked((int)0xdea580d8), unchecked((int)0xda649d6f),
+ unchecked((int)0xc423cd6a), unchecked((int)0xc0e2d0dd), unchecked((int)0xcda1f604), unchecked((int)0xc960ebb3),
+ unchecked((int)0xbd3e8d7e), unchecked((int)0xb9ff90c9), unchecked((int)0xb4bcb610), unchecked((int)0xb07daba7),
+ unchecked((int)0xae3afba2), unchecked((int)0xaafbe615), unchecked((int)0xa7b8c0cc), unchecked((int)0xa379dd7b),
+ unchecked((int)0x9b3660c6), unchecked((int)0x9ff77d71), unchecked((int)0x92b45ba8), unchecked((int)0x9675461f),
+ unchecked((int)0x8832161a), unchecked((int)0x8cf30bad), unchecked((int)0x81b02d74), unchecked((int)0x857130c3),
+ unchecked((int)0x5d8a9099), unchecked((int)0x594b8d2e), unchecked((int)0x5408abf7), unchecked((int)0x50c9b640),
+ unchecked((int)0x4e8ee645), unchecked((int)0x4a4ffbf2), unchecked((int)0x470cdd2b), unchecked((int)0x43cdc09c),
+ unchecked((int)0x7b827d21), unchecked((int)0x7f436096), unchecked((int)0x7200464f), unchecked((int)0x76c15bf8),
+ unchecked((int)0x68860bfd), unchecked((int)0x6c47164a), unchecked((int)0x61043093), unchecked((int)0x65c52d24),
+ unchecked((int)0x119b4be9), unchecked((int)0x155a565e), unchecked((int)0x18197087), unchecked((int)0x1cd86d30),
+ unchecked((int)0x029f3d35), unchecked((int)0x065e2082), unchecked((int)0x0b1d065b), unchecked((int)0x0fdc1bec),
+ unchecked((int)0x3793a651), unchecked((int)0x3352bbe6), unchecked((int)0x3e119d3f), unchecked((int)0x3ad08088),
+ unchecked((int)0x2497d08d), unchecked((int)0x2056cd3a), unchecked((int)0x2d15ebe3), unchecked((int)0x29d4f654),
+ unchecked((int)0xc5a92679), unchecked((int)0xc1683bce), unchecked((int)0xcc2b1d17), unchecked((int)0xc8ea00a0),
+ unchecked((int)0xd6ad50a5), unchecked((int)0xd26c4d12), unchecked((int)0xdf2f6bcb), unchecked((int)0xdbee767c),
+ unchecked((int)0xe3a1cbc1), unchecked((int)0xe760d676), unchecked((int)0xea23f0af), unchecked((int)0xeee2ed18),
+ unchecked((int)0xf0a5bd1d), unchecked((int)0xf464a0aa), unchecked((int)0xf9278673), unchecked((int)0xfde69bc4),
+ unchecked((int)0x89b8fd09), unchecked((int)0x8d79e0be), unchecked((int)0x803ac667), unchecked((int)0x84fbdbd0),
+ unchecked((int)0x9abc8bd5), unchecked((int)0x9e7d9662), unchecked((int)0x933eb0bb), unchecked((int)0x97ffad0c),
+ unchecked((int)0xafb010b1), unchecked((int)0xab710d06), unchecked((int)0xa6322bdf), unchecked((int)0xa2f33668),
+ unchecked((int)0xbcb4666d), unchecked((int)0xb8757bda), unchecked((int)0xb5365d03), unchecked((int)0xb1f740b4)
+ };
+
+ public CRC()
+ {
+ InitialiseCRC();
+ }
+
+ internal void InitialiseCRC()
+ {
+ globalCrc = unchecked((int)0xffffffff);
+ }
+
+ internal int GetFinalCRC()
+ {
+ return ~globalCrc;
+ }
+
+ internal int GetGlobalCRC()
+ {
+ return globalCrc;
+ }
+
+ internal void SetGlobalCRC(int newCrc)
+ {
+ globalCrc = newCrc;
+ }
+
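+        // MSB-first CRC-32 update using the 0x04C11DB7 polynomial table above:
+        // the table index is the top byte of the running CRC XORed with the
+        // input byte, and the "temp < 0" adjustment compensates for the
+        // sign-extending right shift of the signed int.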
+ internal void UpdateCRC(int inCh)
+ {
+ int temp = (globalCrc >> 24) ^ inCh;
+ if (temp < 0)
+ {
+ temp = 256 + temp;
+ }
+ globalCrc = (globalCrc << 8) ^ CRC.crc32Table[temp];
+ }
+
+ internal int globalCrc;
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/LZ/LzBinTree.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/LZ/LzBinTree.cs
new file mode 100644
index 00000000..4ce018eb
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/LZ/LzBinTree.cs
@@ -0,0 +1,366 @@
+using System;
+using Compress.SevenZip.Common;
+
+namespace Compress.SevenZip.Compress.LZ
+{
+ internal class BinTree : InWindow
+ {
+ UInt32 _cyclicBufferPos;
+ UInt32 _cyclicBufferSize = 0;
+ UInt32 _matchMaxLen;
+
+ UInt32[] _son;
+ UInt32[] _hash;
+
+ UInt32 _cutValue = 0xFF;
+ UInt32 _hashMask;
+ UInt32 _hashSizeSum = 0;
+
+ bool HASH_ARRAY = true;
+
+ const UInt32 kHash2Size = 1 << 10;
+ const UInt32 kHash3Size = 1 << 16;
+ const UInt32 kBT2HashSize = 1 << 16;
+ const UInt32 kStartMaxLen = 1;
+ const UInt32 kHash3Offset = kHash2Size;
+ const UInt32 kEmptyHashValue = 0;
+ const UInt32 kMaxValForNormalize = ((UInt32)1 << 31) - 1;
+
+ UInt32 kNumHashDirectBytes = 0;
+ UInt32 kMinMatchCheck = 4;
+ UInt32 kFixHashSize = kHash2Size + kHash3Size;
+
+ public void SetType(int numHashBytes)
+ {
+ HASH_ARRAY = (numHashBytes > 2);
+ if (HASH_ARRAY)
+ {
+ kNumHashDirectBytes = 0;
+ kMinMatchCheck = 4;
+ kFixHashSize = kHash2Size + kHash3Size;
+ }
+ else
+ {
+ kNumHashDirectBytes = 2;
+ kMinMatchCheck = 2 + 1;
+ kFixHashSize = 0;
+ }
+ }
+
+ public new void SetStream(System.IO.Stream stream) { base.SetStream(stream); }
+ public new void ReleaseStream() { base.ReleaseStream(); }
+
+ public new void Init()
+ {
+ base.Init();
+ for (UInt32 i = 0; i < _hashSizeSum; i++)
+ _hash[i] = kEmptyHashValue;
+ _cyclicBufferPos = 0;
+ ReduceOffsets(-1);
+ }
+
+ public new void MovePos()
+ {
+ if (++_cyclicBufferPos >= _cyclicBufferSize)
+ _cyclicBufferPos = 0;
+ base.MovePos();
+ if (_pos == kMaxValForNormalize)
+ Normalize();
+ }
+
+ public new Byte GetIndexByte(Int32 index) { return base.GetIndexByte(index); }
+
+ public new UInt32 GetMatchLen(Int32 index, UInt32 distance, UInt32 limit)
+ { return base.GetMatchLen(index, distance, limit); }
+
+ public new UInt32 GetNumAvailableBytes() { return base.GetNumAvailableBytes(); }
+
+ public void Create(UInt32 historySize, UInt32 keepAddBufferBefore,
+ UInt32 matchMaxLen, UInt32 keepAddBufferAfter)
+ {
+ if (historySize > kMaxValForNormalize - 256)
+ throw new Exception();
+ _cutValue = 16 + (matchMaxLen >> 1);
+
+ UInt32 windowReservSize = (historySize + keepAddBufferBefore +
+ matchMaxLen + keepAddBufferAfter) / 2 + 256;
+
+ base.Create(historySize + keepAddBufferBefore, matchMaxLen + keepAddBufferAfter, windowReservSize);
+
+ _matchMaxLen = matchMaxLen;
+
+ UInt32 cyclicBufferSize = historySize + 1;
+ if (_cyclicBufferSize != cyclicBufferSize)
+ _son = new UInt32[(_cyclicBufferSize = cyclicBufferSize) * 2];
+
+ UInt32 hs = kBT2HashSize;
+
+ if (HASH_ARRAY)
+ {
+ hs = historySize - 1;
+ hs |= (hs >> 1);
+ hs |= (hs >> 2);
+ hs |= (hs >> 4);
+ hs |= (hs >> 8);
+ hs >>= 1;
+ hs |= 0xFFFF;
+ if (hs > (1 << 24))
+ hs >>= 1;
+ _hashMask = hs;
+ hs++;
+ hs += kFixHashSize;
+ }
+ if (hs != _hashSizeSum)
+ _hash = new UInt32[_hashSizeSum = hs];
+ }
+
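+        // Collects match candidates for the current position. Candidates come
+        // from the hash heads (2-, 3- and 4-byte hashes for BT4, 2-byte for
+        // BT2) and from walking the binary tree rooted at the current cyclic
+        // buffer slot, at most _cutValue steps. Matches of strictly increasing
+        // length are written to "distances" as (length, distance - 1) pairs,
+        // and the number of values written is returned.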
+ public UInt32 GetMatches(UInt32[] distances)
+ {
+ UInt32 lenLimit;
+ if (_pos + _matchMaxLen <= _streamPos)
+ lenLimit = _matchMaxLen;
+ else
+ {
+ lenLimit = _streamPos - _pos;
+ if (lenLimit < kMinMatchCheck)
+ {
+ MovePos();
+ return 0;
+ }
+ }
+
+ UInt32 offset = 0;
+ UInt32 matchMinPos = (_pos > _cyclicBufferSize) ? (_pos - _cyclicBufferSize) : 0;
+ UInt32 cur = _bufferOffset + _pos;
+ UInt32 maxLen = kStartMaxLen; // to avoid items for len < hashSize;
+ UInt32 hashValue, hash2Value = 0, hash3Value = 0;
+
+ if (HASH_ARRAY)
+ {
+ UInt32 temp = Utils.CRC.CRC32Lookup[_bufferBase[cur]] ^ _bufferBase[cur + 1];
+ hash2Value = temp & (kHash2Size - 1);
+ temp ^= ((UInt32)(_bufferBase[cur + 2]) << 8);
+ hash3Value = temp & (kHash3Size - 1);
+ hashValue = (temp ^ (Utils.CRC.CRC32Lookup[_bufferBase[cur + 3]] << 5)) & _hashMask;
+ }
+ else
+ hashValue = _bufferBase[cur] ^ ((UInt32)(_bufferBase[cur + 1]) << 8);
+
+ UInt32 curMatch = _hash[kFixHashSize + hashValue];
+ if (HASH_ARRAY)
+ {
+ UInt32 curMatch2 = _hash[hash2Value];
+ UInt32 curMatch3 = _hash[kHash3Offset + hash3Value];
+ _hash[hash2Value] = _pos;
+ _hash[kHash3Offset + hash3Value] = _pos;
+ if (curMatch2 > matchMinPos)
+ if (_bufferBase[_bufferOffset + curMatch2] == _bufferBase[cur])
+ {
+ distances[offset++] = maxLen = 2;
+ distances[offset++] = _pos - curMatch2 - 1;
+ }
+ if (curMatch3 > matchMinPos)
+ if (_bufferBase[_bufferOffset + curMatch3] == _bufferBase[cur])
+ {
+ if (curMatch3 == curMatch2)
+ offset -= 2;
+ distances[offset++] = maxLen = 3;
+ distances[offset++] = _pos - curMatch3 - 1;
+ curMatch2 = curMatch3;
+ }
+ if (offset != 0 && curMatch2 == curMatch)
+ {
+ offset -= 2;
+ maxLen = kStartMaxLen;
+ }
+ }
+
+ _hash[kFixHashSize + hashValue] = _pos;
+
+ UInt32 ptr0 = (_cyclicBufferPos << 1) + 1;
+ UInt32 ptr1 = (_cyclicBufferPos << 1);
+
+ UInt32 len0, len1;
+ len0 = len1 = kNumHashDirectBytes;
+
+ if (kNumHashDirectBytes != 0)
+ {
+ if (curMatch > matchMinPos)
+ {
+ if (_bufferBase[_bufferOffset + curMatch + kNumHashDirectBytes] !=
+ _bufferBase[cur + kNumHashDirectBytes])
+ {
+ distances[offset++] = maxLen = kNumHashDirectBytes;
+ distances[offset++] = _pos - curMatch - 1;
+ }
+ }
+ }
+
+ UInt32 count = _cutValue;
+
+ while (true)
+ {
+ if (curMatch <= matchMinPos || count-- == 0)
+ {
+ _son[ptr0] = _son[ptr1] = kEmptyHashValue;
+ break;
+ }
+ UInt32 delta = _pos - curMatch;
+ UInt32 cyclicPos = ((delta <= _cyclicBufferPos) ?
+ (_cyclicBufferPos - delta) :
+ (_cyclicBufferPos - delta + _cyclicBufferSize)) << 1;
+
+ UInt32 pby1 = _bufferOffset + curMatch;
+ UInt32 len = Math.Min(len0, len1);
+ if (_bufferBase[pby1 + len] == _bufferBase[cur + len])
+ {
+ while (++len != lenLimit)
+ if (_bufferBase[pby1 + len] != _bufferBase[cur + len])
+ break;
+ if (maxLen < len)
+ {
+ distances[offset++] = maxLen = len;
+ distances[offset++] = delta - 1;
+ if (len == lenLimit)
+ {
+ _son[ptr1] = _son[cyclicPos];
+ _son[ptr0] = _son[cyclicPos + 1];
+ break;
+ }
+ }
+ }
+ if (_bufferBase[pby1 + len] < _bufferBase[cur + len])
+ {
+ _son[ptr1] = curMatch;
+ ptr1 = cyclicPos + 1;
+ curMatch = _son[ptr1];
+ len1 = len;
+ }
+ else
+ {
+ _son[ptr0] = curMatch;
+ ptr0 = cyclicPos;
+ curMatch = _son[ptr0];
+ len0 = len;
+ }
+ }
+ MovePos();
+ return offset;
+ }
+
+ public void Skip(UInt32 num)
+ {
+ do
+ {
+ UInt32 lenLimit;
+ if (_pos + _matchMaxLen <= _streamPos)
+ lenLimit = _matchMaxLen;
+ else
+ {
+ lenLimit = _streamPos - _pos;
+ if (lenLimit < kMinMatchCheck)
+ {
+ MovePos();
+ continue;
+ }
+ }
+
+ UInt32 matchMinPos = (_pos > _cyclicBufferSize) ? (_pos - _cyclicBufferSize) : 0;
+ UInt32 cur = _bufferOffset + _pos;
+
+ UInt32 hashValue;
+
+ if (HASH_ARRAY)
+ {
+ UInt32 temp = Utils.CRC.CRC32Lookup[_bufferBase[cur]] ^ _bufferBase[cur + 1];
+ UInt32 hash2Value = temp & (kHash2Size - 1);
+ _hash[hash2Value] = _pos;
+ temp ^= ((UInt32)(_bufferBase[cur + 2]) << 8);
+ UInt32 hash3Value = temp & (kHash3Size - 1);
+ _hash[kHash3Offset + hash3Value] = _pos;
+ hashValue = (temp ^ (Utils.CRC.CRC32Lookup[_bufferBase[cur + 3]] << 5)) & _hashMask;
+ }
+ else
+ hashValue = _bufferBase[cur] ^ ((UInt32)(_bufferBase[cur + 1]) << 8);
+
+ UInt32 curMatch = _hash[kFixHashSize + hashValue];
+ _hash[kFixHashSize + hashValue] = _pos;
+
+ UInt32 ptr0 = (_cyclicBufferPos << 1) + 1;
+ UInt32 ptr1 = (_cyclicBufferPos << 1);
+
+ UInt32 len0, len1;
+ len0 = len1 = kNumHashDirectBytes;
+
+ UInt32 count = _cutValue;
+ while (true)
+ {
+ if (curMatch <= matchMinPos || count-- == 0)
+ {
+ _son[ptr0] = _son[ptr1] = kEmptyHashValue;
+ break;
+ }
+
+ UInt32 delta = _pos - curMatch;
+ UInt32 cyclicPos = ((delta <= _cyclicBufferPos) ?
+ (_cyclicBufferPos - delta) :
+ (_cyclicBufferPos - delta + _cyclicBufferSize)) << 1;
+
+ UInt32 pby1 = _bufferOffset + curMatch;
+ UInt32 len = Math.Min(len0, len1);
+ if (_bufferBase[pby1 + len] == _bufferBase[cur + len])
+ {
+ while (++len != lenLimit)
+ if (_bufferBase[pby1 + len] != _bufferBase[cur + len])
+ break;
+ if (len == lenLimit)
+ {
+ _son[ptr1] = _son[cyclicPos];
+ _son[ptr0] = _son[cyclicPos + 1];
+ break;
+ }
+ }
+ if (_bufferBase[pby1 + len] < _bufferBase[cur + len])
+ {
+ _son[ptr1] = curMatch;
+ ptr1 = cyclicPos + 1;
+ curMatch = _son[ptr1];
+ len1 = len;
+ }
+ else
+ {
+ _son[ptr0] = curMatch;
+ ptr0 = cyclicPos;
+ curMatch = _son[ptr0];
+ len0 = len;
+ }
+ }
+ MovePos();
+ }
+ while (--num != 0);
+ }
+
+ void NormalizeLinks(UInt32[] items, UInt32 numItems, UInt32 subValue)
+ {
+ for (UInt32 i = 0; i < numItems; i++)
+ {
+ UInt32 value = items[i];
+ if (value <= subValue)
+ value = kEmptyHashValue;
+ else
+ value -= subValue;
+ items[i] = value;
+ }
+ }
+
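+        // When _pos reaches kMaxValForNormalize, every position stored in the
+        // tree and hash tables is reduced by the same amount so the 32-bit
+        // offsets cannot overflow; entries that would drop below zero collapse
+        // to kEmptyHashValue.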
+ void Normalize()
+ {
+ UInt32 subValue = _pos - _cyclicBufferSize;
+ NormalizeLinks(_son, _cyclicBufferSize * 2, subValue);
+ NormalizeLinks(_hash, _hashSizeSum, subValue);
+ ReduceOffsets((Int32)subValue);
+ }
+
+ public void SetCutValue(UInt32 cutValue) { _cutValue = cutValue; }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/LZ/LzInWindow.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/LZ/LzInWindow.cs
new file mode 100644
index 00000000..2fd6642a
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/LZ/LzInWindow.cs
@@ -0,0 +1,148 @@
+using System;
+
+namespace Compress.SevenZip.Compress.LZ
+{
+ internal class InWindow
+ {
+ public Byte[] _bufferBase = null; // pointer to buffer with data
+ System.IO.Stream _stream;
+ UInt32 _posLimit; // offset (from _buffer) of first byte when new block reading must be done
+ bool _streamEndWasReached; // if (true) then _streamPos shows real end of stream
+
+ UInt32 _pointerToLastSafePosition;
+
+ public UInt32 _bufferOffset;
+
+ public UInt32 _blockSize; // Size of Allocated memory block
+        public UInt32 _pos; // offset (from _buffer) of current byte
+        UInt32 _keepSizeBefore; // how many BYTEs must be kept in buffer before _pos
+        UInt32 _keepSizeAfter; // how many BYTEs must be kept in buffer after _pos
+        public UInt32 _streamPos; // offset (from _buffer) of the first byte not yet read from the Stream
+
+ public void MoveBlock()
+ {
+ UInt32 offset = (UInt32)(_bufferOffset) + _pos - _keepSizeBefore;
+ // we need one additional byte, since MovePos moves on 1 byte.
+ if (offset > 0)
+ offset--;
+
+ UInt32 numBytes = (UInt32)(_bufferOffset) + _streamPos - offset;
+
+ // check negative offset ????
+ for (UInt32 i = 0; i < numBytes; i++)
+ _bufferBase[i] = _bufferBase[offset + i];
+ _bufferOffset -= offset;
+ }
+
+ public virtual void ReadBlock()
+ {
+ if (_streamEndWasReached)
+ return;
+ while (true)
+ {
+ int size = (int)((0 - _bufferOffset) + _blockSize - _streamPos);
+ if (size == 0)
+ return;
+ int numReadBytes = _stream != null ? _stream.Read(_bufferBase, (int)(_bufferOffset + _streamPos), size) : 0;
+ if (numReadBytes == 0)
+ {
+ _posLimit = _streamPos;
+ UInt32 pointerToPostion = _bufferOffset + _posLimit;
+ if (pointerToPostion > _pointerToLastSafePosition)
+ _posLimit = (UInt32)(_pointerToLastSafePosition - _bufferOffset);
+
+ _streamEndWasReached = true;
+ return;
+ }
+ _streamPos += (UInt32)numReadBytes;
+ if (_streamPos >= _pos + _keepSizeAfter)
+ _posLimit = _streamPos - _keepSizeAfter;
+ }
+ }
+
+ void Free() { _bufferBase = null; }
+
+ public void Create(UInt32 keepSizeBefore, UInt32 keepSizeAfter, UInt32 keepSizeReserv)
+ {
+ _keepSizeBefore = keepSizeBefore;
+ _keepSizeAfter = keepSizeAfter;
+ UInt32 blockSize = keepSizeBefore + keepSizeAfter + keepSizeReserv;
+ if (_bufferBase == null || _blockSize != blockSize)
+ {
+ Free();
+ _blockSize = blockSize;
+ _bufferBase = new Byte[_blockSize];
+ }
+ _pointerToLastSafePosition = _blockSize - keepSizeAfter;
+ _streamEndWasReached = false;
+ }
+
+ public void SetStream(System.IO.Stream stream)
+ {
+ _stream = stream;
+ if (_streamEndWasReached)
+ {
+ _streamEndWasReached = false;
+ if (IsDataStarved)
+ ReadBlock();
+ }
+ }
+ public void ReleaseStream() { _stream = null; }
+
+ public void Init()
+ {
+ _bufferOffset = 0;
+ _pos = 0;
+ _streamPos = 0;
+ _streamEndWasReached = false;
+ ReadBlock();
+ }
+
+ public void MovePos()
+ {
+ _pos++;
+ if (_pos > _posLimit)
+ {
+ UInt32 pointerToPostion = _bufferOffset + _pos;
+ if (pointerToPostion > _pointerToLastSafePosition)
+ MoveBlock();
+ ReadBlock();
+ }
+ }
+
+ public Byte GetIndexByte(Int32 index) { return _bufferBase[_bufferOffset + _pos + index]; }
+
+        // index + limit must not exceed _keepSizeAfter
+ public UInt32 GetMatchLen(Int32 index, UInt32 distance, UInt32 limit)
+ {
+ if (_streamEndWasReached)
+ if ((_pos + index) + limit > _streamPos)
+ limit = _streamPos - (UInt32)(_pos + index);
+ distance++;
+ // Byte *pby = _buffer + (size_t)_pos + index;
+ UInt32 pby = _bufferOffset + _pos + (UInt32)index;
+
+ UInt32 i;
+ for (i = 0; i < limit && _bufferBase[pby + i] == _bufferBase[pby + i - distance]; i++) ;
+ return i;
+ }
+
+ public UInt32 GetNumAvailableBytes() { return _streamPos - _pos; }
+
+ public void ReduceOffsets(Int32 subValue)
+ {
+ _bufferOffset += (UInt32)subValue;
+ _posLimit -= (UInt32)subValue;
+ _pos -= (UInt32)subValue;
+ _streamPos -= (UInt32)subValue;
+ }
+
+ public bool IsDataStarved
+ {
+ get
+ {
+ return _streamPos - _pos < _keepSizeAfter;
+ }
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/LZ/LzOutWindow.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/LZ/LzOutWindow.cs
new file mode 100644
index 00000000..f5b4a9f8
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/LZ/LzOutWindow.cs
@@ -0,0 +1,186 @@
+using Compress.SevenZip.Common;
+
+namespace Compress.SevenZip.Compress.LZ
+{
+ internal class OutWindow
+ {
+ byte[] _buffer = null;
+ int _windowSize = 0;
+ int _pos;
+ int _streamPos;
+ int _pendingLen;
+ int _pendingDist;
+ System.IO.Stream _stream;
+
+ public long Total;
+ public long Limit;
+
+ public void Create(int windowSize)
+ {
+ if (_windowSize != windowSize)
+ _buffer = new byte[windowSize];
+ else
+ _buffer[windowSize - 1] = 0;
+ _windowSize = windowSize;
+ _pos = 0;
+ _streamPos = 0;
+ _pendingLen = 0;
+ Total = 0;
+ Limit = 0;
+ }
+
+ public void Reset()
+ {
+ Create(_windowSize);
+ }
+
+ public void Init(System.IO.Stream stream)
+ {
+ ReleaseStream();
+ _stream = stream;
+ }
+
+ public void Train(System.IO.Stream stream)
+ {
+ long len = stream.Length;
+ int size = (len < _windowSize) ? (int)len : _windowSize;
+ stream.Position = len - size;
+ Total = 0;
+ Limit = size;
+ _pos = _windowSize - size;
+ CopyStream(stream, size);
+ if (_pos == _windowSize)
+ _pos = 0;
+ _streamPos = _pos;
+ }
+
+ public void ReleaseStream()
+ {
+ Flush();
+ _stream = null;
+ }
+
+ public void Flush()
+ {
+ if (_stream == null)
+ return;
+ int size = _pos - _streamPos;
+ if (size == 0)
+ return;
+ _stream.Write(_buffer, _streamPos, size);
+ if (_pos >= _windowSize)
+ _pos = 0;
+ _streamPos = _pos;
+ }
+
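+        // Copies "len" bytes starting "distance + 1" bytes behind the current
+        // position of the circular window. The copy is byte by byte, so
+        // overlapping copies (distance < len) replicate the repeated pattern
+        // correctly; whatever does not fit before the window or limit boundary
+        // is remembered in _pendingLen/_pendingDist for CopyPending.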
+ public void CopyBlock(int distance, int len)
+ {
+ int size = len;
+ int pos = _pos - distance - 1;
+ if (pos < 0)
+ pos += _windowSize;
+ for (; size > 0 && _pos < _windowSize && Total < Limit; size--)
+ {
+ if (pos >= _windowSize)
+ pos = 0;
+ _buffer[_pos++] = _buffer[pos++];
+ Total++;
+ if (_pos >= _windowSize)
+ Flush();
+ }
+ _pendingLen = size;
+ _pendingDist = distance;
+ }
+
+ public void PutByte(byte b)
+ {
+ _buffer[_pos++] = b;
+ Total++;
+ if (_pos >= _windowSize)
+ Flush();
+ }
+
+ public byte GetByte(int distance)
+ {
+ int pos = _pos - distance - 1;
+ if (pos < 0)
+ pos += _windowSize;
+ return _buffer[pos];
+ }
+
+ public int CopyStream(System.IO.Stream stream, int len)
+ {
+ int size = len;
+ while (size > 0 && _pos < _windowSize && Total < Limit)
+ {
+ int curSize = _windowSize - _pos;
+ if (curSize > Limit - Total)
+ curSize = (int)(Limit - Total);
+ if (curSize > size)
+ curSize = size;
+ int numReadBytes = stream.Read(_buffer, _pos, curSize);
+ if (numReadBytes == 0)
+ throw new DataErrorException();
+ size -= numReadBytes;
+ _pos += numReadBytes;
+ Total += numReadBytes;
+ if (_pos >= _windowSize)
+ Flush();
+ }
+ return len - size;
+ }
+
+ public void SetLimit(long size)
+ {
+ Limit = Total + size;
+ }
+
+ public bool HasSpace
+ {
+ get
+ {
+ return _pos < _windowSize && Total < Limit;
+ }
+ }
+
+ public bool HasPending
+ {
+ get
+ {
+ return _pendingLen > 0;
+ }
+ }
+
+ public int Read(byte[] buffer, int offset, int count)
+ {
+ if (_streamPos >= _pos)
+ return 0;
+
+ int size = _pos - _streamPos;
+ if (size > count)
+ size = count;
+ System.Buffer.BlockCopy(_buffer, _streamPos, buffer, offset, size);
+ _streamPos += size;
+ if (_streamPos >= _windowSize)
+ {
+ _pos = 0;
+ _streamPos = 0;
+ }
+ return size;
+ }
+
+ public void CopyPending()
+ {
+ if (_pendingLen > 0)
+ CopyBlock(_pendingDist, _pendingLen);
+ }
+
+ public int AvailableBytes
+ {
+ get
+ {
+ return _pos - _streamPos;
+ }
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaBase.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaBase.cs
new file mode 100644
index 00000000..a24fd1f5
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaBase.cs
@@ -0,0 +1,74 @@
+namespace Compress.SevenZip.Compress.LZMA
+{
+ internal abstract class Base
+ {
+ public const uint kNumRepDistances = 4;
+ public const uint kNumStates = 12;
+
+ // static byte []kLiteralNextStates = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
+ // static byte []kMatchNextStates = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
+ // static byte []kRepNextStates = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
+ // static byte []kShortRepNextStates = {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
+
+ public struct State
+ {
+ public uint Index;
+ public void Init() { Index = 0; }
+ public void UpdateChar()
+ {
+ if (Index < 4) Index = 0;
+ else if (Index < 10) Index -= 3;
+ else Index -= 6;
+ }
+ public void UpdateMatch() { Index = (uint)(Index < 7 ? 7 : 10); }
+ public void UpdateRep() { Index = (uint)(Index < 7 ? 8 : 11); }
+ public void UpdateShortRep() { Index = (uint)(Index < 7 ? 9 : 11); }
+ public bool IsCharState() { return Index < 7; }
+ }
+
+ public const int kNumPosSlotBits = 6;
+ public const int kDicLogSizeMin = 0;
+ // public const int kDicLogSizeMax = 30;
+ // public const uint kDistTableSizeMax = kDicLogSizeMax * 2;
+
+ public const int kNumLenToPosStatesBits = 2; // it's for speed optimization
+ public const uint kNumLenToPosStates = 1 << kNumLenToPosStatesBits;
+
+ public const uint kMatchMinLen = 2;
+
+ public static uint GetLenToPosState(uint len)
+ {
+ len -= kMatchMinLen;
+ if (len < kNumLenToPosStates)
+ return len;
+ return (uint)(kNumLenToPosStates - 1);
+ }
+
+ public const int kNumAlignBits = 4;
+ public const uint kAlignTableSize = 1 << kNumAlignBits;
+ public const uint kAlignMask = (kAlignTableSize - 1);
+
+ public const uint kStartPosModelIndex = 4;
+ public const uint kEndPosModelIndex = 14;
+ public const uint kNumPosModels = kEndPosModelIndex - kStartPosModelIndex;
+
+ public const uint kNumFullDistances = 1 << ((int)kEndPosModelIndex / 2);
+
+ public const uint kNumLitPosStatesBitsEncodingMax = 4;
+ public const uint kNumLitContextBitsMax = 8;
+
+ public const int kNumPosStatesBitsMax = 4;
+ public const uint kNumPosStatesMax = (1 << kNumPosStatesBitsMax);
+ public const int kNumPosStatesBitsEncodingMax = 4;
+ public const uint kNumPosStatesEncodingMax = (1 << kNumPosStatesBitsEncodingMax);
+
+ public const int kNumLowLenBits = 3;
+ public const int kNumMidLenBits = 3;
+ public const int kNumHighLenBits = 8;
+ public const uint kNumLowLenSymbols = 1 << kNumLowLenBits;
+ public const uint kNumMidLenSymbols = 1 << kNumMidLenBits;
+ public const uint kNumLenSymbols = kNumLowLenSymbols + kNumMidLenSymbols +
+ (1 << kNumHighLenBits);
+ public const uint kMatchMaxLen = kMatchMinLen + kNumLenSymbols - 1;
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaDecoder.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaDecoder.cs
new file mode 100644
index 00000000..42705b2a
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaDecoder.cs
@@ -0,0 +1,404 @@
+using System;
+using Compress.SevenZip.Common;
+using Compress.SevenZip.Compress.RangeCoder;
+
+namespace Compress.SevenZip.Compress.LZMA
+{
+ internal class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
+ {
+ class LenDecoder
+ {
+ BitDecoder m_Choice = new BitDecoder();
+ BitDecoder m_Choice2 = new BitDecoder();
+ BitTreeDecoder[] m_LowCoder = new BitTreeDecoder[Base.kNumPosStatesMax];
+ BitTreeDecoder[] m_MidCoder = new BitTreeDecoder[Base.kNumPosStatesMax];
+ BitTreeDecoder m_HighCoder = new BitTreeDecoder(Base.kNumHighLenBits);
+ uint m_NumPosStates = 0;
+
+ public void Create(uint numPosStates)
+ {
+ for (uint posState = m_NumPosStates; posState < numPosStates; posState++)
+ {
+ m_LowCoder[posState] = new BitTreeDecoder(Base.kNumLowLenBits);
+ m_MidCoder[posState] = new BitTreeDecoder(Base.kNumMidLenBits);
+ }
+ m_NumPosStates = numPosStates;
+ }
+
+ public void Init()
+ {
+ m_Choice.Init();
+ for (uint posState = 0; posState < m_NumPosStates; posState++)
+ {
+ m_LowCoder[posState].Init();
+ m_MidCoder[posState].Init();
+ }
+ m_Choice2.Init();
+ m_HighCoder.Init();
+ }
+
+ public uint Decode(RangeCoder.Decoder rangeDecoder, uint posState)
+ {
+ if (m_Choice.Decode(rangeDecoder) == 0)
+ return m_LowCoder[posState].Decode(rangeDecoder);
+ else
+ {
+ uint symbol = Base.kNumLowLenSymbols;
+ if (m_Choice2.Decode(rangeDecoder) == 0)
+ symbol += m_MidCoder[posState].Decode(rangeDecoder);
+ else
+ {
+ symbol += Base.kNumMidLenSymbols;
+ symbol += m_HighCoder.Decode(rangeDecoder);
+ }
+ return symbol;
+ }
+ }
+ }
+
+ class LiteralDecoder
+ {
+ struct Decoder2
+ {
+ BitDecoder[] m_Decoders;
+ public void Create() { m_Decoders = new BitDecoder[0x300]; }
+ public void Init() { for (int i = 0; i < 0x300; i++) m_Decoders[i].Init(); }
+
+ public byte DecodeNormal(RangeCoder.Decoder rangeDecoder)
+ {
+ uint symbol = 1;
+ do
+ symbol = (symbol << 1) | m_Decoders[symbol].Decode(rangeDecoder);
+ while (symbol < 0x100);
+ return (byte)symbol;
+ }
+
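+                // Decodes a literal that directly follows a match: bits of the
+                // byte found at the last match distance ("matchByte") select a
+                // separate probability context until the first decoded bit
+                // disagrees with it, after which decoding falls back to the
+                // plain per-symbol models.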
+ public byte DecodeWithMatchByte(RangeCoder.Decoder rangeDecoder, byte matchByte)
+ {
+ uint symbol = 1;
+ do
+ {
+ uint matchBit = (uint)(matchByte >> 7) & 1;
+ matchByte <<= 1;
+ uint bit = m_Decoders[((1 + matchBit) << 8) + symbol].Decode(rangeDecoder);
+ symbol = (symbol << 1) | bit;
+ if (matchBit != bit)
+ {
+ while (symbol < 0x100)
+ symbol = (symbol << 1) | m_Decoders[symbol].Decode(rangeDecoder);
+ break;
+ }
+ }
+ while (symbol < 0x100);
+ return (byte)symbol;
+ }
+ }
+
+ Decoder2[] m_Coders;
+ int m_NumPrevBits;
+ int m_NumPosBits;
+ uint m_PosMask;
+
+ public void Create(int numPosBits, int numPrevBits)
+ {
+ if (m_Coders != null && m_NumPrevBits == numPrevBits &&
+ m_NumPosBits == numPosBits)
+ return;
+ m_NumPosBits = numPosBits;
+ m_PosMask = ((uint)1 << numPosBits) - 1;
+ m_NumPrevBits = numPrevBits;
+ uint numStates = (uint)1 << (m_NumPrevBits + m_NumPosBits);
+ m_Coders = new Decoder2[numStates];
+ for (uint i = 0; i < numStates; i++)
+ m_Coders[i].Create();
+ }
+
+ public void Init()
+ {
+ uint numStates = (uint)1 << (m_NumPrevBits + m_NumPosBits);
+ for (uint i = 0; i < numStates; i++)
+ m_Coders[i].Init();
+ }
+
+ uint GetState(uint pos, byte prevByte)
+ { return ((pos & m_PosMask) << m_NumPrevBits) + (uint)(prevByte >> (8 - m_NumPrevBits)); }
+
+ public byte DecodeNormal(RangeCoder.Decoder rangeDecoder, uint pos, byte prevByte)
+ { return m_Coders[GetState(pos, prevByte)].DecodeNormal(rangeDecoder); }
+
+ public byte DecodeWithMatchByte(RangeCoder.Decoder rangeDecoder, uint pos, byte prevByte, byte matchByte)
+ { return m_Coders[GetState(pos, prevByte)].DecodeWithMatchByte(rangeDecoder, matchByte); }
+ };
+
+ LZ.OutWindow m_OutWindow;
+
+ BitDecoder[] m_IsMatchDecoders = new BitDecoder[Base.kNumStates << Base.kNumPosStatesBitsMax];
+ BitDecoder[] m_IsRepDecoders = new BitDecoder[Base.kNumStates];
+ BitDecoder[] m_IsRepG0Decoders = new BitDecoder[Base.kNumStates];
+ BitDecoder[] m_IsRepG1Decoders = new BitDecoder[Base.kNumStates];
+ BitDecoder[] m_IsRepG2Decoders = new BitDecoder[Base.kNumStates];
+ BitDecoder[] m_IsRep0LongDecoders = new BitDecoder[Base.kNumStates << Base.kNumPosStatesBitsMax];
+
+ BitTreeDecoder[] m_PosSlotDecoder = new BitTreeDecoder[Base.kNumLenToPosStates];
+ BitDecoder[] m_PosDecoders = new BitDecoder[Base.kNumFullDistances - Base.kEndPosModelIndex];
+
+ BitTreeDecoder m_PosAlignDecoder = new BitTreeDecoder(Base.kNumAlignBits);
+
+ LenDecoder m_LenDecoder = new LenDecoder();
+ LenDecoder m_RepLenDecoder = new LenDecoder();
+
+ LiteralDecoder m_LiteralDecoder = new LiteralDecoder();
+
+ int m_DictionarySize;
+
+ uint m_PosStateMask;
+
+ Base.State state = new Base.State();
+ uint rep0, rep1, rep2, rep3;
+
+ public Decoder()
+ {
+ m_DictionarySize = -1;
+ for (int i = 0; i < Base.kNumLenToPosStates; i++)
+ m_PosSlotDecoder[i] = new BitTreeDecoder(Base.kNumPosSlotBits);
+ }
+
+ void CreateDictionary()
+ {
+ if (m_DictionarySize < 0)
+ throw new InvalidParamException();
+ m_OutWindow = new LZ.OutWindow();
+ int blockSize = Math.Max(m_DictionarySize, (1 << 12));
+ m_OutWindow.Create(blockSize);
+ }
+
+ void SetLiteralProperties(int lp, int lc)
+ {
+ if (lp > 8)
+ throw new InvalidParamException();
+ if (lc > 8)
+ throw new InvalidParamException();
+ m_LiteralDecoder.Create(lp, lc);
+ }
+
+ void SetPosBitsProperties(int pb)
+ {
+ if (pb > Base.kNumPosStatesBitsMax)
+ throw new InvalidParamException();
+ uint numPosStates = (uint)1 << pb;
+ m_LenDecoder.Create(numPosStates);
+ m_RepLenDecoder.Create(numPosStates);
+ m_PosStateMask = numPosStates - 1;
+ }
+
+ void Init()
+ {
+ uint i;
+ for (i = 0; i < Base.kNumStates; i++)
+ {
+ for (uint j = 0; j <= m_PosStateMask; j++)
+ {
+ uint index = (i << Base.kNumPosStatesBitsMax) + j;
+ m_IsMatchDecoders[index].Init();
+ m_IsRep0LongDecoders[index].Init();
+ }
+ m_IsRepDecoders[i].Init();
+ m_IsRepG0Decoders[i].Init();
+ m_IsRepG1Decoders[i].Init();
+ m_IsRepG2Decoders[i].Init();
+ }
+
+ m_LiteralDecoder.Init();
+ for (i = 0; i < Base.kNumLenToPosStates; i++)
+ m_PosSlotDecoder[i].Init();
+ // m_PosSpecDecoder.Init();
+ for (i = 0; i < Base.kNumFullDistances - Base.kEndPosModelIndex; i++)
+ m_PosDecoders[i].Init();
+
+ m_LenDecoder.Init();
+ m_RepLenDecoder.Init();
+ m_PosAlignDecoder.Init();
+
+ state.Init();
+ rep0 = 0;
+ rep1 = 0;
+ rep2 = 0;
+ rep3 = 0;
+ }
+
+ public void Code(System.IO.Stream inStream, System.IO.Stream outStream,
+ Int64 inSize, Int64 outSize, ICodeProgress progress)
+ {
+ if (m_OutWindow == null)
+ CreateDictionary();
+ m_OutWindow.Init(outStream);
+ if (outSize > 0)
+ m_OutWindow.SetLimit(outSize);
+ else
+ m_OutWindow.SetLimit(Int64.MaxValue - m_OutWindow.Total);
+
+ RangeCoder.Decoder rangeDecoder = new RangeCoder.Decoder();
+ rangeDecoder.Init(inStream);
+
+ Code(m_DictionarySize, m_OutWindow, rangeDecoder);
+
+ m_OutWindow.ReleaseStream();
+ rangeDecoder.ReleaseStream();
+
+ if (!rangeDecoder.IsFinished || (inSize > 0 && rangeDecoder.Total != inSize))
+ throw new DataErrorException();
+ if (m_OutWindow.HasPending)
+ throw new DataErrorException();
+ m_OutWindow = null;
+ }
+
+ internal bool Code(int dictionarySize, LZ.OutWindow outWindow, RangeCoder.Decoder rangeDecoder)
+ {
+ int dictionarySizeCheck = Math.Max(dictionarySize, 1);
+
+ outWindow.CopyPending();
+
+ while (outWindow.HasSpace)
+ {
+ uint posState = (uint)outWindow.Total & m_PosStateMask;
+ if (m_IsMatchDecoders[(state.Index << Base.kNumPosStatesBitsMax) + posState].Decode(rangeDecoder) == 0)
+ {
+ byte b;
+ byte prevByte = outWindow.GetByte(0);
+ if (!state.IsCharState())
+ b = m_LiteralDecoder.DecodeWithMatchByte(rangeDecoder,
+ (uint)outWindow.Total, prevByte, outWindow.GetByte((int)rep0));
+ else
+ b = m_LiteralDecoder.DecodeNormal(rangeDecoder, (uint)outWindow.Total, prevByte);
+ outWindow.PutByte(b);
+ state.UpdateChar();
+ }
+ else
+ {
+ uint len;
+ if (m_IsRepDecoders[state.Index].Decode(rangeDecoder) == 1)
+ {
+ if (m_IsRepG0Decoders[state.Index].Decode(rangeDecoder) == 0)
+ {
+ if (m_IsRep0LongDecoders[(state.Index << Base.kNumPosStatesBitsMax) + posState].Decode(rangeDecoder) == 0)
+ {
+ state.UpdateShortRep();
+ outWindow.PutByte(outWindow.GetByte((int)rep0));
+ continue;
+ }
+ }
+ else
+ {
+ UInt32 distance;
+ if (m_IsRepG1Decoders[state.Index].Decode(rangeDecoder) == 0)
+ {
+ distance = rep1;
+ }
+ else
+ {
+ if (m_IsRepG2Decoders[state.Index].Decode(rangeDecoder) == 0)
+ distance = rep2;
+ else
+ {
+ distance = rep3;
+ rep3 = rep2;
+ }
+ rep2 = rep1;
+ }
+ rep1 = rep0;
+ rep0 = distance;
+ }
+ len = m_RepLenDecoder.Decode(rangeDecoder, posState) + Base.kMatchMinLen;
+ state.UpdateRep();
+ }
+ else
+ {
+ rep3 = rep2;
+ rep2 = rep1;
+ rep1 = rep0;
+ len = Base.kMatchMinLen + m_LenDecoder.Decode(rangeDecoder, posState);
+ state.UpdateMatch();
+ uint posSlot = m_PosSlotDecoder[Base.GetLenToPosState(len)].Decode(rangeDecoder);
+ if (posSlot >= Base.kStartPosModelIndex)
+ {
+ int numDirectBits = (int)((posSlot >> 1) - 1);
+ rep0 = ((2 | (posSlot & 1)) << numDirectBits);
+ if (posSlot < Base.kEndPosModelIndex)
+ rep0 += BitTreeDecoder.ReverseDecode(m_PosDecoders,
+ rep0 - posSlot - 1, rangeDecoder, numDirectBits);
+ else
+ {
+ rep0 += (rangeDecoder.DecodeDirectBits(
+ numDirectBits - Base.kNumAlignBits) << Base.kNumAlignBits);
+ rep0 += m_PosAlignDecoder.ReverseDecode(rangeDecoder);
+ }
+ }
+ else
+ rep0 = posSlot;
+ }
+ if (rep0 >= outWindow.Total || rep0 >= dictionarySizeCheck)
+ {
+ if (rep0 == 0xFFFFFFFF)
+ return true;
+ throw new DataErrorException();
+ }
+ outWindow.CopyBlock((int)rep0, (int)len);
+ }
+ }
+ return false;
+ }
+
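+        // The first properties byte packs the literal/position parameters as
+        // (pb * 5 + lp) * 9 + lc, which the % 9 / % 5 arithmetic below unpacks;
+        // bytes 1-4, when present, hold the dictionary size as a little-endian
+        // 32-bit value.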
+ public void SetDecoderProperties(byte[] properties)
+ {
+ if (properties.Length < 1)
+ throw new InvalidParamException();
+ int lc = properties[0] % 9;
+ int remainder = properties[0] / 9;
+ int lp = remainder % 5;
+ int pb = remainder / 5;
+ if (pb > Base.kNumPosStatesBitsMax)
+ throw new InvalidParamException();
+ SetLiteralProperties(lp, lc);
+ SetPosBitsProperties(pb);
+ Init();
+ if (properties.Length >= 5)
+ {
+ m_DictionarySize = 0;
+ for (int i = 0; i < 4; i++)
+ m_DictionarySize += properties[1 + i] << (i * 8);
+ }
+ }
+
+ public void Train(System.IO.Stream stream)
+ {
+ if (m_OutWindow == null)
+ CreateDictionary();
+ m_OutWindow.Train(stream);
+ }
+
+ /*
+ public override bool CanRead { get { return true; }}
+ public override bool CanWrite { get { return true; }}
+ public override bool CanSeek { get { return true; }}
+ public override long Length { get { return 0; }}
+ public override long Position
+ {
+ get { return 0; }
+ set { }
+ }
+ public override void Flush() { }
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ return 0;
+ }
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ }
+ public override long Seek(long offset, System.IO.SeekOrigin origin)
+ {
+ return 0;
+ }
+ public override void SetLength(long value) {}
+ */
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaEncoder.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaEncoder.cs
new file mode 100644
index 00000000..61340432
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaEncoder.cs
@@ -0,0 +1,1547 @@
+using System;
+using Compress.SevenZip.Common;
+using Compress.SevenZip.Compress.RangeCoder;
+
+namespace Compress.SevenZip.Compress.LZMA
+{
+
+ internal class Encoder : ICoder, ISetCoderProperties, IWriteCoderProperties
+ {
+ enum EMatchFinderType
+ {
+ BT2,
+ BT4,
+ };
+
+ const UInt32 kIfinityPrice = 0xFFFFFFF;
+
+ static Byte[] g_FastPos = new Byte[1 << 11];
+
+ static Encoder()
+ {
+ const Byte kFastSlots = 22;
+ int c = 2;
+ g_FastPos[0] = 0;
+ g_FastPos[1] = 1;
+ for (Byte slotFast = 2; slotFast < kFastSlots; slotFast++)
+ {
+ UInt32 k = ((UInt32)1 << ((slotFast >> 1) - 1));
+ for (UInt32 j = 0; j < k; j++, c++)
+ g_FastPos[c] = slotFast;
+ }
+ }
+
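+        // g_FastPos maps a match distance to its LZMA "position slot".
+        // Distances below 2^11 are a direct table lookup; larger distances
+        // reuse the table on their high bits with a fixed slot offset, so each
+        // slot covers roughly half an octave of distances.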
+ static UInt32 GetPosSlot(UInt32 pos)
+ {
+ if (pos < (1 << 11))
+ return g_FastPos[pos];
+ if (pos < (1 << 21))
+ return (UInt32)(g_FastPos[pos >> 10] + 20);
+ return (UInt32)(g_FastPos[pos >> 20] + 40);
+ }
+
+ static UInt32 GetPosSlot2(UInt32 pos)
+ {
+ if (pos < (1 << 17))
+ return (UInt32)(g_FastPos[pos >> 6] + 12);
+ if (pos < (1 << 27))
+ return (UInt32)(g_FastPos[pos >> 16] + 32);
+ return (UInt32)(g_FastPos[pos >> 26] + 52);
+ }
+
+ Base.State _state = new Base.State();
+ Byte _previousByte;
+ UInt32[] _repDistances = new UInt32[Base.kNumRepDistances];
+
+ void BaseInit()
+ {
+ _state.Init();
+ _previousByte = 0;
+ for (UInt32 i = 0; i < Base.kNumRepDistances; i++)
+ _repDistances[i] = 0;
+ }
+
+ const int kDefaultDictionaryLogSize = 22;
+ const UInt32 kNumFastBytesDefault = 0x20;
+
+ class LiteralEncoder
+ {
+ public struct Encoder2
+ {
+ BitEncoder[] m_Encoders;
+
+ public void Create() { m_Encoders = new BitEncoder[0x300]; }
+
+ public void Init() { for (int i = 0; i < 0x300; i++) m_Encoders[i].Init(); }
+
+ public void Encode(RangeCoder.Encoder rangeEncoder, byte symbol)
+ {
+ uint context = 1;
+ for (int i = 7; i >= 0; i--)
+ {
+ uint bit = (uint)((symbol >> i) & 1);
+ m_Encoders[context].Encode(rangeEncoder, bit);
+ context = (context << 1) | bit;
+ }
+ }
+
+ public void EncodeMatched(RangeCoder.Encoder rangeEncoder, byte matchByte, byte symbol)
+ {
+ uint context = 1;
+ bool same = true;
+ for (int i = 7; i >= 0; i--)
+ {
+ uint bit = (uint)((symbol >> i) & 1);
+ uint state = context;
+ if (same)
+ {
+ uint matchBit = (uint)((matchByte >> i) & 1);
+ state += ((1 + matchBit) << 8);
+ same = (matchBit == bit);
+ }
+ m_Encoders[state].Encode(rangeEncoder, bit);
+ context = (context << 1) | bit;
+ }
+ }
+
+ public uint GetPrice(bool matchMode, byte matchByte, byte symbol)
+ {
+ uint price = 0;
+ uint context = 1;
+ int i = 7;
+ if (matchMode)
+ {
+ for (; i >= 0; i--)
+ {
+ uint matchBit = (uint)(matchByte >> i) & 1;
+ uint bit = (uint)(symbol >> i) & 1;
+ price += m_Encoders[((1 + matchBit) << 8) + context].GetPrice(bit);
+ context = (context << 1) | bit;
+ if (matchBit != bit)
+ {
+ i--;
+ break;
+ }
+ }
+ }
+ for (; i >= 0; i--)
+ {
+ uint bit = (uint)(symbol >> i) & 1;
+ price += m_Encoders[context].GetPrice(bit);
+ context = (context << 1) | bit;
+ }
+ return price;
+ }
+ }
+
+ Encoder2[] m_Coders;
+ int m_NumPrevBits;
+ int m_NumPosBits;
+ uint m_PosMask;
+
+ public void Create(int numPosBits, int numPrevBits)
+ {
+ if (m_Coders != null && m_NumPrevBits == numPrevBits && m_NumPosBits == numPosBits)
+ return;
+ m_NumPosBits = numPosBits;
+ m_PosMask = ((uint)1 << numPosBits) - 1;
+ m_NumPrevBits = numPrevBits;
+ uint numStates = (uint)1 << (m_NumPrevBits + m_NumPosBits);
+ m_Coders = new Encoder2[numStates];
+ for (uint i = 0; i < numStates; i++)
+ m_Coders[i].Create();
+ }
+
+ public void Init()
+ {
+ uint numStates = (uint)1 << (m_NumPrevBits + m_NumPosBits);
+ for (uint i = 0; i < numStates; i++)
+ m_Coders[i].Init();
+ }
+
+ public Encoder2 GetSubCoder(UInt32 pos, Byte prevByte)
+ { return m_Coders[((pos & m_PosMask) << m_NumPrevBits) + (uint)(prevByte >> (8 - m_NumPrevBits))]; }
+ }
+
+ class LenEncoder
+ {
+ RangeCoder.BitEncoder _choice = new RangeCoder.BitEncoder();
+ RangeCoder.BitEncoder _choice2 = new RangeCoder.BitEncoder();
+ RangeCoder.BitTreeEncoder[] _lowCoder = new RangeCoder.BitTreeEncoder[Base.kNumPosStatesEncodingMax];
+ RangeCoder.BitTreeEncoder[] _midCoder = new RangeCoder.BitTreeEncoder[Base.kNumPosStatesEncodingMax];
+ RangeCoder.BitTreeEncoder _highCoder = new RangeCoder.BitTreeEncoder(Base.kNumHighLenBits);
+
+ public LenEncoder()
+ {
+ for (UInt32 posState = 0; posState < Base.kNumPosStatesEncodingMax; posState++)
+ {
+ _lowCoder[posState] = new RangeCoder.BitTreeEncoder(Base.kNumLowLenBits);
+ _midCoder[posState] = new RangeCoder.BitTreeEncoder(Base.kNumMidLenBits);
+ }
+ }
+
+ public void Init(UInt32 numPosStates)
+ {
+ _choice.Init();
+ _choice2.Init();
+ for (UInt32 posState = 0; posState < numPosStates; posState++)
+ {
+ _lowCoder[posState].Init();
+ _midCoder[posState].Init();
+ }
+ _highCoder.Init();
+ }
+
+ public void Encode(RangeCoder.Encoder rangeEncoder, UInt32 symbol, UInt32 posState)
+ {
+ if (symbol < Base.kNumLowLenSymbols)
+ {
+ _choice.Encode(rangeEncoder, 0);
+ _lowCoder[posState].Encode(rangeEncoder, symbol);
+ }
+ else
+ {
+ symbol -= Base.kNumLowLenSymbols;
+ _choice.Encode(rangeEncoder, 1);
+ if (symbol < Base.kNumMidLenSymbols)
+ {
+ _choice2.Encode(rangeEncoder, 0);
+ _midCoder[posState].Encode(rangeEncoder, symbol);
+ }
+ else
+ {
+ _choice2.Encode(rangeEncoder, 1);
+ _highCoder.Encode(rangeEncoder, symbol - Base.kNumMidLenSymbols);
+ }
+ }
+ }
+
+ public void SetPrices(UInt32 posState, UInt32 numSymbols, UInt32[] prices, UInt32 st)
+ {
+ UInt32 a0 = _choice.GetPrice0();
+ UInt32 a1 = _choice.GetPrice1();
+ UInt32 b0 = a1 + _choice2.GetPrice0();
+ UInt32 b1 = a1 + _choice2.GetPrice1();
+ UInt32 i = 0;
+ for (i = 0; i < Base.kNumLowLenSymbols; i++)
+ {
+ if (i >= numSymbols)
+ return;
+ prices[st + i] = a0 + _lowCoder[posState].GetPrice(i);
+ }
+ for (; i < Base.kNumLowLenSymbols + Base.kNumMidLenSymbols; i++)
+ {
+ if (i >= numSymbols)
+ return;
+ prices[st + i] = b0 + _midCoder[posState].GetPrice(i - Base.kNumLowLenSymbols);
+ }
+ for (; i < numSymbols; i++)
+ prices[st + i] = b1 + _highCoder.GetPrice(i - Base.kNumLowLenSymbols - Base.kNumMidLenSymbols);
+ }
+ };
+
+ const UInt32 kNumLenSpecSymbols = Base.kNumLowLenSymbols + Base.kNumMidLenSymbols;
+
+ class LenPriceTableEncoder : LenEncoder
+ {
+ UInt32[] _prices = new UInt32[Base.kNumLenSymbols << Base.kNumPosStatesBitsEncodingMax];
+ UInt32 _tableSize;
+ UInt32[] _counters = new UInt32[Base.kNumPosStatesEncodingMax];
+
+ public void SetTableSize(UInt32 tableSize) { _tableSize = tableSize; }
+
+ public UInt32 GetPrice(UInt32 symbol, UInt32 posState)
+ {
+ return _prices[posState * Base.kNumLenSymbols + symbol];
+ }
+
+ void UpdateTable(UInt32 posState)
+ {
+ SetPrices(posState, _tableSize, _prices, posState * Base.kNumLenSymbols);
+ _counters[posState] = _tableSize;
+ }
+
+ public void UpdateTables(UInt32 numPosStates)
+ {
+ for (UInt32 posState = 0; posState < numPosStates; posState++)
+ UpdateTable(posState);
+ }
+
+ public new void Encode(RangeCoder.Encoder rangeEncoder, UInt32 symbol, UInt32 posState)
+ {
+ base.Encode(rangeEncoder, symbol, posState);
+ if (--_counters[posState] == 0)
+ UpdateTable(posState);
+ }
+ }
+
+ const UInt32 kNumOpts = 1 << 12;
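+ // One node of the optimal-parse buffer (kNumOpts = 4096 positions of lookahead).
+ // Price is the cheapest cost found so far to reach this position, PosPrev/BackPrev
+ // describe the packet that got there (Prev1IsChar/Prev2/PosPrev2/BackPrev2 cover the
+ // combined literal + rep packets), and Backs0..3 snapshot the rep-distance history.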
+ class Optimal
+ {
+ public Base.State State;
+
+ public bool Prev1IsChar;
+ public bool Prev2;
+
+ public UInt32 PosPrev2;
+ public UInt32 BackPrev2;
+
+ public UInt32 Price;
+ public UInt32 PosPrev;
+ public UInt32 BackPrev;
+
+ public UInt32 Backs0;
+ public UInt32 Backs1;
+ public UInt32 Backs2;
+ public UInt32 Backs3;
+
+ public void MakeAsChar() { BackPrev = 0xFFFFFFFF; Prev1IsChar = false; }
+ public void MakeAsShortRep() { BackPrev = 0; Prev1IsChar = false; }
+ public bool IsShortRep() { return (BackPrev == 0); }
+ };
+ Optimal[] _optimum = new Optimal[kNumOpts];
+ LZ.BinTree _matchFinder = null;
+ RangeCoder.Encoder _rangeEncoder = new RangeCoder.Encoder();
+
+ RangeCoder.BitEncoder[] _isMatch = new RangeCoder.BitEncoder[Base.kNumStates << Base.kNumPosStatesBitsMax];
+ RangeCoder.BitEncoder[] _isRep = new RangeCoder.BitEncoder[Base.kNumStates];
+ RangeCoder.BitEncoder[] _isRepG0 = new RangeCoder.BitEncoder[Base.kNumStates];
+ RangeCoder.BitEncoder[] _isRepG1 = new RangeCoder.BitEncoder[Base.kNumStates];
+ RangeCoder.BitEncoder[] _isRepG2 = new RangeCoder.BitEncoder[Base.kNumStates];
+ RangeCoder.BitEncoder[] _isRep0Long = new RangeCoder.BitEncoder[Base.kNumStates << Base.kNumPosStatesBitsMax];
+
+ RangeCoder.BitTreeEncoder[] _posSlotEncoder = new RangeCoder.BitTreeEncoder[Base.kNumLenToPosStates];
+
+ RangeCoder.BitEncoder[] _posEncoders = new RangeCoder.BitEncoder[Base.kNumFullDistances - Base.kEndPosModelIndex];
+ RangeCoder.BitTreeEncoder _posAlignEncoder = new RangeCoder.BitTreeEncoder(Base.kNumAlignBits);
+
+ LenPriceTableEncoder _lenEncoder = new LenPriceTableEncoder();
+ LenPriceTableEncoder _repMatchLenEncoder = new LenPriceTableEncoder();
+
+ LiteralEncoder _literalEncoder = new LiteralEncoder();
+
+ UInt32[] _matchDistances = new UInt32[Base.kMatchMaxLen * 2 + 2];
+
+ UInt32 _numFastBytes = kNumFastBytesDefault;
+ UInt32 _longestMatchLength;
+ UInt32 _numDistancePairs;
+
+ UInt32 _additionalOffset;
+
+ UInt32 _optimumEndIndex;
+ UInt32 _optimumCurrentIndex;
+
+ bool _longestMatchWasFound;
+
+ UInt32[] _posSlotPrices = new UInt32[1 << (Base.kNumPosSlotBits + Base.kNumLenToPosStatesBits)];
+ UInt32[] _distancesPrices = new UInt32[Base.kNumFullDistances << Base.kNumLenToPosStatesBits];
+ UInt32[] _alignPrices = new UInt32[Base.kAlignTableSize];
+ UInt32 _alignPriceCount;
+
+ UInt32 _distTableSize = (kDefaultDictionaryLogSize * 2);
+
+ int _posStateBits = 2;
+ UInt32 _posStateMask = (4 - 1);
+ int _numLiteralPosStateBits = 0;
+ int _numLiteralContextBits = 3;
+
+ UInt32 _dictionarySize = (1 << kDefaultDictionaryLogSize);
+ UInt32 _dictionarySizePrev = 0xFFFFFFFF;
+ UInt32 _numFastBytesPrev = 0xFFFFFFFF;
+
+ Int64 nowPos64;
+ bool _finished;
+ System.IO.Stream _inStream;
+
+ EMatchFinderType _matchFinderType = EMatchFinderType.BT4;
+ bool _writeEndMark = false;
+
+ bool _needReleaseMFStream;
+ bool _processingMode;
+
+ void Create()
+ {
+ if (_matchFinder == null)
+ {
+ LZ.BinTree bt = new LZ.BinTree();
+ int numHashBytes = 4;
+ if (_matchFinderType == EMatchFinderType.BT2)
+ numHashBytes = 2;
+ bt.SetType(numHashBytes);
+ _matchFinder = bt;
+ }
+ _literalEncoder.Create(_numLiteralPosStateBits, _numLiteralContextBits);
+
+ if (_dictionarySize == _dictionarySizePrev && _numFastBytesPrev == _numFastBytes)
+ return;
+ _matchFinder.Create(_dictionarySize, kNumOpts, _numFastBytes, Base.kMatchMaxLen + 1 + kNumOpts);
+ _dictionarySizePrev = _dictionarySize;
+ _numFastBytesPrev = _numFastBytes;
+ }
+
+ public Encoder()
+ {
+ for (int i = 0; i < kNumOpts; i++)
+ _optimum[i] = new Optimal();
+ for (int i = 0; i < Base.kNumLenToPosStates; i++)
+ _posSlotEncoder[i] = new RangeCoder.BitTreeEncoder(Base.kNumPosSlotBits);
+ }
+
+ void SetWriteEndMarkerMode(bool writeEndMarker)
+ {
+ _writeEndMark = writeEndMarker;
+ }
+
+ void Init()
+ {
+ BaseInit();
+ _rangeEncoder.Init();
+
+ uint i;
+ for (i = 0; i < Base.kNumStates; i++)
+ {
+ for (uint j = 0; j <= _posStateMask; j++)
+ {
+ uint complexState = (i << Base.kNumPosStatesBitsMax) + j;
+ _isMatch[complexState].Init();
+ _isRep0Long[complexState].Init();
+ }
+ _isRep[i].Init();
+ _isRepG0[i].Init();
+ _isRepG1[i].Init();
+ _isRepG2[i].Init();
+ }
+ _literalEncoder.Init();
+ for (i = 0; i < Base.kNumLenToPosStates; i++)
+ _posSlotEncoder[i].Init();
+ for (i = 0; i < Base.kNumFullDistances - Base.kEndPosModelIndex; i++)
+ _posEncoders[i].Init();
+
+ _lenEncoder.Init((UInt32)1 << _posStateBits);
+ _repMatchLenEncoder.Init((UInt32)1 << _posStateBits);
+
+ _posAlignEncoder.Init();
+
+ _longestMatchWasFound = false;
+ _optimumEndIndex = 0;
+ _optimumCurrentIndex = 0;
+ _additionalOffset = 0;
+ }
+
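+ // Fetches the match candidates at the current position: _matchDistances holds
+ // alternating (length, distance) pairs, lenRes is the longest length found (extended
+ // past _numFastBytes when possible), and the read position (_additionalOffset) advances by one.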
+ void ReadMatchDistances(out UInt32 lenRes, out UInt32 numDistancePairs)
+ {
+ lenRes = 0;
+ numDistancePairs = _matchFinder.GetMatches(_matchDistances);
+ if (numDistancePairs > 0)
+ {
+ lenRes = _matchDistances[numDistancePairs - 2];
+ if (lenRes == _numFastBytes)
+ lenRes += _matchFinder.GetMatchLen((int)lenRes - 1, _matchDistances[numDistancePairs - 1],
+ Base.kMatchMaxLen - lenRes);
+ }
+ _additionalOffset++;
+ }
+
+
+ void MovePos(UInt32 num)
+ {
+ if (num > 0)
+ {
+ _matchFinder.Skip(num);
+ _additionalOffset += num;
+ }
+ }
+
+ UInt32 GetRepLen1Price(Base.State state, UInt32 posState)
+ {
+ return _isRepG0[state.Index].GetPrice0() +
+ _isRep0Long[(state.Index << Base.kNumPosStatesBitsMax) + posState].GetPrice0();
+ }
+
+ UInt32 GetPureRepPrice(UInt32 repIndex, Base.State state, UInt32 posState)
+ {
+ UInt32 price;
+ if (repIndex == 0)
+ {
+ price = _isRepG0[state.Index].GetPrice0();
+ price += _isRep0Long[(state.Index << Base.kNumPosStatesBitsMax) + posState].GetPrice1();
+ }
+ else
+ {
+ price = _isRepG0[state.Index].GetPrice1();
+ if (repIndex == 1)
+ price += _isRepG1[state.Index].GetPrice0();
+ else
+ {
+ price += _isRepG1[state.Index].GetPrice1();
+ price += _isRepG2[state.Index].GetPrice(repIndex - 2);
+ }
+ }
+ return price;
+ }
+
+ UInt32 GetRepPrice(UInt32 repIndex, UInt32 len, Base.State state, UInt32 posState)
+ {
+ UInt32 price = _repMatchLenEncoder.GetPrice(len - Base.kMatchMinLen, posState);
+ return price + GetPureRepPrice(repIndex, state, posState);
+ }
+
+ UInt32 GetPosLenPrice(UInt32 pos, UInt32 len, UInt32 posState)
+ {
+ UInt32 price;
+ UInt32 lenToPosState = Base.GetLenToPosState(len);
+ if (pos < Base.kNumFullDistances)
+ price = _distancesPrices[(lenToPosState * Base.kNumFullDistances) + pos];
+ else
+ price = _posSlotPrices[(lenToPosState << Base.kNumPosSlotBits) + GetPosSlot2(pos)] +
+ _alignPrices[pos & Base.kAlignMask];
+ return price + _lenEncoder.GetPrice(len - Base.kMatchMinLen, posState);
+ }
+
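+ // Reverses the PosPrev/BackPrev chain recorded by GetOptimum so the chosen packets
+ // can be replayed front to back; returns the length of the first packet.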
+ UInt32 Backward(out UInt32 backRes, UInt32 cur)
+ {
+ _optimumEndIndex = cur;
+ UInt32 posMem = _optimum[cur].PosPrev;
+ UInt32 backMem = _optimum[cur].BackPrev;
+ do
+ {
+ if (_optimum[cur].Prev1IsChar)
+ {
+ _optimum[posMem].MakeAsChar();
+ _optimum[posMem].PosPrev = posMem - 1;
+ if (_optimum[cur].Prev2)
+ {
+ _optimum[posMem - 1].Prev1IsChar = false;
+ _optimum[posMem - 1].PosPrev = _optimum[cur].PosPrev2;
+ _optimum[posMem - 1].BackPrev = _optimum[cur].BackPrev2;
+ }
+ }
+ UInt32 posPrev = posMem;
+ UInt32 backCur = backMem;
+
+ backMem = _optimum[posPrev].BackPrev;
+ posMem = _optimum[posPrev].PosPrev;
+
+ _optimum[posPrev].BackPrev = backCur;
+ _optimum[posPrev].PosPrev = cur;
+ cur = posPrev;
+ }
+ while (cur > 0);
+ backRes = _optimum[0].BackPrev;
+ _optimumCurrentIndex = _optimum[0].PosPrev;
+ return _optimumCurrentIndex;
+ }
+
+ UInt32[] reps = new UInt32[Base.kNumRepDistances];
+ UInt32[] repLens = new UInt32[Base.kNumRepDistances];
+
+
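+ // Optimal parser: fills _optimum[] with the cheapest known way to encode the next
+ // bytes as literals, rep matches and normal matches, then calls Backward to recover
+ // the first packet's length (return value) and distance code (backRes).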
+ UInt32 GetOptimum(UInt32 position, out UInt32 backRes)
+ {
+ if (_optimumEndIndex != _optimumCurrentIndex)
+ {
+ UInt32 lenRes = _optimum[_optimumCurrentIndex].PosPrev - _optimumCurrentIndex;
+ backRes = _optimum[_optimumCurrentIndex].BackPrev;
+ _optimumCurrentIndex = _optimum[_optimumCurrentIndex].PosPrev;
+ return lenRes;
+ }
+ _optimumCurrentIndex = _optimumEndIndex = 0;
+
+ UInt32 lenMain, numDistancePairs;
+ if (!_longestMatchWasFound)
+ {
+ ReadMatchDistances(out lenMain, out numDistancePairs);
+ }
+ else
+ {
+ lenMain = _longestMatchLength;
+ numDistancePairs = _numDistancePairs;
+ _longestMatchWasFound = false;
+ }
+
+ UInt32 numAvailableBytes = _matchFinder.GetNumAvailableBytes() + 1;
+ if (numAvailableBytes < 2)
+ {
+ backRes = 0xFFFFFFFF;
+ return 1;
+ }
+ if (numAvailableBytes > Base.kMatchMaxLen)
+ numAvailableBytes = Base.kMatchMaxLen;
+
+ UInt32 repMaxIndex = 0;
+ UInt32 i;
+ for (i = 0; i < Base.kNumRepDistances; i++)
+ {
+ reps[i] = _repDistances[i];
+ repLens[i] = _matchFinder.GetMatchLen(0 - 1, reps[i], Base.kMatchMaxLen);
+ if (repLens[i] > repLens[repMaxIndex])
+ repMaxIndex = i;
+ }
+ if (repLens[repMaxIndex] >= _numFastBytes)
+ {
+ backRes = repMaxIndex;
+ UInt32 lenRes = repLens[repMaxIndex];
+ MovePos(lenRes - 1);
+ return lenRes;
+ }
+
+ if (lenMain >= _numFastBytes)
+ {
+ backRes = _matchDistances[numDistancePairs - 1] + Base.kNumRepDistances;
+ MovePos(lenMain - 1);
+ return lenMain;
+ }
+
+ Byte currentByte = _matchFinder.GetIndexByte(0 - 1);
+ Byte matchByte = _matchFinder.GetIndexByte((Int32)(0 - _repDistances[0] - 1 - 1));
+
+ if (lenMain < 2 && currentByte != matchByte && repLens[repMaxIndex] < 2)
+ {
+ backRes = (UInt32)0xFFFFFFFF;
+ return 1;
+ }
+
+ _optimum[0].State = _state;
+
+ UInt32 posState = (position & _posStateMask);
+
+ _optimum[1].Price = _isMatch[(_state.Index << Base.kNumPosStatesBitsMax) + posState].GetPrice0() +
+ _literalEncoder.GetSubCoder(position, _previousByte).GetPrice(!_state.IsCharState(), matchByte, currentByte);
+ _optimum[1].MakeAsChar();
+
+ UInt32 matchPrice = _isMatch[(_state.Index << Base.kNumPosStatesBitsMax) + posState].GetPrice1();
+ UInt32 repMatchPrice = matchPrice + _isRep[_state.Index].GetPrice1();
+
+ if (matchByte == currentByte)
+ {
+ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(_state, posState);
+ if (shortRepPrice < _optimum[1].Price)
+ {
+ _optimum[1].Price = shortRepPrice;
+ _optimum[1].MakeAsShortRep();
+ }
+ }
+
+ UInt32 lenEnd = ((lenMain >= repLens[repMaxIndex]) ? lenMain : repLens[repMaxIndex]);
+
+ if (lenEnd < 2)
+ {
+ backRes = _optimum[1].BackPrev;
+ return 1;
+ }
+
+ _optimum[1].PosPrev = 0;
+
+ _optimum[0].Backs0 = reps[0];
+ _optimum[0].Backs1 = reps[1];
+ _optimum[0].Backs2 = reps[2];
+ _optimum[0].Backs3 = reps[3];
+
+ UInt32 len = lenEnd;
+ do
+ _optimum[len--].Price = kIfinityPrice;
+ while (len >= 2);
+
+ for (i = 0; i < Base.kNumRepDistances; i++)
+ {
+ UInt32 repLen = repLens[i];
+ if (repLen < 2)
+ continue;
+ UInt32 price = repMatchPrice + GetPureRepPrice(i, _state, posState);
+ do
+ {
+ UInt32 curAndLenPrice = price + _repMatchLenEncoder.GetPrice(repLen - 2, posState);
+ Optimal optimum = _optimum[repLen];
+ if (curAndLenPrice < optimum.Price)
+ {
+ optimum.Price = curAndLenPrice;
+ optimum.PosPrev = 0;
+ optimum.BackPrev = i;
+ optimum.Prev1IsChar = false;
+ }
+ }
+ while (--repLen >= 2);
+ }
+
+ UInt32 normalMatchPrice = matchPrice + _isRep[_state.Index].GetPrice0();
+
+ len = ((repLens[0] >= 2) ? repLens[0] + 1 : 2);
+ if (len <= lenMain)
+ {
+ UInt32 offs = 0;
+ while (len > _matchDistances[offs])
+ offs += 2;
+ for (; ; len++)
+ {
+ UInt32 distance = _matchDistances[offs + 1];
+ UInt32 curAndLenPrice = normalMatchPrice + GetPosLenPrice(distance, len, posState);
+ Optimal optimum = _optimum[len];
+ if (curAndLenPrice < optimum.Price)
+ {
+ optimum.Price = curAndLenPrice;
+ optimum.PosPrev = 0;
+ optimum.BackPrev = distance + Base.kNumRepDistances;
+ optimum.Prev1IsChar = false;
+ }
+ if (len == _matchDistances[offs])
+ {
+ offs += 2;
+ if (offs == numDistancePairs)
+ break;
+ }
+ }
+ }
+
+ UInt32 cur = 0;
+
+ while (true)
+ {
+ cur++;
+ if (cur == lenEnd)
+ return Backward(out backRes, cur);
+ UInt32 newLen;
+ ReadMatchDistances(out newLen, out numDistancePairs);
+ if (newLen >= _numFastBytes)
+ {
+ _numDistancePairs = numDistancePairs;
+ _longestMatchLength = newLen;
+ _longestMatchWasFound = true;
+ return Backward(out backRes, cur);
+ }
+ position++;
+ UInt32 posPrev = _optimum[cur].PosPrev;
+ Base.State state;
+ if (_optimum[cur].Prev1IsChar)
+ {
+ posPrev--;
+ if (_optimum[cur].Prev2)
+ {
+ state = _optimum[_optimum[cur].PosPrev2].State;
+ if (_optimum[cur].BackPrev2 < Base.kNumRepDistances)
+ state.UpdateRep();
+ else
+ state.UpdateMatch();
+ }
+ else
+ state = _optimum[posPrev].State;
+ state.UpdateChar();
+ }
+ else
+ state = _optimum[posPrev].State;
+ if (posPrev == cur - 1)
+ {
+ if (_optimum[cur].IsShortRep())
+ state.UpdateShortRep();
+ else
+ state.UpdateChar();
+ }
+ else
+ {
+ UInt32 pos;
+ if (_optimum[cur].Prev1IsChar && _optimum[cur].Prev2)
+ {
+ posPrev = _optimum[cur].PosPrev2;
+ pos = _optimum[cur].BackPrev2;
+ state.UpdateRep();
+ }
+ else
+ {
+ pos = _optimum[cur].BackPrev;
+ if (pos < Base.kNumRepDistances)
+ state.UpdateRep();
+ else
+ state.UpdateMatch();
+ }
+ Optimal opt = _optimum[posPrev];
+ if (pos < Base.kNumRepDistances)
+ {
+ if (pos == 0)
+ {
+ reps[0] = opt.Backs0;
+ reps[1] = opt.Backs1;
+ reps[2] = opt.Backs2;
+ reps[3] = opt.Backs3;
+ }
+ else if (pos == 1)
+ {
+ reps[0] = opt.Backs1;
+ reps[1] = opt.Backs0;
+ reps[2] = opt.Backs2;
+ reps[3] = opt.Backs3;
+ }
+ else if (pos == 2)
+ {
+ reps[0] = opt.Backs2;
+ reps[1] = opt.Backs0;
+ reps[2] = opt.Backs1;
+ reps[3] = opt.Backs3;
+ }
+ else
+ {
+ reps[0] = opt.Backs3;
+ reps[1] = opt.Backs0;
+ reps[2] = opt.Backs1;
+ reps[3] = opt.Backs2;
+ }
+ }
+ else
+ {
+ reps[0] = (pos - Base.kNumRepDistances);
+ reps[1] = opt.Backs0;
+ reps[2] = opt.Backs1;
+ reps[3] = opt.Backs2;
+ }
+ }
+ _optimum[cur].State = state;
+ _optimum[cur].Backs0 = reps[0];
+ _optimum[cur].Backs1 = reps[1];
+ _optimum[cur].Backs2 = reps[2];
+ _optimum[cur].Backs3 = reps[3];
+ UInt32 curPrice = _optimum[cur].Price;
+
+ currentByte = _matchFinder.GetIndexByte(0 - 1);
+ matchByte = _matchFinder.GetIndexByte((Int32)(0 - reps[0] - 1 - 1));
+
+ posState = (position & _posStateMask);
+
+ UInt32 curAnd1Price = curPrice +
+ _isMatch[(state.Index << Base.kNumPosStatesBitsMax) + posState].GetPrice0() +
+ _literalEncoder.GetSubCoder(position, _matchFinder.GetIndexByte(0 - 2)).
+ GetPrice(!state.IsCharState(), matchByte, currentByte);
+
+ Optimal nextOptimum = _optimum[cur + 1];
+
+ bool nextIsChar = false;
+ if (curAnd1Price < nextOptimum.Price)
+ {
+ nextOptimum.Price = curAnd1Price;
+ nextOptimum.PosPrev = cur;
+ nextOptimum.MakeAsChar();
+ nextIsChar = true;
+ }
+
+ matchPrice = curPrice + _isMatch[(state.Index << Base.kNumPosStatesBitsMax) + posState].GetPrice1();
+ repMatchPrice = matchPrice + _isRep[state.Index].GetPrice1();
+
+ if (matchByte == currentByte &&
+ !(nextOptimum.PosPrev < cur && nextOptimum.BackPrev == 0))
+ {
+ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(state, posState);
+ if (shortRepPrice <= nextOptimum.Price)
+ {
+ nextOptimum.Price = shortRepPrice;
+ nextOptimum.PosPrev = cur;
+ nextOptimum.MakeAsShortRep();
+ nextIsChar = true;
+ }
+ }
+
+ UInt32 numAvailableBytesFull = _matchFinder.GetNumAvailableBytes() + 1;
+ numAvailableBytesFull = Math.Min(kNumOpts - 1 - cur, numAvailableBytesFull);
+ numAvailableBytes = numAvailableBytesFull;
+
+ if (numAvailableBytes < 2)
+ continue;
+ if (numAvailableBytes > _numFastBytes)
+ numAvailableBytes = _numFastBytes;
+ if (!nextIsChar && matchByte != currentByte)
+ {
+ // try Literal + rep0
+ UInt32 t = Math.Min(numAvailableBytesFull - 1, _numFastBytes);
+ UInt32 lenTest2 = _matchFinder.GetMatchLen(0, reps[0], t);
+ if (lenTest2 >= 2)
+ {
+ Base.State state2 = state;
+ state2.UpdateChar();
+ UInt32 posStateNext = (position + 1) & _posStateMask;
+ UInt32 nextRepMatchPrice = curAnd1Price +
+ _isMatch[(state2.Index << Base.kNumPosStatesBitsMax) + posStateNext].GetPrice1() +
+ _isRep[state2.Index].GetPrice1();
+ {
+ UInt32 offset = cur + 1 + lenTest2;
+ while (lenEnd < offset)
+ _optimum[++lenEnd].Price = kIfinityPrice;
+ UInt32 curAndLenPrice = nextRepMatchPrice + GetRepPrice(
+ 0, lenTest2, state2, posStateNext);
+ Optimal optimum = _optimum[offset];
+ if (curAndLenPrice < optimum.Price)
+ {
+ optimum.Price = curAndLenPrice;
+ optimum.PosPrev = cur + 1;
+ optimum.BackPrev = 0;
+ optimum.Prev1IsChar = true;
+ optimum.Prev2 = false;
+ }
+ }
+ }
+ }
+
+ UInt32 startLen = 2; // speed optimization
+
+ for (UInt32 repIndex = 0; repIndex < Base.kNumRepDistances; repIndex++)
+ {
+ UInt32 lenTest = _matchFinder.GetMatchLen(0 - 1, reps[repIndex], numAvailableBytes);
+ if (lenTest < 2)
+ continue;
+ UInt32 lenTestTemp = lenTest;
+ do
+ {
+ while (lenEnd < cur + lenTest)
+ _optimum[++lenEnd].Price = kIfinityPrice;
+ UInt32 curAndLenPrice = repMatchPrice + GetRepPrice(repIndex, lenTest, state, posState);
+ Optimal optimum = _optimum[cur + lenTest];
+ if (curAndLenPrice < optimum.Price)
+ {
+ optimum.Price = curAndLenPrice;
+ optimum.PosPrev = cur;
+ optimum.BackPrev = repIndex;
+ optimum.Prev1IsChar = false;
+ }
+ }
+ while (--lenTest >= 2);
+ lenTest = lenTestTemp;
+
+ if (repIndex == 0)
+ startLen = lenTest + 1;
+
+ // if (_maxMode)
+ if (lenTest < numAvailableBytesFull)
+ {
+ UInt32 t = Math.Min(numAvailableBytesFull - 1 - lenTest, _numFastBytes);
+ UInt32 lenTest2 = _matchFinder.GetMatchLen((Int32)lenTest, reps[repIndex], t);
+ if (lenTest2 >= 2)
+ {
+ Base.State state2 = state;
+ state2.UpdateRep();
+ UInt32 posStateNext = (position + lenTest) & _posStateMask;
+ UInt32 curAndLenCharPrice =
+ repMatchPrice + GetRepPrice(repIndex, lenTest, state, posState) +
+ _isMatch[(state2.Index << Base.kNumPosStatesBitsMax) + posStateNext].GetPrice0() +
+ _literalEncoder.GetSubCoder(position + lenTest,
+ _matchFinder.GetIndexByte((Int32)lenTest - 1 - 1)).GetPrice(true,
+ _matchFinder.GetIndexByte((Int32)((Int32)lenTest - 1 - (Int32)(reps[repIndex] + 1))),
+ _matchFinder.GetIndexByte((Int32)lenTest - 1));
+ state2.UpdateChar();
+ posStateNext = (position + lenTest + 1) & _posStateMask;
+ UInt32 nextMatchPrice = curAndLenCharPrice + _isMatch[(state2.Index << Base.kNumPosStatesBitsMax) + posStateNext].GetPrice1();
+ UInt32 nextRepMatchPrice = nextMatchPrice + _isRep[state2.Index].GetPrice1();
+
+ // for(; lenTest2 >= 2; lenTest2--)
+ {
+ UInt32 offset = lenTest + 1 + lenTest2;
+ while (lenEnd < cur + offset)
+ _optimum[++lenEnd].Price = kIfinityPrice;
+ UInt32 curAndLenPrice = nextRepMatchPrice + GetRepPrice(0, lenTest2, state2, posStateNext);
+ Optimal optimum = _optimum[cur + offset];
+ if (curAndLenPrice < optimum.Price)
+ {
+ optimum.Price = curAndLenPrice;
+ optimum.PosPrev = cur + lenTest + 1;
+ optimum.BackPrev = 0;
+ optimum.Prev1IsChar = true;
+ optimum.Prev2 = true;
+ optimum.PosPrev2 = cur;
+ optimum.BackPrev2 = repIndex;
+ }
+ }
+ }
+ }
+ }
+
+ if (newLen > numAvailableBytes)
+ {
+ newLen = numAvailableBytes;
+ for (numDistancePairs = 0; newLen > _matchDistances[numDistancePairs]; numDistancePairs += 2) ;
+ _matchDistances[numDistancePairs] = newLen;
+ numDistancePairs += 2;
+ }
+ if (newLen >= startLen)
+ {
+ normalMatchPrice = matchPrice + _isRep[state.Index].GetPrice0();
+ while (lenEnd < cur + newLen)
+ _optimum[++lenEnd].Price = kIfinityPrice;
+
+ UInt32 offs = 0;
+ while (startLen > _matchDistances[offs])
+ offs += 2;
+
+ for (UInt32 lenTest = startLen; ; lenTest++)
+ {
+ UInt32 curBack = _matchDistances[offs + 1];
+ UInt32 curAndLenPrice = normalMatchPrice + GetPosLenPrice(curBack, lenTest, posState);
+ Optimal optimum = _optimum[cur + lenTest];
+ if (curAndLenPrice < optimum.Price)
+ {
+ optimum.Price = curAndLenPrice;
+ optimum.PosPrev = cur;
+ optimum.BackPrev = curBack + Base.kNumRepDistances;
+ optimum.Prev1IsChar = false;
+ }
+
+ if (lenTest == _matchDistances[offs])
+ {
+ if (lenTest < numAvailableBytesFull)
+ {
+ UInt32 t = Math.Min(numAvailableBytesFull - 1 - lenTest, _numFastBytes);
+ UInt32 lenTest2 = _matchFinder.GetMatchLen((Int32)lenTest, curBack, t);
+ if (lenTest2 >= 2)
+ {
+ Base.State state2 = state;
+ state2.UpdateMatch();
+ UInt32 posStateNext = (position + lenTest) & _posStateMask;
+ UInt32 curAndLenCharPrice = curAndLenPrice +
+ _isMatch[(state2.Index << Base.kNumPosStatesBitsMax) + posStateNext].GetPrice0() +
+ _literalEncoder.GetSubCoder(position + lenTest,
+ _matchFinder.GetIndexByte((Int32)lenTest - 1 - 1)).
+ GetPrice(true,
+ _matchFinder.GetIndexByte((Int32)lenTest - (Int32)(curBack + 1) - 1),
+ _matchFinder.GetIndexByte((Int32)lenTest - 1));
+ state2.UpdateChar();
+ posStateNext = (position + lenTest + 1) & _posStateMask;
+ UInt32 nextMatchPrice = curAndLenCharPrice + _isMatch[(state2.Index << Base.kNumPosStatesBitsMax) + posStateNext].GetPrice1();
+ UInt32 nextRepMatchPrice = nextMatchPrice + _isRep[state2.Index].GetPrice1();
+
+ UInt32 offset = lenTest + 1 + lenTest2;
+ while (lenEnd < cur + offset)
+ _optimum[++lenEnd].Price = kIfinityPrice;
+ curAndLenPrice = nextRepMatchPrice + GetRepPrice(0, lenTest2, state2, posStateNext);
+ optimum = _optimum[cur + offset];
+ if (curAndLenPrice < optimum.Price)
+ {
+ optimum.Price = curAndLenPrice;
+ optimum.PosPrev = cur + lenTest + 1;
+ optimum.BackPrev = 0;
+ optimum.Prev1IsChar = true;
+ optimum.Prev2 = true;
+ optimum.PosPrev2 = cur;
+ optimum.BackPrev2 = curBack + Base.kNumRepDistances;
+ }
+ }
+ }
+ offs += 2;
+ if (offs == numDistancePairs)
+ break;
+ }
+ }
+ }
+ }
+ }
+
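+ // Heuristic for comparing two match candidates: true when bigDist is at least
+ // 2^7 times smallDist, i.e. when the larger distance is likely to cost more bits
+ // than the slightly longer match saves.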
+ bool ChangePair(UInt32 smallDist, UInt32 bigDist)
+ {
+ const int kDif = 7;
+ return (smallDist < ((UInt32)(1) << (32 - kDif)) && bigDist >= (smallDist << kDif));
+ }
+
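+ // Writes the optional end-of-stream marker: a minimum-length match whose distance
+ // decodes to 0xFFFFFFFF (pos slot 63 with all footer and align bits set), which
+ // LZMA decoders treat as end of data.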
+ void WriteEndMarker(UInt32 posState)
+ {
+ if (!_writeEndMark)
+ return;
+
+ _isMatch[(_state.Index << Base.kNumPosStatesBitsMax) + posState].Encode(_rangeEncoder, 1);
+ _isRep[_state.Index].Encode(_rangeEncoder, 0);
+ _state.UpdateMatch();
+ UInt32 len = Base.kMatchMinLen;
+ _lenEncoder.Encode(_rangeEncoder, len - Base.kMatchMinLen, posState);
+ UInt32 posSlot = (1 << Base.kNumPosSlotBits) - 1;
+ UInt32 lenToPosState = Base.GetLenToPosState(len);
+ _posSlotEncoder[lenToPosState].Encode(_rangeEncoder, posSlot);
+ int footerBits = 30;
+ UInt32 posReduced = (((UInt32)1) << footerBits) - 1;
+ _rangeEncoder.EncodeDirectBits(posReduced >> Base.kNumAlignBits, footerBits - Base.kNumAlignBits);
+ _posAlignEncoder.ReverseEncode(_rangeEncoder, posReduced & Base.kAlignMask);
+ }
+
+ void Flush(UInt32 nowPos)
+ {
+ ReleaseMFStream();
+ WriteEndMarker(nowPos & _posStateMask);
+ _rangeEncoder.FlushData();
+ _rangeEncoder.FlushStream();
+ }
+
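+ // Encodes input until the stream is exhausted (then flushes and reports finished)
+ // or roughly 4 KiB (1 << 12 bytes) of input have been consumed, so callers can
+ // report progress between calls; in processing mode it also returns early whenever
+ // the match finder runs out of buffered data.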
+ public void CodeOneBlock(out Int64 inSize, out Int64 outSize, out bool finished)
+ {
+ inSize = 0;
+ outSize = 0;
+ finished = true;
+
+ if (_inStream != null)
+ {
+ _matchFinder.SetStream(_inStream);
+ _needReleaseMFStream = true;
+ _inStream = null;
+ }
+
+ if (_finished)
+ return;
+ _finished = true;
+
+
+ Int64 progressPosValuePrev = nowPos64;
+ if (nowPos64 == 0)
+ {
+ if (_trainSize > 0)
+ {
+ for (; _trainSize > 0 && (!_processingMode || !_matchFinder.IsDataStarved); _trainSize--)
+ _matchFinder.Skip(1);
+ if (_trainSize == 0)
+ _previousByte = _matchFinder.GetIndexByte(-1);
+ }
+ if (_processingMode && _matchFinder.IsDataStarved)
+ {
+ _finished = false;
+ return;
+ }
+ if (_matchFinder.GetNumAvailableBytes() == 0)
+ {
+ Flush((UInt32)nowPos64);
+ return;
+ }
+ UInt32 len, numDistancePairs; // out values are unused here; the call only advances the match finder
+ ReadMatchDistances(out len, out numDistancePairs);
+ UInt32 posState = (UInt32)(nowPos64) & _posStateMask;
+ _isMatch[(_state.Index << Base.kNumPosStatesBitsMax) + posState].Encode(_rangeEncoder, 0);
+ _state.UpdateChar();
+ Byte curByte = _matchFinder.GetIndexByte((Int32)(0 - _additionalOffset));
+ _literalEncoder.GetSubCoder((UInt32)(nowPos64), _previousByte).Encode(_rangeEncoder, curByte);
+ _previousByte = curByte;
+ _additionalOffset--;
+ nowPos64++;
+ }
+ if (_processingMode && _matchFinder.IsDataStarved)
+ {
+ _finished = false;
+ return;
+ }
+ if (_matchFinder.GetNumAvailableBytes() == 0)
+ {
+ Flush((UInt32)nowPos64);
+ return;
+ }
+ while (true)
+ {
+ if (_processingMode && _matchFinder.IsDataStarved)
+ {
+ _finished = false;
+ return;
+ }
+
+ UInt32 pos;
+ UInt32 len = GetOptimum((UInt32)nowPos64, out pos);
+
+ UInt32 posState = ((UInt32)nowPos64) & _posStateMask;
+ UInt32 complexState = (_state.Index << Base.kNumPosStatesBitsMax) + posState;
+ if (len == 1 && pos == 0xFFFFFFFF)
+ {
+ _isMatch[complexState].Encode(_rangeEncoder, 0);
+ Byte curByte = _matchFinder.GetIndexByte((Int32)(0 - _additionalOffset));
+ LiteralEncoder.Encoder2 subCoder = _literalEncoder.GetSubCoder((UInt32)nowPos64, _previousByte);
+ if (!_state.IsCharState())
+ {
+ Byte matchByte = _matchFinder.GetIndexByte((Int32)(0 - _repDistances[0] - 1 - _additionalOffset));
+ subCoder.EncodeMatched(_rangeEncoder, matchByte, curByte);
+ }
+ else
+ subCoder.Encode(_rangeEncoder, curByte);
+ _previousByte = curByte;
+ _state.UpdateChar();
+ }
+ else
+ {
+ _isMatch[complexState].Encode(_rangeEncoder, 1);
+ if (pos < Base.kNumRepDistances)
+ {
+ _isRep[_state.Index].Encode(_rangeEncoder, 1);
+ if (pos == 0)
+ {
+ _isRepG0[_state.Index].Encode(_rangeEncoder, 0);
+ if (len == 1)
+ _isRep0Long[complexState].Encode(_rangeEncoder, 0);
+ else
+ _isRep0Long[complexState].Encode(_rangeEncoder, 1);
+ }
+ else
+ {
+ _isRepG0[_state.Index].Encode(_rangeEncoder, 1);
+ if (pos == 1)
+ _isRepG1[_state.Index].Encode(_rangeEncoder, 0);
+ else
+ {
+ _isRepG1[_state.Index].Encode(_rangeEncoder, 1);
+ _isRepG2[_state.Index].Encode(_rangeEncoder, pos - 2);
+ }
+ }
+ if (len == 1)
+ _state.UpdateShortRep();
+ else
+ {
+ _repMatchLenEncoder.Encode(_rangeEncoder, len - Base.kMatchMinLen, posState);
+ _state.UpdateRep();
+ }
+ UInt32 distance = _repDistances[pos];
+ if (pos != 0)
+ {
+ for (UInt32 i = pos; i >= 1; i--)
+ _repDistances[i] = _repDistances[i - 1];
+ _repDistances[0] = distance;
+ }
+ }
+ else
+ {
+ _isRep[_state.Index].Encode(_rangeEncoder, 0);
+ _state.UpdateMatch();
+ _lenEncoder.Encode(_rangeEncoder, len - Base.kMatchMinLen, posState);
+ pos -= Base.kNumRepDistances;
+ UInt32 posSlot = GetPosSlot(pos);
+ UInt32 lenToPosState = Base.GetLenToPosState(len);
+ _posSlotEncoder[lenToPosState].Encode(_rangeEncoder, posSlot);
+
+ if (posSlot >= Base.kStartPosModelIndex)
+ {
+ int footerBits = (int)((posSlot >> 1) - 1);
+ UInt32 baseVal = ((2 | (posSlot & 1)) << footerBits);
+ UInt32 posReduced = pos - baseVal;
+
+ if (posSlot < Base.kEndPosModelIndex)
+ RangeCoder.BitTreeEncoder.ReverseEncode(_posEncoders,
+ baseVal - posSlot - 1, _rangeEncoder, footerBits, posReduced);
+ else
+ {
+ _rangeEncoder.EncodeDirectBits(posReduced >> Base.kNumAlignBits, footerBits - Base.kNumAlignBits);
+ _posAlignEncoder.ReverseEncode(_rangeEncoder, posReduced & Base.kAlignMask);
+ _alignPriceCount++;
+ }
+ }
+ UInt32 distance = pos;
+ for (UInt32 i = Base.kNumRepDistances - 1; i >= 1; i--)
+ _repDistances[i] = _repDistances[i - 1];
+ _repDistances[0] = distance;
+ _matchPriceCount++;
+ }
+ _previousByte = _matchFinder.GetIndexByte((Int32)(len - 1 - _additionalOffset));
+ }
+ _additionalOffset -= len;
+ nowPos64 += len;
+ if (_additionalOffset == 0)
+ {
+ // if (!_fastMode)
+ if (_matchPriceCount >= (1 << 7))
+ FillDistancesPrices();
+ if (_alignPriceCount >= Base.kAlignTableSize)
+ FillAlignPrices();
+ inSize = nowPos64;
+ outSize = _rangeEncoder.GetProcessedSizeAdd();
+ if (_processingMode && _matchFinder.IsDataStarved)
+ {
+ _finished = false;
+ return;
+ }
+ if (_matchFinder.GetNumAvailableBytes() == 0)
+ {
+ Flush((UInt32)nowPos64);
+ return;
+ }
+
+ if (nowPos64 - progressPosValuePrev >= (1 << 12))
+ {
+ _finished = false;
+ finished = false;
+ return;
+ }
+ }
+ }
+ }
+
+ void ReleaseMFStream()
+ {
+ if (_matchFinder != null && _needReleaseMFStream)
+ {
+ _matchFinder.ReleaseStream();
+ _needReleaseMFStream = false;
+ }
+ }
+
+ void SetOutStream(System.IO.Stream outStream) { _rangeEncoder.SetStream(outStream); }
+ void ReleaseOutStream() { _rangeEncoder.ReleaseStream(); }
+
+ void ReleaseStreams()
+ {
+ ReleaseMFStream();
+ ReleaseOutStream();
+ }
+
+ public void SetStreams(System.IO.Stream inStream, System.IO.Stream outStream,
+ Int64 inSize, Int64 outSize)
+ {
+ _inStream = inStream;
+ _finished = false;
+ Create();
+ SetOutStream(outStream);
+ Init();
+ _matchFinder.Init();
+
+ // if (!_fastMode)
+ {
+ FillDistancesPrices();
+ FillAlignPrices();
+ }
+
+ _lenEncoder.SetTableSize(_numFastBytes + 1 - Base.kMatchMinLen);
+ _lenEncoder.UpdateTables((UInt32)1 << _posStateBits);
+ _repMatchLenEncoder.SetTableSize(_numFastBytes + 1 - Base.kMatchMinLen);
+ _repMatchLenEncoder.UpdateTables((UInt32)1 << _posStateBits);
+
+ nowPos64 = 0;
+ }
+
+
+ public void Code(System.IO.Stream inStream, System.IO.Stream outStream,
+ Int64 inSize, Int64 outSize, ICodeProgress progress)
+ {
+ _needReleaseMFStream = false;
+ _processingMode = false;
+ try
+ {
+ SetStreams(inStream, outStream, inSize, outSize);
+ while (true)
+ {
+ Int64 processedInSize;
+ Int64 processedOutSize;
+ bool finished;
+ CodeOneBlock(out processedInSize, out processedOutSize, out finished);
+ if (finished)
+ return;
+ if (progress != null)
+ {
+ progress.SetProgress(processedInSize, processedOutSize);
+ }
+ }
+ }
+ finally
+ {
+ ReleaseStreams();
+ }
+ }
+
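+ // Streaming entry point used by LzmaStream.Write: feeds the given data through the
+ // match finder in processing mode and only flushes the range coder and releases the
+ // streams when final is true; returns the encoder's running input position.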
+ public long Code(System.IO.Stream inStream, bool final)
+ {
+ _matchFinder.SetStream(inStream);
+ _processingMode = !final;
+ try
+ {
+ while (true)
+ {
+ Int64 processedInSize;
+ Int64 processedOutSize;
+ bool finished;
+ CodeOneBlock(out processedInSize, out processedOutSize, out finished);
+ if (finished)
+ return processedInSize;
+ }
+ }
+ finally
+ {
+ _matchFinder.ReleaseStream();
+ if (final)
+ ReleaseStreams();
+ }
+ }
+
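+ // Pre-loads a preset dictionary: the training data is run through the match finder
+ // (so later matches can reference it) before any real input is encoded; only valid
+ // while nowPos64 == 0.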
+ public void Train(System.IO.Stream trainStream)
+ {
+ if (nowPos64 > 0)
+ throw new InvalidOperationException();
+ _trainSize = (uint)trainStream.Length;
+ if (_trainSize > 0)
+ {
+ _matchFinder.SetStream(trainStream);
+ for (; _trainSize > 0 && !_matchFinder.IsDataStarved; _trainSize--)
+ _matchFinder.Skip(1);
+ if (_trainSize == 0)
+ _previousByte = _matchFinder.GetIndexByte(-1);
+ _matchFinder.ReleaseStream();
+ }
+ }
+
+ const int kPropSize = 5;
+ Byte[] properties = new Byte[kPropSize];
+
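+ // Standard 5-byte LZMA property header: byte 0 packs the literal/position settings
+ // as (pb * 5 + lp) * 9 + lc, bytes 1-4 hold the dictionary size little-endian.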
+ public void WriteCoderProperties(System.IO.Stream outStream)
+ {
+ properties[0] = (Byte)((_posStateBits * 5 + _numLiteralPosStateBits) * 9 + _numLiteralContextBits);
+ for (int i = 0; i < 4; i++)
+ properties[1 + i] = (Byte)((_dictionarySize >> (8 * i)) & 0xFF);
+ outStream.Write(properties, 0, kPropSize);
+ }
+
+ UInt32[] tempPrices = new UInt32[Base.kNumFullDistances];
+ UInt32 _matchPriceCount;
+
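+ // Rebuilds the cached pos-slot and short-distance price tables; CodeOneBlock calls
+ // this once _matchPriceCount reaches 128 encoded matches.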
+ void FillDistancesPrices()
+ {
+ for (UInt32 i = Base.kStartPosModelIndex; i < Base.kNumFullDistances; i++)
+ {
+ UInt32 posSlot = GetPosSlot(i);
+ int footerBits = (int)((posSlot >> 1) - 1);
+ UInt32 baseVal = ((2 | (posSlot & 1)) << footerBits);
+ tempPrices[i] = BitTreeEncoder.ReverseGetPrice(_posEncoders,
+ baseVal - posSlot - 1, footerBits, i - baseVal);
+ }
+
+ for (UInt32 lenToPosState = 0; lenToPosState < Base.kNumLenToPosStates; lenToPosState++)
+ {
+ UInt32 posSlot;
+ RangeCoder.BitTreeEncoder encoder = _posSlotEncoder[lenToPosState];
+
+ UInt32 st = (lenToPosState << Base.kNumPosSlotBits);
+ for (posSlot = 0; posSlot < _distTableSize; posSlot++)
+ _posSlotPrices[st + posSlot] = encoder.GetPrice(posSlot);
+ for (posSlot = Base.kEndPosModelIndex; posSlot < _distTableSize; posSlot++)
+ _posSlotPrices[st + posSlot] += ((((posSlot >> 1) - 1) - Base.kNumAlignBits) << RangeCoder.BitEncoder.kNumBitPriceShiftBits);
+
+ UInt32 st2 = lenToPosState * Base.kNumFullDistances;
+ UInt32 i;
+ for (i = 0; i < Base.kStartPosModelIndex; i++)
+ _distancesPrices[st2 + i] = _posSlotPrices[st + i];
+ for (; i < Base.kNumFullDistances; i++)
+ _distancesPrices[st2 + i] = _posSlotPrices[st + GetPosSlot(i)] + tempPrices[i];
+ }
+ _matchPriceCount = 0;
+ }
+
+ void FillAlignPrices()
+ {
+ for (UInt32 i = 0; i < Base.kAlignTableSize; i++)
+ _alignPrices[i] = _posAlignEncoder.ReverseGetPrice(i);
+ _alignPriceCount = 0;
+ }
+
+
+ static string[] kMatchFinderIDs =
+ {
+ "BT2",
+ "BT4",
+ };
+
+ static int FindMatchFinder(string s)
+ {
+ for (int m = 0; m < kMatchFinderIDs.Length; m++)
+ if (s == kMatchFinderIDs[m])
+ return m;
+ return -1;
+ }
+
+ public void SetCoderProperties(CoderPropID[] propIDs, object[] properties)
+ {
+ for (UInt32 i = 0; i < properties.Length; i++)
+ {
+ object prop = properties[i];
+ switch (propIDs[i])
+ {
+ case CoderPropID.NumFastBytes:
+ {
+ if (!(prop is Int32))
+ throw new InvalidParamException();
+ Int32 numFastBytes = (Int32)prop;
+ if (numFastBytes < 5 || numFastBytes > Base.kMatchMaxLen)
+ throw new InvalidParamException();
+ _numFastBytes = (UInt32)numFastBytes;
+ break;
+ }
+ case CoderPropID.Algorithm:
+ {
+ /*
+ if (!(prop is Int32))
+ throw new InvalidParamException();
+ Int32 maximize = (Int32)prop;
+ _fastMode = (maximize == 0);
+ _maxMode = (maximize >= 2);
+ */
+ break;
+ }
+ case CoderPropID.MatchFinder:
+ {
+ if (!(prop is String))
+ throw new InvalidParamException();
+ EMatchFinderType matchFinderIndexPrev = _matchFinderType;
+ int m = FindMatchFinder(((string)prop).ToUpper());
+ if (m < 0)
+ throw new InvalidParamException();
+ _matchFinderType = (EMatchFinderType)m;
+ if (_matchFinder != null && matchFinderIndexPrev != _matchFinderType)
+ {
+ _dictionarySizePrev = 0xFFFFFFFF;
+ _matchFinder = null;
+ }
+ break;
+ }
+ case CoderPropID.DictionarySize:
+ {
+ const int kDicLogSizeMaxCompress = 30;
+ if (!(prop is Int32))
+ throw new InvalidParamException();
+ Int32 dictionarySize = (Int32)prop;
+ if (dictionarySize < (UInt32)(1 << Base.kDicLogSizeMin) ||
+ dictionarySize > (UInt32)(1 << kDicLogSizeMaxCompress))
+ throw new InvalidParamException();
+ _dictionarySize = (UInt32)dictionarySize;
+ int dicLogSize;
+ for (dicLogSize = 0; dicLogSize < (UInt32)kDicLogSizeMaxCompress; dicLogSize++)
+ if (dictionarySize <= ((UInt32)(1) << dicLogSize))
+ break;
+ _distTableSize = (UInt32)dicLogSize * 2;
+ break;
+ }
+ case CoderPropID.PosStateBits:
+ {
+ if (!(prop is Int32))
+ throw new InvalidParamException();
+ Int32 v = (Int32)prop;
+ if (v < 0 || v > (UInt32)Base.kNumPosStatesBitsEncodingMax)
+ throw new InvalidParamException();
+ _posStateBits = (int)v;
+ _posStateMask = (((UInt32)1) << (int)_posStateBits) - 1;
+ break;
+ }
+ case CoderPropID.LitPosBits:
+ {
+ if (!(prop is Int32))
+ throw new InvalidParamException();
+ Int32 v = (Int32)prop;
+ if (v < 0 || v > (UInt32)Base.kNumLitPosStatesBitsEncodingMax)
+ throw new InvalidParamException();
+ _numLiteralPosStateBits = (int)v;
+ break;
+ }
+ case CoderPropID.LitContextBits:
+ {
+ if (!(prop is Int32))
+ throw new InvalidParamException();
+ Int32 v = (Int32)prop;
+ if (v < 0 || v > (UInt32)Base.kNumLitContextBitsMax)
+ throw new InvalidParamException();
+ _numLiteralContextBits = (int)v;
+ break;
+ }
+ case CoderPropID.EndMarker:
+ {
+ if (!(prop is Boolean))
+ throw new InvalidParamException();
+ SetWriteEndMarkerMode((Boolean)prop);
+ break;
+ }
+ default:
+ throw new InvalidParamException();
+ }
+ }
+ }
+
+ uint _trainSize = 0;
+ public void SetTrainSize(uint trainSize)
+ {
+ _trainSize = trainSize;
+ }
+
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaEncoderProperties.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaEncoderProperties.cs
new file mode 100644
index 00000000..db866ebf
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaEncoderProperties.cs
@@ -0,0 +1,57 @@
+using Compress.SevenZip.Common;
+
+namespace Compress.SevenZip.Compress.LZMA
+{
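+ // Builds the parallel propIDs/properties arrays expected by Encoder.SetCoderProperties.
+ // Only the dictionary size, number of fast bytes and end-of-stream marker are
+ // configurable here; the remaining settings are fixed by this port.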
+ public class LzmaEncoderProperties
+ {
+ internal CoderPropID[] propIDs;
+ internal object[] properties;
+
+ public LzmaEncoderProperties()
+ : this(false)
+ {
+ }
+
+ public LzmaEncoderProperties(bool eos)
+ : this(eos, 1 << 20)
+ {
+ }
+
+ public LzmaEncoderProperties(bool eos, int dictionary)
+ : this(eos, dictionary, 32)
+ {
+ }
+
+ public LzmaEncoderProperties(bool eos, int dictionary, int numFastBytes)
+ {
+ int posStateBits = 2;
+ int litContextBits = 4;
+ int litPosBits = 0;
+ int algorithm = 2;
+ string mf = "bt4";
+
+ propIDs = new CoderPropID[]
+ {
+ CoderPropID.DictionarySize,
+ CoderPropID.PosStateBits,
+ CoderPropID.LitContextBits,
+ CoderPropID.LitPosBits,
+ CoderPropID.Algorithm,
+ CoderPropID.NumFastBytes,
+ CoderPropID.MatchFinder,
+ CoderPropID.EndMarker
+ };
+ properties = new object[]
+ {
+ dictionary,
+ posStateBits,
+ litContextBits,
+ litPosBits,
+ algorithm,
+ numFastBytes,
+ mf,
+ eos
+ };
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaStream.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaStream.cs
new file mode 100644
index 00000000..564e162a
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/LZMA/LzmaStream.cs
@@ -0,0 +1,318 @@
+using System;
+using System.IO;
+using Compress.SevenZip.Common;
+using Compress.SevenZip.Compress.LZ;
+
+namespace Compress.SevenZip.Compress.LZMA
+{
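+ // Stream wrapper around the LZMA coder: constructed with raw properties and an input
+ // stream it decompresses LZMA or LZMA2 data on Read; constructed with
+ // LzmaEncoderProperties and an output stream it compresses plain LZMA on Write
+ // (LZMA2 encoding is not implemented).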
+ public class LzmaStream : Stream
+ {
+ private Stream inputStream;
+ private long inputSize;
+ private long outputSize;
+
+ private int dictionarySize;
+ private OutWindow outWindow = new OutWindow();
+ private RangeCoder.Decoder rangeDecoder = new RangeCoder.Decoder();
+ private Decoder decoder;
+
+ private long position = 0;
+ private bool endReached = false;
+ private long availableBytes;
+ private long rangeDecoderLimit;
+ private long inputPosition = 0;
+
+ // LZMA2
+ private bool isLZMA2;
+ private bool uncompressedChunk = false;
+ private bool needDictReset = true;
+ private bool needProps = true;
+ private byte[] props = new byte[5];
+
+ private Encoder encoder;
+
+ public LzmaStream(byte[] properties, Stream inputStream)
+ : this(properties, inputStream, -1, -1, null, properties.Length < 5)
+ {
+ }
+
+ public LzmaStream(byte[] properties, Stream inputStream, long inputSize)
+ : this(properties, inputStream, inputSize, -1, null, properties.Length < 5)
+ {
+ }
+
+ public LzmaStream(byte[] properties, Stream inputStream, long inputSize, long outputSize)
+ : this(properties, inputStream, inputSize, outputSize, null, properties.Length < 5)
+ {
+ }
+
+ public LzmaStream(byte[] properties, Stream inputStream, long inputSize, long outputSize,
+ Stream presetDictionary, bool isLZMA2)
+ {
+ this.inputStream = inputStream;
+ this.inputSize = inputSize;
+ this.outputSize = outputSize;
+ this.isLZMA2 = isLZMA2;
+
+ if (!isLZMA2)
+ {
+ dictionarySize = BitConverter.ToInt32(properties, 1);
+ outWindow.Create(dictionarySize);
+ if (presetDictionary != null)
+ outWindow.Train(presetDictionary);
+
+ rangeDecoder.Init(inputStream);
+
+ decoder = new Decoder();
+ decoder.SetDecoderProperties(properties);
+ props = properties;
+
+ availableBytes = outputSize < 0 ? long.MaxValue : outputSize;
+ rangeDecoderLimit = inputSize;
+ }
+ else
+ {
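+ // LZMA2 dictionary-size byte p: size = (2 | (p & 1)) << ((p >> 1) + 11).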
+ dictionarySize = 2 | (properties[0] & 1);
+ dictionarySize <<= (properties[0] >> 1) + 11;
+
+ outWindow.Create(dictionarySize);
+ if (presetDictionary != null)
+ {
+ outWindow.Train(presetDictionary);
+ needDictReset = false;
+ }
+
+ props = new byte[1];
+ availableBytes = 0;
+ }
+ }
+
+ public LzmaStream(LzmaEncoderProperties properties, bool isLZMA2, Stream outputStream)
+ : this(properties, isLZMA2, null, outputStream)
+ {
+ }
+
+ public LzmaStream(LzmaEncoderProperties properties, bool isLZMA2, Stream presetDictionary, Stream outputStream)
+ {
+ this.isLZMA2 = isLZMA2;
+ availableBytes = 0;
+ endReached = true;
+
+ if (isLZMA2)
+ throw new NotImplementedException();
+
+ encoder = new Encoder();
+ encoder.SetCoderProperties(properties.propIDs, properties.properties);
+ MemoryStream propStream = new MemoryStream(5);
+ encoder.WriteCoderProperties(propStream);
+ props = propStream.ToArray();
+
+ encoder.SetStreams(null, outputStream, -1, -1);
+ if (presetDictionary != null)
+ encoder.Train(presetDictionary);
+ }
+
+ public override bool CanRead
+ {
+ get { return encoder == null; }
+ }
+
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+ public override bool CanWrite
+ {
+ get { return encoder != null; }
+ }
+
+ public override void Flush()
+ {
+ }
+
+ protected override void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ if (encoder != null)
+ position = encoder.Code(null, true);
+ }
+ base.Dispose(disposing);
+ }
+
+ public override long Length
+ {
+ get { return position + availableBytes; }
+ }
+
+ public override long Position
+ {
+ get
+ {
+ return position;
+ }
+ set
+ {
+ throw new NotImplementedException();
+ }
+ }
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ if (endReached)
+ return 0;
+
+ int total = 0;
+ while (total < count)
+ {
+ if (availableBytes == 0)
+ {
+ if (isLZMA2)
+ decodeChunkHeader();
+ else
+ endReached = true;
+ if (endReached)
+ break;
+ }
+
+ int toProcess = count - total;
+ if (toProcess > availableBytes)
+ toProcess = (int)availableBytes;
+
+ outWindow.SetLimit(toProcess);
+ if (uncompressedChunk)
+ {
+ inputPosition += outWindow.CopyStream(inputStream, toProcess);
+ }
+ else if (decoder.Code(dictionarySize, outWindow, rangeDecoder)
+ && outputSize < 0)
+ {
+ availableBytes = outWindow.AvailableBytes;
+ }
+
+ int read = outWindow.Read(buffer, offset, toProcess);
+ total += read;
+ offset += read;
+ position += read;
+ availableBytes -= read;
+
+ if (availableBytes == 0 && !uncompressedChunk)
+ {
+ rangeDecoder.ReleaseStream();
+ if (!rangeDecoder.IsFinished || (rangeDecoderLimit >= 0 && rangeDecoder.Total != rangeDecoderLimit))
+ throw new DataErrorException();
+ inputPosition += rangeDecoder.Total;
+ if (outWindow.HasPending)
+ throw new DataErrorException();
+ }
+ }
+
+ if (endReached)
+ {
+ if (inputSize >= 0 && inputPosition != inputSize)
+ throw new DataErrorException();
+ if (outputSize >= 0 && position != outputSize)
+ throw new DataErrorException();
+ }
+
+ return total;
+ }
+
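+ // Parses an LZMA2 chunk header. Control byte: 0x00 ends the stream, 0x01/0x02 start
+ // an uncompressed chunk (0x01 also resets the dictionary); values >= 0x80 start an
+ // LZMA chunk whose low 5 bits are the top bits of the unpacked size, >= 0xA0 resets
+ // the decoder state, >= 0xC0 also carries a new properties byte and >= 0xE0
+ // additionally resets the dictionary.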
+ private void decodeChunkHeader()
+ {
+ int control = inputStream.ReadByte();
+ inputPosition++;
+
+ if (control == 0x00)
+ {
+ endReached = true;
+ return;
+ }
+
+ if (control >= 0xE0 || control == 0x01)
+ {
+ needProps = true;
+ needDictReset = false;
+ outWindow.Reset();
+ }
+ else if (needDictReset)
+ throw new DataErrorException();
+
+ if (control >= 0x80)
+ {
+ uncompressedChunk = false;
+
+ availableBytes = (control & 0x1F) << 16;
+ availableBytes += (inputStream.ReadByte() << 8) + inputStream.ReadByte() + 1;
+ inputPosition += 2;
+
+ rangeDecoderLimit = (inputStream.ReadByte() << 8) + inputStream.ReadByte() + 1;
+ inputPosition += 2;
+
+ if (control >= 0xC0)
+ {
+ needProps = false;
+ props[0] = (byte)inputStream.ReadByte();
+ inputPosition++;
+
+ decoder = new Decoder();
+ decoder.SetDecoderProperties(props);
+ }
+ else if (needProps)
+ throw new DataErrorException();
+ else if (control >= 0xA0)
+ {
+ decoder = new Decoder();
+ decoder.SetDecoderProperties(props);
+ }
+
+ rangeDecoder.Init(inputStream);
+ }
+ else if (control > 0x02)
+ throw new DataErrorException();
+ else
+ {
+ uncompressedChunk = true;
+ availableBytes = (inputStream.ReadByte() << 8) + inputStream.ReadByte() + 1;
+ inputPosition += 2;
+ }
+ }
+
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ // Only forward seeks relative to the current position are supported; the
+ // skipped range is decoded into a scratch buffer and thrown away.
+ if (origin != SeekOrigin.Current)
+ throw new NotImplementedException();
+
+ byte[] tmpBuff = new byte[1024];
+ long sizeToGo = offset;
+ while (sizeToGo > 0)
+ {
+ int sizeNow = sizeToGo > 1024 ? 1024 : (int)sizeToGo;
+ int read = Read(tmpBuff, 0, sizeNow);
+ if (read == 0)
+ break; // end of stream reached before the requested offset
+ sizeToGo -= read;
+ }
+
+ return offset;
+ }
+
+ public override void SetLength(long value)
+ {
+ throw new NotImplementedException();
+ }
+
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ if (encoder != null)
+ position = encoder.Code(new MemoryStream(buffer, offset, count), false);
+ }
+
+ public byte[] Properties
+ {
+ get
+ {
+ return props;
+ }
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/FreqData.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/FreqData.cs
new file mode 100644
index 00000000..5dfcd62e
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/FreqData.cs
@@ -0,0 +1,75 @@
+using System.Text;
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal class FreqData : Pointer
+ {
+ internal const int Size = 6;
+
+ // struct FreqData
+ // {
+ // ushort SummFreq;
+ // STATE _PACK_ATTR * Stats;
+ // };
+
+ internal FreqData(byte[] Memory)
+ : base(Memory)
+ {
+ }
+
+ internal int SummFreq
+ {
+ get
+ {
+ return Utility.readShortLittleEndian(Memory, Address) & 0xffff;
+ }
+
+ set
+ {
+ Utility.WriteLittleEndian(Memory, Address, (short)value);
+ }
+
+ }
+
+ internal FreqData Initialize(byte[] mem)
+ {
+ return base.Initialize(mem);
+ }
+
+ internal void IncrementSummFreq(int dSummFreq)
+ {
+ Utility.incShortLittleEndian(Memory, Address, (short)dSummFreq);
+ }
+
+ internal int GetStats()
+ {
+ return Utility.readIntLittleEndian(Memory, Address + 2);
+ }
+
+ internal virtual void SetStats(State state)
+ {
+ SetStats(state.Address);
+ }
+
+ internal void SetStats(int state)
+ {
+ Utility.WriteLittleEndian(Memory, Address + 2, state);
+ }
+
+ public override System.String ToString()
+ {
+ StringBuilder buffer = new StringBuilder();
+ buffer.Append("FreqData[");
+ buffer.Append("\n Address=");
+ buffer.Append(Address);
+ buffer.Append("\n size=");
+ buffer.Append(Size);
+ buffer.Append("\n summFreq=");
+ buffer.Append(SummFreq);
+ buffer.Append("\n stats=");
+ buffer.Append(GetStats());
+ buffer.Append("\n]");
+ return buffer.ToString();
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/ModelPPM.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/ModelPPM.cs
new file mode 100644
index 00000000..081a70d5
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/ModelPPM.cs
@@ -0,0 +1,945 @@
+using System.IO;
+using System.Text;
+using Decoder = Compress.SevenZip.Compress.RangeCoder.Decoder;
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal class ModelPPM
+ {
+ private void InitBlock()
+ {
+ for (int i = 0; i < 25; i++)
+ {
+ SEE2Cont[i] = new SEE2Context[16];
+ }
+ for (int i2 = 0; i2 < 128; i2++)
+ {
+ binSumm[i2] = new int[64];
+ }
+ }
+ public SubAllocator SubAlloc
+ {
+ get
+ {
+ return subAlloc;
+ }
+
+ }
+ virtual public SEE2Context DummySEE2Cont
+ {
+ get
+ {
+ return dummySEE2Cont;
+ }
+
+ }
+ virtual public int InitRL
+ {
+ get
+ {
+ return initRL;
+ }
+
+ }
+ virtual public int EscCount
+ {
+ get
+ {
+ return escCount;
+ }
+
+ set
+ {
+ this.escCount = value & 0xff;
+ }
+
+ }
+ virtual public int[] CharMask
+ {
+ get
+ {
+ return charMask;
+ }
+
+ }
+ virtual public int NumMasked
+ {
+ get
+ {
+ return numMasked;
+ }
+
+ set
+ {
+ this.numMasked = value;
+ }
+
+ }
+ virtual public int PrevSuccess
+ {
+ get
+ {
+ return prevSuccess;
+ }
+
+ set
+ {
+ this.prevSuccess = value & 0xff;
+ }
+
+ }
+ virtual public int InitEsc
+ {
+ get
+ {
+ return initEsc;
+ }
+
+ set
+ {
+ this.initEsc = value;
+ }
+
+ }
+ virtual public int RunLength
+ {
+ get
+ {
+ return runLength;
+ }
+
+ set
+ {
+ this.runLength = value;
+ }
+
+ }
+ virtual public int HiBitsFlag
+ {
+ get
+ {
+ return hiBitsFlag;
+ }
+
+ set
+ {
+ this.hiBitsFlag = value & 0xff;
+ }
+
+ }
+ virtual public int[][] BinSumm
+ {
+ get
+ {
+ return binSumm;
+ }
+
+ }
+ internal RangeCoder Coder
+ {
+ get
+ {
+ return coder;
+ }
+
+ }
+ internal State FoundState
+ {
+ get
+ {
+ return foundState;
+ }
+
+ }
+ virtual public byte[] Heap
+ {
+ get
+ {
+ return subAlloc.Heap;
+ }
+
+ }
+ virtual public int OrderFall
+ {
+ get
+ {
+ return orderFall;
+ }
+
+ }
+ public const int MAX_O = 64; /* maximum allowed model order */
+
+ public const int INT_BITS = 7;
+
+ public const int PERIOD_BITS = 7;
+
+ public static readonly int TOT_BITS = INT_BITS + PERIOD_BITS;
+
+ public static readonly int INTERVAL = 1 << INT_BITS;
+
+ public static readonly int BIN_SCALE = 1 << TOT_BITS;
+
+ public const int MAX_FREQ = 124;
+
+ private SEE2Context[][] SEE2Cont = new SEE2Context[25][];
+
+ private SEE2Context dummySEE2Cont;
+
+ private PPMContext minContext; //medContext
+
+ private PPMContext maxContext;
+
+ private State foundState; // found next state transition
+
+ private int numMasked, initEsc, orderFall, maxOrder, runLength, initRL;
+
+ private int[] charMask = new int[256];
+
+ private int[] NS2Indx = new int[256];
+
+ private int[] NS2BSIndx = new int[256];
+
+ private int[] HB2Flag = new int[256];
+
+ // byte EscCount, PrevSuccess, HiBitsFlag;
+ private int escCount, prevSuccess, hiBitsFlag;
+
+ private int[][] binSumm = new int[128][]; // binary SEE-contexts
+
+ private RangeCoder coder;
+
+ private SubAllocator subAlloc = new SubAllocator();
+
+ private static int[] InitBinEsc = new int[] { 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051 };
+
+ // Temp fields
+ private State tempState1 = new State(null);
+ private State tempState2 = new State(null);
+ private State tempState3 = new State(null);
+ private State tempState4 = new State(null);
+ private StateRef tempStateRef1 = new StateRef();
+ private StateRef tempStateRef2 = new StateRef();
+ private PPMContext tempPPMContext1 = new PPMContext(null);
+ private PPMContext tempPPMContext2 = new PPMContext(null);
+ private PPMContext tempPPMContext3 = new PPMContext(null);
+ private PPMContext tempPPMContext4 = new PPMContext(null);
+ private int[] ps = new int[MAX_O];
+
+ public ModelPPM()
+ {
+ InitBlock();
+ minContext = null;
+ maxContext = null;
+ //medContext = null;
+ }
+
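+ // Resets the model to order 0: re-initialises the sub-allocator, builds a root
+ // context containing all 256 symbols at frequency 1, and re-seeds the binary-context
+ // escape estimates (binSumm) from InitBinEsc.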
+ private void restartModelRare()
+ {
+ Utility.Fill(charMask, 0);
+ subAlloc.initSubAllocator();
+ initRL = -(maxOrder < 12 ? maxOrder : 12) - 1;
+ int addr = subAlloc.allocContext();
+ minContext.Address = addr;
+ maxContext.Address = addr;
+ minContext.setSuffix(0);
+ orderFall = maxOrder;
+ minContext.NumStats = 256;
+ minContext.FreqData.SummFreq = minContext.NumStats + 1;
+
+ addr = subAlloc.allocUnits(256 / 2);
+ foundState.Address = addr;
+ minContext.FreqData.SetStats(addr);
+
+ State state = new State(subAlloc.Heap);
+ addr = minContext.FreqData.GetStats();
+ runLength = initRL;
+ prevSuccess = 0;
+ for (int i = 0; i < 256; i++)
+ {
+ state.Address = addr + i * State.Size;
+ state.Symbol = i;
+ state.Freq = 1;
+ state.SetSuccessor(0);
+ }
+
+ for (int i = 0; i < 128; i++)
+ {
+ for (int k = 0; k < 8; k++)
+ {
+ for (int m = 0; m < 64; m += 8)
+ {
+ binSumm[i][k + m] = BIN_SCALE - InitBinEsc[k] / (i + 2);
+ }
+ }
+ }
+ for (int i = 0; i < 25; i++)
+ {
+ for (int k = 0; k < 16; k++)
+ {
+ SEE2Cont[i][k].Initialize(5 * i + 10);
+ }
+ }
+ }
+
+ private void startModelRare(int MaxOrder)
+ {
+ int i, k, m, Step;
+ escCount = 1;
+ this.maxOrder = MaxOrder;
+ restartModelRare();
+ // Bug Fixed
+ NS2BSIndx[0] = 0;
+ NS2BSIndx[1] = 2;
+ for (int j = 0; j < 9; j++)
+ {
+ NS2BSIndx[2 + j] = 4;
+ }
+ for (int j = 0; j < 256 - 11; j++)
+ {
+ NS2BSIndx[11 + j] = 6;
+ }
+ for (i = 0; i < 3; i++)
+ {
+ NS2Indx[i] = i;
+ }
+ for (m = i, k = 1, Step = 1; i < 256; i++)
+ {
+ NS2Indx[i] = m;
+ if ((--k) == 0)
+ {
+ k = ++Step;
+ m++;
+ }
+ }
+ for (int j = 0; j < 0x40; j++)
+ {
+ HB2Flag[j] = 0;
+ }
+ for (int j = 0; j < 0x100 - 0x40; j++)
+ {
+ HB2Flag[0x40 + j] = 0x08;
+ }
+ dummySEE2Cont.Shift = PERIOD_BITS;
+ }
+
+ private void clearMask()
+ {
+ escCount = 1;
+ Utility.Fill(charMask, 0);
+ }
+
+
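+ // Decodes one symbol: try the current (minimum) context, escape to shorter suffix
+ // contexts while the symbol stays masked, then either follow the successor pointer
+ // or update the model. Returns -1 when the heap pointers are found to be out of range.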
+ public virtual int decodeChar()
+ {
+ // Debug
+ //subAlloc.dumpHeap();
+
+ if (minContext.Address <= subAlloc.PText || minContext.Address > subAlloc.HeapEnd)
+ {
+ return (-1);
+ }
+
+ if (minContext.NumStats != 1)
+ {
+ if (minContext.FreqData.GetStats() <= subAlloc.PText || minContext.FreqData.GetStats() > subAlloc.HeapEnd)
+ {
+ return (-1);
+ }
+ if (!minContext.decodeSymbol1(this))
+ {
+ return (-1);
+ }
+ }
+ else
+ {
+ minContext.decodeBinSymbol(this);
+ }
+ coder.Decode();
+ while (foundState.Address == 0)
+ {
+ coder.AriDecNormalize();
+ do
+ {
+ orderFall++;
+ minContext.Address = minContext.getSuffix(); // =MinContext->Suffix;
+ if (minContext.Address <= subAlloc.PText || minContext.Address > subAlloc.HeapEnd)
+ {
+ return (-1);
+ }
+ }
+ while (minContext.NumStats == numMasked);
+ if (!minContext.decodeSymbol2(this))
+ {
+ return (-1);
+ }
+ coder.Decode();
+ }
+ int Symbol = foundState.Symbol;
+ if ((orderFall == 0) && foundState.GetSuccessor() > subAlloc.PText)
+ {
+ // MinContext=MaxContext=FoundState->Successor;
+ int addr = foundState.GetSuccessor();
+ minContext.Address = addr;
+ maxContext.Address = addr;
+ }
+ else
+ {
+ updateModel();
+ //this.foundState.Address=foundState.Address);//TODO just 4 debugging
+ if (escCount == 0)
+ {
+ clearMask();
+ }
+ }
+ coder.AriDecNormalize(); // ARI_DEC_NORMALIZE(Coder.code,Coder.low,Coder.range,Coder.UnpackRead);
+ return (Symbol);
+ }
+
+ public virtual SEE2Context[][] getSEE2Cont()
+ {
+ return SEE2Cont;
+ }
+
+ public virtual void incEscCount(int dEscCount)
+ {
+ EscCount = EscCount + dEscCount;
+ }
+
+ public virtual void incRunLength(int dRunLength)
+ {
+ RunLength = RunLength + dRunLength;
+ }
+
+ public virtual int[] getHB2Flag()
+ {
+ return HB2Flag;
+ }
+
+ public virtual int[] getNS2BSIndx()
+ {
+ return NS2BSIndx;
+ }
+
+ public virtual int[] getNS2Indx()
+ {
+ return NS2Indx;
+ }
+
+ private int createSuccessors(bool Skip, State p1)
+ {
+ //State upState = tempState1.Initialize(null);
+ StateRef upState = tempStateRef2;
+ State tempState = tempState1.Initialize(Heap);
+
+ // PPM_CONTEXT* pc=MinContext, * UpBranch=FoundState->Successor;
+ PPMContext pc = tempPPMContext1.Initialize(Heap);
+ pc.Address = minContext.Address;
+ PPMContext upBranch = tempPPMContext2.Initialize(Heap);
+ upBranch.Address = foundState.GetSuccessor();
+
+ // STATE * p, * ps[MAX_O], ** pps=ps;
+ State p = tempState2.Initialize(Heap);
+ int pps = 0;
+
+ bool noLoop = false;
+
+ if (!Skip)
+ {
+ ps[pps++] = foundState.Address; // *pps++ = FoundState;
+ if (pc.getSuffix() == 0)
+ {
+ noLoop = true;
+ }
+ }
+ if (!noLoop)
+ {
+ bool loopEntry = false;
+ if (p1.Address != 0)
+ {
+ p.Address = p1.Address;
+ pc.Address = pc.getSuffix(); // =pc->Suffix;
+ loopEntry = true;
+ }
+ do
+ {
+ if (!loopEntry)
+ {
+ pc.Address = pc.getSuffix(); // pc=pc->Suffix;
+ if (pc.NumStats != 1)
+ {
+ p.Address = pc.FreqData.GetStats(); // p=pc->U.Stats
+ if (p.Symbol != foundState.Symbol)
+ {
+ do
+ {
+ p.IncrementAddress();
+ }
+ while (p.Symbol != foundState.Symbol);
+ }
+ }
+ else
+ {
+ p.Address = pc.getOneState().Address; // p=&(pc->OneState);
+ }
+ } // LOOP_ENTRY:
+ loopEntry = false;
+ if (p.GetSuccessor() != upBranch.Address)
+ {
+ pc.Address = p.GetSuccessor(); // =p->Successor;
+ break;
+ }
+ ps[pps++] = p.Address;
+ }
+ while (pc.getSuffix() != 0);
+ } // NO_LOOP:
+ if (pps == 0)
+ {
+ return pc.Address;
+ }
+ upState.Symbol = Heap[upBranch.Address]; // UpState.Symbol=*(byte*)
+ // UpBranch;
+ // UpState.Successor=(PPM_CONTEXT*) (((byte*) UpBranch)+1);
+ upState.SetSuccessor(upBranch.Address + 1); //TODO check if +1 necessary
+ if (pc.NumStats != 1)
+ {
+ if (pc.Address <= subAlloc.PText)
+ {
+ return (0);
+ }
+ p.Address = pc.FreqData.GetStats();
+ if (p.Symbol != upState.Symbol)
+ {
+ do
+ {
+ p.IncrementAddress();
+ }
+ while (p.Symbol != upState.Symbol);
+ }
+ int cf = p.Freq - 1;
+ int s0 = pc.FreqData.SummFreq - pc.NumStats - cf;
+ // UpState.Freq=1+((2*cf <= s0)?(5*cf > s0):((2*cf+3*s0-1)/(2*s0)));
+ upState.Freq = 1 + ((2 * cf <= s0) ? (5 * cf > s0 ? 1 : 0) : ((2 * cf + 3 * s0 - 1) / (2 * s0)));
+ }
+ else
+ {
+ upState.Freq = pc.getOneState().Freq; // UpState.Freq=pc->OneState.Freq;
+ }
+ do
+ {
+ // pc = pc->createChild(this,*--pps,UpState);
+ tempState.Address = ps[--pps];
+ pc.Address = pc.createChild(this, tempState, upState);
+ if (pc.Address == 0)
+ {
+ return 0;
+ }
+ }
+ while (pps != 0);
+ return pc.Address;
+ }
+
+ private void updateModelRestart()
+ {
+ restartModelRare();
+ escCount = 0;
+ }
+
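+ // Model update after a decoded symbol: bumps the symbol's frequency in the suffix
+ // context, builds or extends successor contexts, and appends the symbol to every
+ // context between maxContext and minContext. Any allocation failure falls back to
+ // updateModelRestart().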
+ private void updateModel()
+ {
+ //System.out.println("ModelPPM.updateModel()");
+ // STATE fs = *FoundState, *p = NULL;
+ StateRef fs = tempStateRef1;
+ fs.Values = foundState;
+ State p = tempState3.Initialize(Heap);
+ State tempState = tempState4.Initialize(Heap);
+
+ PPMContext pc = tempPPMContext3.Initialize(Heap);
+ PPMContext successor = tempPPMContext4.Initialize(Heap);
+
+ int ns1, ns, cf, sf, s0;
+ pc.Address = minContext.getSuffix();
+ if (fs.Freq < MAX_FREQ / 4 && pc.Address != 0)
+ {
+ if (pc.NumStats != 1)
+ {
+ p.Address = pc.FreqData.GetStats();
+ if (p.Symbol != fs.Symbol)
+ {
+ do
+ {
+ p.IncrementAddress();
+ }
+ while (p.Symbol != fs.Symbol);
+ tempState.Address = p.Address - State.Size;
+ if (p.Freq >= tempState.Freq)
+ {
+ State.PPMDSwap(p, tempState);
+ p.DecrementAddress();
+ }
+ }
+ if (p.Freq < MAX_FREQ - 9)
+ {
+ p.IncrementFreq(2);
+ pc.FreqData.IncrementSummFreq(2);
+ }
+ }
+ else
+ {
+ p.Address = pc.getOneState().Address;
+ if (p.Freq < 32)
+ {
+ p.IncrementFreq(1);
+ }
+ }
+ }
+ if (orderFall == 0)
+ {
+ foundState.SetSuccessor(createSuccessors(true, p));
+ minContext.Address = foundState.GetSuccessor();
+ maxContext.Address = foundState.GetSuccessor();
+ if (minContext.Address == 0)
+ {
+ updateModelRestart();
+ return;
+ }
+ return;
+ }
+ subAlloc.Heap[subAlloc.PText] = (byte)fs.Symbol;
+ subAlloc.incPText();
+ successor.Address = subAlloc.PText;
+ if (subAlloc.PText >= subAlloc.FakeUnitsStart)
+ {
+ updateModelRestart();
+ return;
+ }
+ // // Debug
+ // subAlloc.dumpHeap();
+ if (fs.GetSuccessor() != 0)
+ {
+ if (fs.GetSuccessor() <= subAlloc.PText)
+ {
+ fs.SetSuccessor(createSuccessors(false, p));
+ if (fs.GetSuccessor() == 0)
+ {
+ updateModelRestart();
+ return;
+ }
+ }
+ if (--orderFall == 0)
+ {
+ successor.Address = fs.GetSuccessor();
+ if (maxContext.Address != minContext.Address)
+ {
+ subAlloc.decPText(1);
+ }
+ }
+ }
+ else
+ {
+ foundState.SetSuccessor(successor.Address);
+ fs.SetSuccessor(minContext);
+ }
+ // // Debug
+ // subAlloc.dumpHeap();
+ ns = minContext.NumStats;
+ s0 = minContext.FreqData.SummFreq - (ns) - (fs.Freq - 1);
+ for (pc.Address = maxContext.Address; pc.Address != minContext.Address; pc.Address = pc.getSuffix())
+ {
+ if ((ns1 = pc.NumStats) != 1)
+ {
+ if ((ns1 & 1) == 0)
+ {
+ //System.out.println(ns1);
+ pc.FreqData.SetStats(subAlloc.expandUnits(pc.FreqData.GetStats(), Utility.URShift(ns1, 1)));
+ if (pc.FreqData.GetStats() == 0)
+ {
+ updateModelRestart();
+ return;
+ }
+ }
+ // bug fixed
+ // int sum = ((2 * ns1 < ns) ? 1 : 0) +
+ // 2 * ((4 * ((ns1 <= ns) ? 1 : 0)) & ((pc.getFreqData()
+ // .getSummFreq() <= 8 * ns1) ? 1 : 0));
+ int sum = ((2 * ns1 < ns) ? 1 : 0) + 2 * (((4 * ns1 <= ns) ? 1 : 0) & ((pc.FreqData.SummFreq <= 8 * ns1) ? 1 : 0));
+ pc.FreqData.IncrementSummFreq(sum);
+ }
+ else
+ {
+ p.Address = subAlloc.allocUnits(1);
+ if (p.Address == 0)
+ {
+ updateModelRestart();
+ return;
+ }
+ p.SetValues(pc.getOneState());
+ pc.FreqData.SetStats(p);
+ if (p.Freq < MAX_FREQ / 4 - 1)
+ {
+ p.IncrementFreq(p.Freq);
+ }
+ else
+ {
+ p.Freq = MAX_FREQ - 4;
+ }
+ pc.FreqData.SummFreq = (p.Freq + initEsc + (ns > 3 ? 1 : 0));
+ }
+ cf = 2 * fs.Freq * (pc.FreqData.SummFreq + 6);
+ sf = s0 + pc.FreqData.SummFreq;
+ if (cf < 6 * sf)
+ {
+ cf = 1 + (cf > sf ? 1 : 0) + (cf >= 4 * sf ? 1 : 0);
+ pc.FreqData.IncrementSummFreq(3);
+ }
+ else
+ {
+ cf = 4 + (cf >= 9 * sf ? 1 : 0) + (cf >= 12 * sf ? 1 : 0) + (cf >= 15 * sf ? 1 : 0);
+ pc.FreqData.IncrementSummFreq(cf);
+ }
+ p.Address = pc.FreqData.GetStats() + ns1 * State.Size;
+ p.SetSuccessor(successor);
+ p.Symbol = fs.Symbol;
+ p.Freq = cf;
+ pc.NumStats = ++ns1;
+ }
+
+ int address = fs.GetSuccessor();
+ maxContext.Address = address;
+ minContext.Address = address;
+ //TODO-----debug
+ // int pos = minContext.getFreqData().getStats();
+ // State a = new State(getHeap());
+ // a.Address=pos);
+ // pos+=State.size;
+ // a.Address=pos);
+ //--dbg end
+ return;
+ }
+
+ // Debug
+ public override System.String ToString()
+ {
+ StringBuilder buffer = new StringBuilder();
+ buffer.Append("ModelPPM[");
+ buffer.Append("\n numMasked=");
+ buffer.Append(numMasked);
+ buffer.Append("\n initEsc=");
+ buffer.Append(initEsc);
+ buffer.Append("\n orderFall=");
+ buffer.Append(orderFall);
+ buffer.Append("\n maxOrder=");
+ buffer.Append(maxOrder);
+ buffer.Append("\n runLength=");
+ buffer.Append(runLength);
+ buffer.Append("\n initRL=");
+ buffer.Append(initRL);
+ buffer.Append("\n escCount=");
+ buffer.Append(escCount);
+ buffer.Append("\n prevSuccess=");
+ buffer.Append(prevSuccess);
+ buffer.Append("\n foundState=");
+ buffer.Append(foundState);
+ buffer.Append("\n coder=");
+ buffer.Append(coder);
+ buffer.Append("\n subAlloc=");
+ buffer.Append(subAlloc);
+ buffer.Append("\n]");
+ return buffer.ToString();
+ }
+
+ // Debug
+ // public void dumpHeap() {
+ // subAlloc.dumpHeap();
+ // }
+
+ internal bool decodeInit(Stream stream, int maxOrder, int maxMemory)
+ {
+ if (stream != null)
+ coder = new RangeCoder(stream);
+
+ if (maxOrder == 1)
+ {
+ subAlloc.stopSubAllocator();
+ return (false);
+ }
+ subAlloc.startSubAllocator(maxMemory);
+ minContext = new PPMContext(Heap);
+ //medContext = new PPMContext(Heap);
+ maxContext = new PPMContext(Heap);
+ foundState = new State(Heap);
+ dummySEE2Cont = new SEE2Context();
+ for (int i = 0; i < 25; i++)
+ {
+ for (int j = 0; j < 16; j++)
+ {
+ SEE2Cont[i][j] = new SEE2Context();
+ }
+ }
+ startModelRare(maxOrder);
+
+ return (minContext.Address != 0);
+ }
+
+ internal void nextContext()
+ {
+ int addr = foundState.GetSuccessor();
+ if (orderFall == 0 && addr > subAlloc.PText)
+ {
+ minContext.Address = addr;
+ maxContext.Address = addr;
+ }
+ else
+ updateModel();
+ }
+
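+ // Decoder-driven variant used on the 7-Zip PPMd path: mirrors decodeChar() above but
+ // performs the arithmetic decoding through the supplied Decoder instead of the internal
+ // coder. Returns the decoded byte, -1 for a corrupt model, or -2 for a data error.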
+ public int decodeChar(Decoder decoder)
+ {
+ if (minContext.NumStats != 1)
+ {
+ State s = tempState1.Initialize(Heap);
+ s.Address = minContext.FreqData.GetStats();
+ int i;
+ int count, hiCnt;
+ if ((count = (int)decoder.GetThreshold((uint)minContext.FreqData.SummFreq)) < (hiCnt = s.Freq))
+ {
+ byte symbol;
+ decoder.Decode(0, (uint)s.Freq);
+ symbol = (byte)s.Symbol;
+ minContext.update1_0(this, s.Address);
+ nextContext();
+ return symbol;
+ }
+ prevSuccess = 0;
+ i = minContext.NumStats - 1;
+ do
+ {
+ s.IncrementAddress();
+ if ((hiCnt += s.Freq) > count)
+ {
+ byte symbol;
+ decoder.Decode((uint)(hiCnt - s.Freq), (uint)s.Freq);
+ symbol = (byte)s.Symbol;
+ minContext.update1(this, s.Address);
+ nextContext();
+ return symbol;
+ }
+ }
+ while (--i > 0);
+ if (count >= minContext.FreqData.SummFreq)
+ return -2;
+ hiBitsFlag = HB2Flag[foundState.Symbol];
+ decoder.Decode((uint)hiCnt, (uint)(minContext.FreqData.SummFreq - hiCnt));
+ for (i = 0; i < 256; i++)
+ charMask[i] = -1;
+ charMask[s.Symbol] = 0;
+ i = minContext.NumStats - 1;
+ do
+ {
+ s.DecrementAddress();
+ charMask[s.Symbol] = 0;
+ }
+ while (--i > 0);
+ }
+ else
+ {
+ State rs = tempState1.Initialize(Heap);
+ rs.Address = minContext.getOneState().Address;
+ hiBitsFlag = getHB2Flag()[foundState.Symbol];
+ int off1 = rs.Freq - 1;
+ int off2 = minContext.getArrayIndex(this, rs);
+ int bs = binSumm[off1][off2];
+ if (decoder.DecodeBit((uint)bs, 14) == 0)
+ {
+ byte symbol;
+ binSumm[off1][off2] = (bs + INTERVAL - minContext.getMean(bs, PERIOD_BITS, 2)) & 0xFFFF;
+ foundState.Address = rs.Address;
+ symbol = (byte)rs.Symbol;
+ rs.IncrementFreq((rs.Freq < 128) ? 1 : 0);
+ prevSuccess = 1;
+ incRunLength(1);
+ nextContext();
+ return symbol;
+ }
+ bs = (bs - minContext.getMean(bs, PERIOD_BITS, 2)) & 0xFFFF;
+ binSumm[off1][off2] = bs;
+ initEsc = PPMContext.ExpEscape[Utility.URShift(bs, 10)];
+ int i;
+ for (i = 0; i < 256; i++)
+ charMask[i] = -1;
+ charMask[rs.Symbol] = 0;
+ prevSuccess = 0;
+ }
+ for (;;)
+ {
+ State s = tempState1.Initialize(Heap);
+ int i;
+ int freqSum, count, hiCnt;
+ SEE2Context see;
+ int num, numMasked = minContext.NumStats;
+ do
+ {
+ orderFall++;
+ minContext.Address = minContext.getSuffix();
+ if (minContext.Address <= subAlloc.PText || minContext.Address > subAlloc.HeapEnd)
+ return -1;
+ }
+ while (minContext.NumStats == numMasked);
+ hiCnt = 0;
+ s.Address = minContext.FreqData.GetStats();
+ i = 0;
+ num = minContext.NumStats - numMasked;
+ do
+ {
+ int k = charMask[s.Symbol];
+ hiCnt += s.Freq & k;
+ minContext.ps[i] = s.Address;
+ s.IncrementAddress();
+ i -= k;
+ }
+ while (i != num);
+
+ see = minContext.makeEscFreq(this, numMasked, out freqSum);
+ freqSum += hiCnt;
+ count = (int)decoder.GetThreshold((uint)freqSum);
+
+ if (count < hiCnt)
+ {
+ byte symbol;
+ State ps = tempState2.Initialize(Heap);
+ for (hiCnt = 0, i = 0, ps.Address = minContext.ps[i]; (hiCnt += ps.Freq) <= count; i++, ps.Address = minContext.ps[i]);
+ s.Address = ps.Address;
+ decoder.Decode((uint)(hiCnt - s.Freq), (uint)s.Freq);
+ see.update();
+ symbol = (byte)s.Symbol;
+ minContext.update2(this, s.Address);
+ updateModel();
+ return symbol;
+ }
+ if (count >= freqSum)
+ return -2;
+ decoder.Decode((uint)hiCnt, (uint)(freqSum - hiCnt));
+ see.Summ = see.Summ + freqSum;
+ do
+ {
+ s.Address = minContext.ps[--i];
+ charMask[s.Symbol] = 0;
+ }
+ while (i != 0);
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/PPMContext.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/PPMContext.cs
new file mode 100644
index 00000000..10cc6be4
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/PPMContext.cs
@@ -0,0 +1,563 @@
+using System.Text;
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal class PPMContext : Pointer
+ {
+ internal FreqData FreqData
+ {
+ get
+ {
+ return freqData;
+ }
+
+ set
+ {
+ this.freqData.SummFreq = value.SummFreq;
+ this.freqData.SetStats(value.GetStats());
+ }
+
+ }
+ virtual public int NumStats
+ {
+ get
+ {
+ if (Memory != null)
+ {
+ numStats = Utility.readShortLittleEndian(Memory, Address) & 0xffff;
+ }
+ return numStats;
+ }
+
+ set
+ {
+ this.numStats = value & 0xffff;
+ if (Memory != null)
+ {
+ Utility.WriteLittleEndian(Memory, Address, (short)value);
+ }
+ }
+
+ }
+
+ private static readonly int unionSize;
+
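+ // Heap layout of a context record (size == 12): 2 bytes for NumStats, a 6-byte union
+ // shared by freqData and oneState (both mapped at Address + 2), and a 4-byte suffix
+ // pointer at Address + 8.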
+ public static readonly int size = 2 + unionSize + 4; // 12
+
+ // ushort NumStats;
+ private int numStats; // determines whether freqData or oneState is used (numStats == 1 means oneState)
+
+ // freqData and oneState overlay the same bytes (a union in the original C code)
+ private FreqData freqData;
+ private State oneState;
+
+ private int suffix; // pointer ppmcontext
+
+ public static readonly int[] ExpEscape = new int[] { 25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 };
+
+ // Temp fields
+ private State tempState1 = new State(null);
+ private State tempState2 = new State(null);
+ private State tempState3 = new State(null);
+ private State tempState4 = new State(null);
+ private State tempState5 = new State(null);
+ private PPMContext tempPPMContext = null;
+ internal int[] ps = new int[256];
+
+ public PPMContext(byte[] Memory)
+ : base(Memory)
+ {
+ oneState = new State(Memory);
+ freqData = new FreqData(Memory);
+ }
+
+ internal PPMContext Initialize(byte[] mem)
+ {
+ oneState.Initialize(mem);
+ freqData.Initialize(mem);
+ return base.Initialize<PPMContext>(mem);
+ }
+
+ internal State getOneState()
+ {
+ return oneState;
+ }
+
+ internal void setOneState(StateRef oneState)
+ {
+ this.oneState.SetValues(oneState);
+ }
+
+ internal int getSuffix()
+ {
+ if (Memory != null)
+ {
+ suffix = Utility.readIntLittleEndian(Memory, Address + 8);
+ }
+ return suffix;
+ }
+
+ internal void setSuffix(PPMContext suffix)
+ {
+ setSuffix(suffix.Address);
+ }
+
+ internal void setSuffix(int suffix)
+ {
+ this.suffix = suffix;
+ if (Memory != null)
+ {
+ Utility.WriteLittleEndian(Memory, Address + 8, suffix);
+ }
+ }
+
+ internal override int Address
+ {
+ get
+ {
+ return base.Address;
+ }
+ set
+ {
+ base.Address = value;
+ oneState.Address = value + 2;
+ freqData.Address = value + 2;
+ }
+ }
+
+ private PPMContext getTempPPMContext(byte[] Memory)
+ {
+ if (tempPPMContext == null)
+ {
+ tempPPMContext = new PPMContext(null);
+ }
+ return tempPPMContext.Initialize(Memory);
+ }
+
+ internal int createChild(ModelPPM model, State pStats, StateRef firstState)
+ {
+ PPMContext pc = getTempPPMContext(model.SubAlloc.Heap);
+ pc.Address = model.SubAlloc.allocContext();
+ if (pc.Address != 0) // address 0 means the sub-allocator could not allocate a context
+ {
+ pc.NumStats = 1;
+ pc.setOneState(firstState);
+ pc.setSuffix(this);
+ pStats.SetSuccessor(pc);
+ }
+ return pc.Address;
+ }
+
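+ // Halves the symbol frequencies of this context once they grow too large, keeps the
+ // states ordered by descending frequency, drops states whose frequency reaches zero,
+ // and shrinks the allocation when the state count falls far enough.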
+ internal void rescale(ModelPPM model)
+ {
+ int OldNS = NumStats, i = NumStats - 1, Adder, EscFreq;
+ // STATE* p1, * p;
+ State p1 = new State(model.Heap);
+ State p = new State(model.Heap);
+ State temp = new State(model.Heap);
+
+ for (p.Address = model.FoundState.Address; p.Address != freqData.GetStats(); p.DecrementAddress())
+ {
+ temp.Address = p.Address - State.Size;
+ State.PPMDSwap(p, temp);
+ }
+ temp.Address = freqData.GetStats();
+ temp.IncrementFreq(4);
+ freqData.IncrementSummFreq(4);
+ EscFreq = freqData.SummFreq - p.Freq;
+ Adder = (model.OrderFall != 0) ? 1 : 0;
+ p.Freq = Utility.URShift((p.Freq + Adder), 1);
+ freqData.SummFreq = p.Freq;
+ do
+ {
+ p.IncrementAddress();
+ EscFreq -= p.Freq;
+ p.Freq = Utility.URShift((p.Freq + Adder), 1);
+ freqData.IncrementSummFreq(p.Freq);
+ temp.Address = p.Address - State.Size;
+ if (p.Freq > temp.Freq)
+ {
+ p1.Address = p.Address;
+ StateRef tmp = new StateRef();
+ tmp.Values = p1;
+ State temp2 = new State(model.Heap);
+ State temp3 = new State(model.Heap);
+ do
+ {
+ // p1[0]=p1[-1];
+ temp2.Address = p1.Address - State.Size;
+ p1.SetValues(temp2);
+ p1.DecrementAddress();
+ temp3.Address = p1.Address - State.Size;
+ }
+ while (p1.Address != freqData.GetStats() && tmp.Freq > temp3.Freq);
+ p1.SetValues(tmp);
+ }
+ }
+ while (--i != 0);
+ if (p.Freq == 0)
+ {
+ do
+ {
+ i++;
+ p.DecrementAddress();
+ }
+ while (p.Freq == 0);
+ EscFreq += i;
+ NumStats = NumStats - i;
+ if (NumStats == 1)
+ {
+ StateRef tmp = new StateRef();
+ temp.Address = freqData.GetStats();
+ tmp.Values = temp;
+ // STATE tmp=*U.Stats;
+ do
+ {
+ // tmp.Freq-=(tmp.Freq >> 1)
+ tmp.DecrementFreq(Utility.URShift(tmp.Freq, 1));
+ EscFreq = Utility.URShift(EscFreq, 1);
+ }
+ while (EscFreq > 1);
+ model.SubAlloc.freeUnits(freqData.GetStats(), Utility.URShift((OldNS + 1), 1));
+ oneState.SetValues(tmp);
+ model.FoundState.Address = oneState.Address;
+ return;
+ }
+ }
+ EscFreq -= Utility.URShift(EscFreq, 1);
+ freqData.IncrementSummFreq(EscFreq);
+ int n0 = Utility.URShift((OldNS + 1), 1), n1 = Utility.URShift((NumStats + 1), 1);
+ if (n0 != n1)
+ {
+ freqData.SetStats(model.SubAlloc.shrinkUnits(freqData.GetStats(), n0, n1));
+ }
+ model.FoundState.Address = freqData.GetStats();
+ }
+
+ internal int getArrayIndex(ModelPPM Model, State rs)
+ {
+ PPMContext tempSuffix = getTempPPMContext(Model.SubAlloc.Heap);
+ tempSuffix.Address = getSuffix();
+ int ret = 0;
+ ret += Model.PrevSuccess;
+ ret += Model.getNS2BSIndx()[tempSuffix.NumStats - 1];
+ ret += Model.HiBitsFlag + 2 * Model.getHB2Flag()[rs.Symbol];
+ ret += ((Utility.URShift(Model.RunLength, 26)) & 0x20);
+ return ret;
+ }
+
+ internal int getMean(int summ, int shift, int round)
+ {
+ return (Utility.URShift((summ + (1 << (shift - round))), (shift)));
+ }
+
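+ // Decodes the next symbol in a binary (single-state) context using the binSumm
+ // probability table; on an escape the single symbol is masked and the caller falls
+ // back to the suffix context.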
+ internal void decodeBinSymbol(ModelPPM model)
+ {
+ State rs = tempState1.Initialize(model.Heap);
+ rs.Address = oneState.Address; // State&
+ model.HiBitsFlag = model.getHB2Flag()[model.FoundState.Symbol];
+ int off1 = rs.Freq - 1;
+ int off2 = getArrayIndex(model, rs);
+ int bs = model.BinSumm[off1][off2];
+ if (model.Coder.GetCurrentShiftCount(ModelPPM.TOT_BITS) < bs)
+ {
+ model.FoundState.Address = rs.Address;
+ rs.IncrementFreq((rs.Freq < 128) ? 1 : 0);
+ model.Coder.SubRange.LowCount = 0;
+ model.Coder.SubRange.HighCount = bs;
+ bs = ((bs + ModelPPM.INTERVAL - getMean(bs, ModelPPM.PERIOD_BITS, 2)) & 0xffff);
+ model.BinSumm[off1][off2] = bs;
+ model.PrevSuccess = 1;
+ model.incRunLength(1);
+ }
+ else
+ {
+ model.Coder.SubRange.LowCount = bs;
+ bs = (bs - getMean(bs, ModelPPM.PERIOD_BITS, 2)) & 0xFFFF;
+ model.BinSumm[off1][off2] = bs;
+ model.Coder.SubRange.HighCount = ModelPPM.BIN_SCALE;
+ model.InitEsc = ExpEscape[Utility.URShift(bs, 10)];
+ model.NumMasked = 1;
+ model.CharMask[rs.Symbol] = model.EscCount;
+ model.PrevSuccess = 0;
+ model.FoundState.Address = 0;
+ }
+ //int a = 0;//TODO just 4 debugging
+ }
+
+ // public static void ppmdSwap(ModelPPM model, StatePtr state1, StatePtr state2)
+ // {
+ // byte[] bytes = model.getSubAlloc().getHeap();
+ // int p1 = state1.Address;
+ // int p2 = state2.Address;
+ //
+ // for (int i = 0; i < StatePtr.size; i++) {
+ // byte temp = bytes[p1+i];
+ // bytes[p1+i] = bytes[p2+i];
+ // bytes[p2+i] = temp;
+ // }
+ // state1.Address=p1);
+ // state2.Address=p2);
+ // }
+
+ internal void update1(ModelPPM model, int p)
+ {
+ model.FoundState.Address = p;
+ model.FoundState.IncrementFreq(4);
+ freqData.IncrementSummFreq(4);
+ State p0 = tempState3.Initialize(model.Heap);
+ State p1 = tempState4.Initialize(model.Heap);
+ p0.Address = p;
+ p1.Address = p - State.Size;
+ if (p0.Freq > p1.Freq)
+ {
+ State.PPMDSwap(p0, p1);
+ model.FoundState.Address = p1.Address;
+ if (p1.Freq > ModelPPM.MAX_FREQ)
+ rescale(model);
+ }
+ }
+
+ internal void update1_0(ModelPPM model, int p)
+ {
+ model.FoundState.Address = p;
+ model.PrevSuccess = 2 * model.FoundState.Freq > freqData.SummFreq ? 1 : 0;
+ model.incRunLength(model.PrevSuccess);
+ freqData.IncrementSummFreq(4);
+ model.FoundState.IncrementFreq(4);
+ if (model.FoundState.Freq > ModelPPM.MAX_FREQ)
+ rescale(model);
+ }
+
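+ // Decodes a symbol after an escape: only symbols not masked in CharMask are considered,
+ // and the escape frequency is estimated via the SEE2 context from makeEscFreq2().
+ // Returns false when the coder count is out of range.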
+ internal bool decodeSymbol2(ModelPPM model)
+ {
+ long count;
+ int hiCnt, i = NumStats - model.NumMasked;
+ SEE2Context psee2c = makeEscFreq2(model, i);
+ RangeCoder coder = model.Coder;
+ // STATE* ps[256], ** pps=ps, * p=U.Stats-1;
+ State p = tempState1.Initialize(model.Heap);
+ State temp = tempState2.Initialize(model.Heap);
+ p.Address = freqData.GetStats() - State.Size;
+ int pps = 0;
+ hiCnt = 0;
+
+ do
+ {
+ do
+ {
+ p.IncrementAddress(); // p++;
+ }
+ while (model.CharMask[p.Symbol] == model.EscCount);
+ hiCnt += p.Freq;
+ ps[pps++] = p.Address;
+ }
+ while (--i != 0);
+ coder.SubRange.incScale(hiCnt);
+ count = coder.CurrentCount;
+ if (count >= coder.SubRange.Scale)
+ {
+ return false;
+ }
+ pps = 0;
+ p.Address = ps[pps];
+ if (count < hiCnt)
+ {
+ hiCnt = 0;
+ while ((hiCnt += p.Freq) <= count)
+ {
+ p.Address = ps[++pps]; // p=*++pps;
+ }
+ coder.SubRange.HighCount = hiCnt;
+ coder.SubRange.LowCount = hiCnt - p.Freq;
+ psee2c.update();
+ update2(model, p.Address);
+ }
+ else
+ {
+ coder.SubRange.LowCount = hiCnt;
+ coder.SubRange.HighCount = coder.SubRange.Scale;
+ i = NumStats - model.NumMasked; // ->NumMasked;
+ pps--;
+ do
+ {
+ temp.Address = ps[++pps]; // (*++pps)
+ model.CharMask[temp.Symbol] = model.EscCount;
+ }
+ while (--i != 0);
+ psee2c.incSumm((int)coder.SubRange.Scale);
+ model.NumMasked = NumStats;
+ }
+ return (true);
+ }
+
+ internal void update2(ModelPPM model, int p)
+ {
+ State temp = tempState5.Initialize(model.Heap);
+ temp.Address = p;
+ model.FoundState.Address = p;
+ model.FoundState.IncrementFreq(4);
+ freqData.IncrementSummFreq(4);
+ if (temp.Freq > ModelPPM.MAX_FREQ)
+ {
+ rescale(model);
+ }
+ model.incEscCount(1);
+ model.RunLength = model.InitRL;
+ }
+
+ private SEE2Context makeEscFreq2(ModelPPM model, int Diff)
+ {
+ SEE2Context psee2c;
+ int numStats = NumStats;
+ if (numStats != 256)
+ {
+ PPMContext suff = getTempPPMContext(model.Heap);
+ suff.Address = getSuffix();
+ int idx1 = model.getNS2Indx()[Diff - 1];
+ int idx2 = 0;
+ idx2 += ((Diff < suff.NumStats - numStats) ? 1 : 0);
+ idx2 += 2 * ((freqData.SummFreq < 11 * numStats) ? 1 : 0);
+ idx2 += 4 * ((model.NumMasked > Diff) ? 1 : 0);
+ idx2 += model.HiBitsFlag;
+ psee2c = model.getSEE2Cont()[idx1][idx2];
+ model.Coder.SubRange.Scale = psee2c.Mean;
+ }
+ else
+ {
+ psee2c = model.DummySEE2Cont;
+ model.Coder.SubRange.Scale = 1;
+ }
+ return psee2c;
+ }
+
+ internal SEE2Context makeEscFreq(ModelPPM model, int numMasked, out int escFreq)
+ {
+ SEE2Context psee2c;
+ int numStats = NumStats;
+ int nonMasked = numStats - numMasked;
+ if (numStats != 256)
+ {
+ PPMContext suff = getTempPPMContext(model.Heap);
+ suff.Address = getSuffix();
+ int idx1 = model.getNS2Indx()[nonMasked - 1];
+ int idx2 = 0;
+ idx2 += ((nonMasked < suff.NumStats - numStats) ? 1 : 0);
+ idx2 += 2 * ((freqData.SummFreq < 11 * numStats) ? 1 : 0);
+ idx2 += 4 * ((numMasked > nonMasked) ? 1 : 0);
+ idx2 += model.HiBitsFlag;
+ psee2c = model.getSEE2Cont()[idx1][idx2];
+ escFreq = psee2c.Mean;
+ }
+ else
+ {
+ psee2c = model.DummySEE2Cont;
+ escFreq = 1;
+ }
+ return psee2c;
+ }
+
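+ // Decodes a symbol in a multi-state context: walks the state list until the cumulative
+ // frequency exceeds the coder count, or masks every symbol and signals an escape when
+ // it does not. Returns false when the coder count is out of range.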
+ internal bool decodeSymbol1(ModelPPM model)
+ {
+
+ RangeCoder coder = model.Coder;
+ coder.SubRange.Scale = freqData.SummFreq;
+ State p = new State(model.Heap);
+ p.Address = freqData.GetStats();
+ int i, HiCnt;
+ long count = coder.CurrentCount;
+ if (count >= coder.SubRange.Scale)
+ {
+ return false;
+ }
+ if (count < (HiCnt = p.Freq))
+ {
+ coder.SubRange.HighCount = HiCnt;
+ model.PrevSuccess = (2 * HiCnt > coder.SubRange.Scale) ? 1 : 0;
+ model.incRunLength(model.PrevSuccess);
+ HiCnt += 4;
+ model.FoundState.Address = p.Address;
+ model.FoundState.Freq = HiCnt;
+ freqData.IncrementSummFreq(4);
+ if (HiCnt > ModelPPM.MAX_FREQ)
+ {
+ rescale(model);
+ }
+ coder.SubRange.LowCount = 0;
+ return true;
+ }
+ else
+ {
+ if (model.FoundState.Address == 0)
+ {
+ return (false);
+ }
+ }
+ model.PrevSuccess = 0;
+ int numStats = NumStats;
+ i = numStats - 1;
+ while ((HiCnt += p.IncrementAddress().Freq) <= count)
+ {
+ if (--i == 0)
+ {
+ model.HiBitsFlag = model.getHB2Flag()[model.FoundState.Symbol];
+ coder.SubRange.LowCount = HiCnt;
+ model.CharMask[p.Symbol] = model.EscCount;
+ model.NumMasked = numStats;
+ i = numStats - 1;
+ model.FoundState.Address = 0;
+ do
+ {
+ model.CharMask[p.DecrementAddress().Symbol] = model.EscCount;
+ }
+ while (--i != 0);
+ coder.SubRange.HighCount = coder.SubRange.Scale;
+ return (true);
+ }
+ }
+ coder.SubRange.LowCount = HiCnt - p.Freq;
+ coder.SubRange.HighCount = HiCnt;
+ update1(model, p.Address);
+ return (true);
+ }
+
+ public override System.String ToString()
+ {
+ StringBuilder buffer = new StringBuilder();
+ buffer.Append("PPMContext[");
+ buffer.Append("\n Address=");
+ buffer.Append(Address);
+ buffer.Append("\n size=");
+ buffer.Append(size);
+ buffer.Append("\n numStats=");
+ buffer.Append(NumStats);
+ buffer.Append("\n Suffix=");
+ buffer.Append(getSuffix());
+ buffer.Append("\n freqData=");
+ buffer.Append(freqData);
+ buffer.Append("\n oneState=");
+ buffer.Append(oneState);
+ buffer.Append("\n]");
+ return buffer.ToString();
+ }
+ static PPMContext()
+ {
+ unionSize = System.Math.Max(FreqData.Size, State.Size);
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/Pointer.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/Pointer.cs
new file mode 100644
index 00000000..edca08ca
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/Pointer.cs
@@ -0,0 +1,34 @@
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal abstract class Pointer
+ {
+ /// Initialize the object with the given byte array (may be null).
+ internal Pointer(byte[] mem)
+ {
+ Memory = mem;
+ }
+
+ internal byte[] Memory
+ {
+ get;
+ private set;
+ }
+
+ internal virtual int Address
+ {
+ get;
+ set;
+ }
+
+ protected T Initialize<T>(byte[] mem)
+ where T : Pointer
+ {
+ Memory = mem;
+ Address = 0;
+ return this as T;
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/RangeCoder.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/RangeCoder.cs
new file mode 100644
index 00000000..da085256
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/RangeCoder.cs
@@ -0,0 +1,183 @@
+using System.IO;
+using System.Text;
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal class RangeCoder
+ {
+ internal const int TOP = 1 << 24;
+ internal const int BOT = 1 << 15;
+ internal const long UintMask = 0xFFFFffffL;
+
+ // uint low, code, range;
+ private long low, code, range;
+ private Stream stream;
+
+ internal RangeCoder(Stream stream)
+ {
+ this.stream = stream;
+ Init();
+ }
+
+ private void Init()
+ {
+ this.SubRange = new SubRange();
+
+ low = code = 0L;
+ range = 0xFFFFffffL;
+ for (int i = 0; i < 4; i++)
+ {
+ code = ((code << 8) | Char) & UintMask;
+ }
+ }
+
+ internal int CurrentCount
+ {
+ get
+ {
+ range = (range / SubRange.Scale) & UintMask;
+ return (int)((code - low) / (range));
+ }
+
+ }
+
+ private long Char
+ {
+ get
+ {
+ if (stream != null)
+ return stream.ReadByte();
+ return -1;
+ }
+
+ }
+
+ internal SubRange SubRange
+ {
+ get;
+ private set;
+ }
+
+
+ internal long GetCurrentShiftCount(int SHIFT)
+ {
+ range = Utility.URShift(range, SHIFT);
+ return ((code - low) / (range)) & UintMask;
+ }
+
+ internal void Decode()
+ {
+ low = (low + (range * SubRange.LowCount)) & UintMask;
+ range = (range * (SubRange.HighCount - SubRange.LowCount)) & UintMask;
+ }
+
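+ // Range normalization: while the top byte of low and low + range agree (or the range has
+ // collapsed below BOT), shift in another input byte and scale low and range back up.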
+ internal void AriDecNormalize()
+ {
+ // while ((low ^ (low + range)) < TOP || range < BOT && ((range = -low & (BOT - 1)) != 0 ? true : true))
+ // {
+ // code = ((code << 8) | unpackRead.getChar()&0xff)&uintMask;
+ // range = (range << 8)&uintMask;
+ // low = (low << 8)&uintMask;
+ // }
+
+ // Rewrote for clarity
+ bool c2 = false;
+ while ((low ^ (low + range)) < TOP || (c2 = range < BOT))
+ {
+ if (c2)
+ {
+ range = (-low & (BOT - 1)) & UintMask;
+ c2 = false;
+ }
+ code = ((code << 8) | Char) & UintMask;
+ range = (range << 8) & UintMask;
+ low = (low << 8) & UintMask;
+ }
+ }
+
+ // Debug
+ public override System.String ToString()
+ {
+ StringBuilder buffer = new StringBuilder();
+ buffer.Append("RangeCoder[");
+ buffer.Append("\n low=");
+ buffer.Append(low);
+ buffer.Append("\n code=");
+ buffer.Append(code);
+ buffer.Append("\n range=");
+ buffer.Append(range);
+ buffer.Append("\n subrange=");
+ buffer.Append(SubRange);
+ buffer.Append("]");
+ return buffer.ToString();
+ }
+ }
+
+ internal class SubRange
+ {
+ // uint LowCount, HighCount, scale;
+ private long lowCount, highCount, scale;
+
+ internal void incScale(int dScale)
+ {
+ Scale = Scale + dScale;
+ }
+
+ internal long HighCount
+ {
+ get
+ {
+ return highCount;
+ }
+
+ set
+ {
+ this.highCount = value & RangeCoder.UintMask;
+ }
+
+ }
+
+ internal long LowCount
+ {
+ get
+ {
+ return lowCount & RangeCoder.UintMask;
+ }
+
+ set
+ {
+ this.lowCount = value & RangeCoder.UintMask;
+ }
+
+ }
+
+ internal long Scale
+ {
+ get
+ {
+ return scale;
+ }
+
+ set
+ {
+ this.scale = value & RangeCoder.UintMask;
+ }
+
+ }
+
+ // Debug
+ public override System.String ToString()
+ {
+ StringBuilder buffer = new StringBuilder();
+ buffer.Append("SubRange[");
+ buffer.Append("\n lowCount=");
+ buffer.Append(lowCount);
+ buffer.Append("\n highCount=");
+ buffer.Append(highCount);
+ buffer.Append("\n scale=");
+ buffer.Append(scale);
+ buffer.Append("]");
+ return buffer.ToString();
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/RarMemBlock.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/RarMemBlock.cs
new file mode 100644
index 00000000..f583f56f
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/RarMemBlock.cs
@@ -0,0 +1,125 @@
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal class RarMemBlock : Pointer
+ {
+ public const int size = 12;
+
+ private int stamp, NU;
+
+ private int next, prev; // Pointer RarMemBlock
+
+ public RarMemBlock(byte[] Memory)
+ : base(Memory)
+ {
+ }
+
+ internal int Stamp
+ {
+ get
+ {
+ if (Memory != null)
+ {
+ stamp = Utility.readShortLittleEndian(Memory, Address) & 0xffff;
+ }
+ return stamp;
+ }
+
+ set
+ {
+ this.stamp = value;
+ if (Memory != null)
+ {
+ Utility.WriteLittleEndian(Memory, Address, (short)value);
+ }
+ }
+
+ }
+
+ internal void InsertAt(RarMemBlock p)
+ {
+ RarMemBlock temp = new RarMemBlock(Memory);
+ SetPrev(p.Address);
+ temp.Address = GetPrev();
+ SetNext(temp.GetNext()); // prev.getNext();
+ temp.SetNext(this); // prev.setNext(this);
+ temp.Address = GetNext();
+ temp.SetPrev(this); // next.setPrev(this);
+ }
+
+ internal void Remove()
+ {
+ RarMemBlock temp = new RarMemBlock(Memory);
+ temp.Address = GetPrev();
+ temp.SetNext(GetNext()); // prev.setNext(next);
+ temp.Address = GetNext();
+ temp.SetPrev(GetPrev()); // next.setPrev(prev);
+ // next = -1;
+ // prev = -1;
+ }
+
+ internal int GetNext()
+ {
+ if (Memory != null)
+ {
+ next = Utility.readIntLittleEndian(Memory, Address + 4);
+ }
+ return next;
+ }
+
+ internal void SetNext(RarMemBlock next)
+ {
+ SetNext(next.Address);
+ }
+
+ internal void SetNext(int next)
+ {
+ this.next = next;
+ if (Memory != null)
+ {
+ Utility.WriteLittleEndian(Memory, Address + 4, next);
+ }
+ }
+
+ internal int GetNU()
+ {
+ if (Memory != null)
+ {
+ NU = Utility.readShortLittleEndian(Memory, Address + 2) & 0xffff;
+ }
+ return NU;
+ }
+
+ internal void SetNU(int nu)
+ {
+ NU = nu & 0xffff;
+ if (Memory != null)
+ {
+ Utility.WriteLittleEndian(Memory, Address + 2, (short)nu);
+ }
+ }
+
+ internal int GetPrev()
+ {
+ if (Memory != null)
+ {
+ prev = Utility.readIntLittleEndian(Memory, Address + 8);
+ }
+ return prev;
+ }
+
+ internal void SetPrev(RarMemBlock prev)
+ {
+ SetPrev(prev.Address);
+ }
+
+ internal void SetPrev(int prev)
+ {
+ this.prev = prev;
+ if (Memory != null)
+ {
+ Utility.WriteLittleEndian(Memory, Address + 8, prev);
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/RarNode.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/RarNode.cs
new file mode 100644
index 00000000..168fe24e
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/RarNode.cs
@@ -0,0 +1,53 @@
+using System.Text;
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal class RarNode : Pointer
+ {
+ private int next; //rarnode pointer
+
+ public const int size = 4;
+
+ public RarNode(byte[] Memory)
+ : base(Memory)
+ {
+ }
+
+ internal int GetNext()
+ {
+ if (Memory != null)
+ {
+ next = Utility.readIntLittleEndian(Memory, Address);
+ }
+ return next;
+ }
+
+ internal void SetNext(RarNode next)
+ {
+ SetNext(next.Address);
+ }
+
+ internal void SetNext(int next)
+ {
+ this.next = next;
+ if (Memory != null)
+ {
+ Utility.WriteLittleEndian(Memory, Address, next);
+ }
+ }
+
+ public override string ToString()
+ {
+ StringBuilder buffer = new StringBuilder();
+ buffer.Append("State[");
+ buffer.Append("\n Address=");
+ buffer.Append(Address);
+ buffer.Append("\n size=");
+ buffer.Append(size);
+ buffer.Append("\n next=");
+ buffer.Append(GetNext());
+ buffer.Append("\n]");
+ return buffer.ToString();
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/SEE2Context.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/SEE2Context.cs
new file mode 100644
index 00000000..6d53f26a
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/SEE2Context.cs
@@ -0,0 +1,107 @@
+using System.Text;
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal class SEE2Context
+ {
+ virtual public int Mean
+ {
+ get
+ {
+ int retVal = Utility.URShift(summ, shift);
+ summ -= retVal;
+ return retVal + ((retVal == 0) ? 1 : 0);
+ }
+
+ }
+ virtual public int Count
+ {
+ get
+ {
+ return count;
+ }
+
+ set
+ {
+ this.count = value & 0xff;
+ }
+
+ }
+ virtual public int Shift
+ {
+ get
+ {
+ return shift;
+ }
+
+ set
+ {
+ this.shift = value & 0xff;
+ }
+
+ }
+ virtual public int Summ
+ {
+ get
+ {
+ return summ;
+ }
+
+ set
+ {
+ this.summ = value & 0xffff;
+ }
+
+ }
+ public const int size = 4;
+
+ // ushort Summ;
+ private int summ;
+
+ // byte Shift;
+ private int shift;
+
+ // byte Count;
+ private int count;
+
+ public void Initialize(int initVal)
+ {
+ shift = (ModelPPM.PERIOD_BITS - 4) & 0xff;
+ summ = (initVal << shift) & 0xffff;
+ count = 4;
+ }
+
+ public virtual void update()
+ {
+ if (shift < ModelPPM.PERIOD_BITS && --count == 0)
+ {
+ summ += summ;
+ count = (3 << shift++);
+ }
+ summ &= 0xffff;
+ count &= 0xff;
+ shift &= 0xff;
+ }
+
+ public virtual void incSumm(int dSumm)
+ {
+ Summ = Summ + dSumm;
+ }
+
+ public override System.String ToString()
+ {
+ StringBuilder buffer = new StringBuilder();
+ buffer.Append("SEE2Context[");
+ buffer.Append("\n size=");
+ buffer.Append(size);
+ buffer.Append("\n summ=");
+ buffer.Append(summ);
+ buffer.Append("\n shift=");
+ buffer.Append(shift);
+ buffer.Append("\n count=");
+ buffer.Append(count);
+ buffer.Append("\n]");
+ return buffer.ToString();
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/State.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/State.cs
new file mode 100644
index 00000000..6a9f005b
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/State.cs
@@ -0,0 +1,120 @@
+using System;
+using System.Text;
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal class State : Pointer
+ {
+ internal const int Size = 6;
+
+ internal State(byte[] Memory)
+ : base(Memory)
+ {
+ }
+
+ internal int Symbol
+ {
+ get
+ {
+ return Memory[Address] & 0xff;
+ }
+
+ set
+ {
+ Memory[Address] = (byte)value;
+ }
+
+ }
+ internal int Freq
+ {
+ get
+ {
+ return Memory[Address + 1] & 0xff;
+ }
+
+ set
+ {
+ Memory[Address + 1] = (byte)value;
+ }
+
+ }
+
+ internal State Initialize(byte[] mem)
+ {
+ return base.Initialize<State>(mem);
+ }
+
+ internal void IncrementFreq(int dFreq)
+ {
+ Memory[Address + 1] = (byte)(Memory[Address + 1] + dFreq);
+ }
+
+ internal int GetSuccessor()
+ {
+ return Utility.readIntLittleEndian(Memory, Address + 2);
+ }
+
+ internal void SetSuccessor(PPMContext successor)
+ {
+ SetSuccessor(successor.Address);
+ }
+
+ internal void SetSuccessor(int successor)
+ {
+ Utility.WriteLittleEndian(Memory, Address + 2, successor);
+ }
+
+ internal void SetValues(StateRef state)
+ {
+ Symbol = state.Symbol;
+ Freq = state.Freq;
+ SetSuccessor(state.GetSuccessor());
+ }
+
+ internal void SetValues(State ptr)
+ {
+ Array.Copy(ptr.Memory, ptr.Address, Memory, Address, Size);
+ }
+
+ internal State DecrementAddress()
+ {
+ Address = Address - Size;
+ return this;
+ }
+
+ internal State IncrementAddress()
+ {
+ Address = Address + Size;
+ return this;
+ }
+
+ internal static void PPMDSwap(State ptr1, State ptr2)
+ {
+ byte[] mem1 = ptr1.Memory, mem2 = ptr2.Memory;
+ for (int i = 0, pos1 = ptr1.Address, pos2 = ptr2.Address; i < Size; i++, pos1++, pos2++)
+ {
+ byte temp = mem1[pos1];
+ mem1[pos1] = mem2[pos2];
+ mem2[pos2] = temp;
+ }
+ }
+
+ public override System.String ToString()
+ {
+ StringBuilder buffer = new StringBuilder();
+ buffer.Append("State[");
+ buffer.Append("\n Address=");
+ buffer.Append(Address);
+ buffer.Append("\n size=");
+ buffer.Append(Size);
+ buffer.Append("\n symbol=");
+ buffer.Append(Symbol);
+ buffer.Append("\n freq=");
+ buffer.Append(Freq);
+ buffer.Append("\n successor=");
+ buffer.Append(GetSuccessor());
+ buffer.Append("\n]");
+ return buffer.ToString();
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/StateRef.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/StateRef.cs
new file mode 100644
index 00000000..23a52883
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/StateRef.cs
@@ -0,0 +1,90 @@
+using System.Text;
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal class StateRef
+ {
+ private int symbol;
+
+ private int freq;
+
+ private int successor; // pointer ppmcontext
+
+ internal int Symbol
+ {
+ get
+ {
+ return symbol;
+ }
+
+ set
+ {
+ this.symbol = value & 0xff;
+ }
+
+ }
+ internal int Freq
+ {
+ get
+ {
+ return freq;
+ }
+
+ set
+ {
+ this.freq = value & 0xff;
+ }
+
+ }
+
+ internal State Values
+ {
+ set
+ {
+ Freq = value.Freq;
+ SetSuccessor(value.GetSuccessor());
+ Symbol = value.Symbol;
+ }
+ }
+
+
+ public virtual void IncrementFreq(int dFreq)
+ {
+ freq = (freq + dFreq) & 0xff;
+ }
+
+ public virtual void DecrementFreq(int dFreq)
+ {
+ freq = (freq - dFreq) & 0xff;
+ }
+
+ public virtual int GetSuccessor()
+ {
+ return successor;
+ }
+
+ public virtual void SetSuccessor(PPMContext successor)
+ {
+ SetSuccessor(successor.Address);
+ }
+
+ public virtual void SetSuccessor(int successor)
+ {
+ this.successor = successor;
+ }
+
+ public override System.String ToString()
+ {
+ StringBuilder buffer = new StringBuilder();
+ buffer.Append("State[");
+ buffer.Append("\n symbol=");
+ buffer.Append(Symbol);
+ buffer.Append("\n freq=");
+ buffer.Append(Freq);
+ buffer.Append("\n successor=");
+ buffer.Append(GetSuccessor());
+ buffer.Append("\n]");
+ return buffer.ToString();
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/SubAllocator.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/SubAllocator.cs
new file mode 100644
index 00000000..957b8205
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/H/SubAllocator.cs
@@ -0,0 +1,489 @@
+using System;
+using System.Text;
+
+namespace Compress.SevenZip.Compress.PPmd.H
+{
+ internal class SubAllocator
+ {
+ virtual public int FakeUnitsStart
+ {
+ get
+ {
+ return fakeUnitsStart;
+ }
+
+ set
+ {
+ this.fakeUnitsStart = value;
+ }
+
+ }
+ virtual public int HeapEnd
+ {
+ get
+ {
+ return heapEnd;
+ }
+
+ }
+ virtual public int PText
+ {
+ get
+ {
+ return pText;
+ }
+
+ set
+ {
+ pText = value;
+ }
+
+ }
+ virtual public int UnitsStart
+ {
+ get
+ {
+ return unitsStart;
+ }
+
+ set
+ {
+ this.unitsStart = value;
+ }
+
+ }
+ virtual public byte[] Heap
+ {
+ get
+ {
+ return heap;
+ }
+
+ }
+ public const int N1 = 4;
+ public const int N2 = 4;
+ public const int N3 = 4;
+ public static readonly int N4 = (128 + 3 - 1 * N1 - 2 * N2 - 3 * N3) / 4;
+
+ public static readonly int N_INDEXES = N1 + N2 + N3 + N4;
+
+ public static readonly int UNIT_SIZE;
+
+ public const int FIXED_UNIT_SIZE = 12;
+
+ private int subAllocatorSize;
+
+ // byte Indx2Units[N_INDEXES], Units2Indx[128], GlueCount;
+ private int[] indx2Units = new int[N_INDEXES];
+ private int[] units2Indx = new int[128];
+ private int glueCount;
+
+ // byte *HeapStart,*LoUnit, *HiUnit;
+ private int heapStart, loUnit, hiUnit;
+
+ private RarNode[] freeList = new RarNode[N_INDEXES];
+
+ // byte *pText, *UnitsStart,*HeapEnd,*FakeUnitsStart;
+ private int pText, unitsStart, heapEnd, fakeUnitsStart;
+
+ private byte[] heap;
+
+ private int freeListPos;
+
+ private int tempMemBlockPos;
+
+ // Temp fields
+ private RarNode tempRarNode = null;
+ private RarMemBlock tempRarMemBlock1 = null;
+ private RarMemBlock tempRarMemBlock2 = null;
+ private RarMemBlock tempRarMemBlock3 = null;
+
+ public SubAllocator()
+ {
+ clean();
+ }
+
+ public virtual void clean()
+ {
+ subAllocatorSize = 0;
+ }
+
+ private void insertNode(int p, int indx)
+ {
+ RarNode temp = tempRarNode;
+ temp.Address = p;
+ temp.SetNext(freeList[indx].GetNext());
+ freeList[indx].SetNext(temp);
+ }
+
+ public virtual void incPText()
+ {
+ pText++;
+ }
+
+ private int removeNode(int indx)
+ {
+ int retVal = freeList[indx].GetNext();
+ RarNode temp = tempRarNode;
+ temp.Address = retVal;
+ freeList[indx].SetNext(temp.GetNext());
+ return retVal;
+ }
+
+ private int U2B(int NU)
+ {
+ return UNIT_SIZE * NU;
+ }
+
+ /* memblockptr */
+ private int MBPtr(int BasePtr, int Items)
+ {
+ return (BasePtr + U2B(Items));
+ }
+
+ private void splitBlock(int pv, int oldIndx, int newIndx)
+ {
+ int i, uDiff = indx2Units[oldIndx] - indx2Units[newIndx];
+ int p = pv + U2B(indx2Units[newIndx]);
+ if (indx2Units[i = units2Indx[uDiff - 1]] != uDiff)
+ {
+ insertNode(p, --i);
+ p += U2B(i = indx2Units[i]);
+ uDiff -= i;
+ }
+ insertNode(p, units2Indx[uDiff - 1]);
+ }
+
+ public virtual void stopSubAllocator()
+ {
+ if (subAllocatorSize != 0)
+ {
+ subAllocatorSize = 0;
+ //ArrayFactory.BYTES_FACTORY.recycle(heap);
+ heap = null;
+ heapStart = 1;
+ // rarfree(HeapStart);
+ // Free temp fields
+ tempRarNode = null;
+ tempRarMemBlock1 = null;
+ tempRarMemBlock2 = null;
+ tempRarMemBlock3 = null;
+ }
+ }
+
+ public virtual int GetAllocatedMemory()
+ {
+ return subAllocatorSize;
+ }
+
+
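+ // Allocates the backing byte array. Layout: byte 0 is reserved as the null pointer,
+ // [heapStart, heapStart + allocSize) holds the model data, followed by the free-list
+ // nodes and a single scratch RarMemBlock used by glueFreeBlocks().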
+ public virtual bool startSubAllocator(int SASize)
+ {
+ int t = SASize;
+ if (subAllocatorSize == t)
+ {
+ return true;
+ }
+ stopSubAllocator();
+ int allocSize = t / FIXED_UNIT_SIZE * UNIT_SIZE + UNIT_SIZE;
+
+ // add space for the free list (needed for pointers);
+ // the leading 1 reserves address 0 as the null pointer
+ int realAllocSize = 1 + allocSize + 4 * N_INDEXES;
+ // adding space for an additional memblock
+ tempMemBlockPos = realAllocSize;
+ realAllocSize += RarMemBlock.size;
+
+ heap = new byte[realAllocSize];
+ heapStart = 1;
+ heapEnd = heapStart + allocSize - UNIT_SIZE;
+ subAllocatorSize = t;
+ // Bug fixed
+ freeListPos = heapStart + allocSize;
+ // assert: realAllocSize - tempMemBlockPos == RarMemBlock.size
+
+ // Init freeList
+ for (int i = 0, pos = freeListPos; i < freeList.Length; i++, pos += RarNode.size)
+ {
+ freeList[i] = new RarNode(heap);
+ freeList[i].Address = pos;
+ }
+
+ // Init temp fields
+ tempRarNode = new RarNode(heap);
+ tempRarMemBlock1 = new RarMemBlock(heap);
+ tempRarMemBlock2 = new RarMemBlock(heap);
+ tempRarMemBlock3 = new RarMemBlock(heap);
+
+ return true;
+ }
+
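+ // Defragmentation pass: links all free blocks into a temporary list, merges blocks that
+ // are physically adjacent in the heap, and reinserts the merged blocks into the
+ // size-indexed free lists.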
+ private void glueFreeBlocks()
+ {
+ RarMemBlock s0 = tempRarMemBlock1;
+ s0.Address = tempMemBlockPos;
+ RarMemBlock p = tempRarMemBlock2;
+ RarMemBlock p1 = tempRarMemBlock3;
+ int i, k, sz;
+ if (loUnit != hiUnit)
+ {
+ heap[loUnit] = 0;
+ }
+ for (i = 0, s0.SetPrev(s0), s0.SetNext(s0); i < N_INDEXES; i++)
+ {
+ while (freeList[i].GetNext() != 0)
+ {
+ p.Address = removeNode(i); // =(RAR_MEM_BLK*)RemoveNode(i);
+ p.InsertAt(s0); // p->insertAt(&s0);
+ p.Stamp = 0xFFFF; // p->Stamp=0xFFFF;
+ p.SetNU(indx2Units[i]); // p->NU=Indx2Units[i];
+ }
+ }
+ for (p.Address = s0.GetNext(); p.Address != s0.Address; p.Address = p.GetNext())
+ {
+ // while ((p1=MBPtr(p,p->NU))->Stamp == 0xFFFF && int(p->NU)+p1->NU
+ // < 0x10000)
+ // Bug fixed
+ p1.Address = MBPtr(p.Address, p.GetNU());
+ while (p1.Stamp == 0xFFFF && p.GetNU() + p1.GetNU() < 0x10000)
+ {
+ p1.Remove();
+ p.SetNU(p.GetNU() + p1.GetNU()); // ->NU += p1->NU;
+ p1.Address = MBPtr(p.Address, p.GetNU());
+ }
+ }
+ // while ((p=s0.next) != &s0)
+ // Bug fixed
+ p.Address = s0.GetNext();
+ while (p.Address != s0.Address)
+ {
+ for (p.Remove(), sz = p.GetNU(); sz > 128; sz -= 128, p.Address = MBPtr(p.Address, 128))
+ {
+ insertNode(p.Address, N_INDEXES - 1);
+ }
+ if (indx2Units[i = units2Indx[sz - 1]] != sz)
+ {
+ k = sz - indx2Units[--i];
+ insertNode(MBPtr(p.Address, sz - k), k - 1);
+ }
+ insertNode(p.Address, i);
+ p.Address = s0.GetNext();
+ }
+ }
+
+ private int allocUnitsRare(int indx)
+ {
+ if (glueCount == 0)
+ {
+ glueCount = 255;
+ glueFreeBlocks();
+ if (freeList[indx].GetNext() != 0)
+ {
+ return removeNode(indx);
+ }
+ }
+ int i = indx;
+ do
+ {
+ if (++i == N_INDEXES)
+ {
+ glueCount--;
+ i = U2B(indx2Units[indx]);
+ int j = FIXED_UNIT_SIZE * indx2Units[indx];
+ if (fakeUnitsStart - pText > j)
+ {
+ fakeUnitsStart -= j;
+ unitsStart -= i;
+ return unitsStart;
+ }
+ return (0);
+ }
+ }
+ while (freeList[i].GetNext() == 0);
+ int retVal = removeNode(i);
+ splitBlock(retVal, i, indx);
+ return retVal;
+ }
+
+ public virtual int allocUnits(int NU)
+ {
+ int indx = units2Indx[NU - 1];
+ if (freeList[indx].GetNext() != 0)
+ {
+ return removeNode(indx);
+ }
+ int retVal = loUnit;
+ loUnit += U2B(indx2Units[indx]);
+ if (loUnit <= hiUnit)
+ {
+ return retVal;
+ }
+ loUnit -= U2B(indx2Units[indx]);
+ return allocUnitsRare(indx);
+ }
+
+ public virtual int allocContext()
+ {
+ if (hiUnit != loUnit)
+ return (hiUnit -= UNIT_SIZE);
+ if (freeList[0].GetNext() != 0)
+ {
+ return removeNode(0);
+ }
+ return allocUnitsRare(0);
+ }
+
+ public virtual int expandUnits(int oldPtr, int OldNU)
+ {
+ int i0 = units2Indx[OldNU - 1];
+ int i1 = units2Indx[OldNU - 1 + 1];
+ if (i0 == i1)
+ {
+ return oldPtr;
+ }
+ int ptr = allocUnits(OldNU + 1);
+ if (ptr != 0)
+ {
+ // memcpy(ptr,OldPtr,U2B(OldNU));
+ Array.Copy(heap, oldPtr, heap, ptr, U2B(OldNU));
+ insertNode(oldPtr, i0);
+ }
+ return ptr;
+ }
+
+ public virtual int shrinkUnits(int oldPtr, int oldNU, int newNU)
+ {
+ // System.out.println("SubAllocator.shrinkUnits(" + OldPtr + ", " +
+ // OldNU + ", " + NewNU + ")");
+ int i0 = units2Indx[oldNU - 1];
+ int i1 = units2Indx[newNU - 1];
+ if (i0 == i1)
+ {
+ return oldPtr;
+ }
+ if (freeList[i1].GetNext() != 0)
+ {
+ int ptr = removeNode(i1);
+ // memcpy(ptr,OldPtr,U2B(NewNU));
+ // for (int i = 0; i < U2B(NewNU); i++) {
+ // heap[ptr + i] = heap[OldPtr + i];
+ // }
+ Array.Copy(heap, oldPtr, heap, ptr, U2B(newNU));
+ insertNode(oldPtr, i0);
+ return ptr;
+ }
+ else
+ {
+ splitBlock(oldPtr, i0, i1);
+ return oldPtr;
+ }
+ }
+
+ public virtual void freeUnits(int ptr, int OldNU)
+ {
+ insertNode(ptr, units2Indx[OldNU - 1]);
+ }
+
+ public virtual void decPText(int dPText)
+ {
+ PText = PText - dPText;
+ }
+
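+ // Splits the heap into a text area (roughly 1/8 of the requested size, growing up from
+ // heapStart) and a units area (the remaining ~7/8, between loUnit and hiUnit), then
+ // builds the index/units lookup tables.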
+ public virtual void initSubAllocator()
+ {
+ int i, k;
+ Utility.Fill(heap, freeListPos, freeListPos + sizeOfFreeList(), (byte)0);
+
+ pText = heapStart;
+
+ int size2 = FIXED_UNIT_SIZE * (subAllocatorSize / 8 / FIXED_UNIT_SIZE * 7);
+ int realSize2 = size2 / FIXED_UNIT_SIZE * UNIT_SIZE;
+ int size1 = subAllocatorSize - size2;
+ int realSize1 = size1 / FIXED_UNIT_SIZE * UNIT_SIZE + size1 % FIXED_UNIT_SIZE;
+ hiUnit = heapStart + subAllocatorSize;
+ loUnit = unitsStart = heapStart + realSize1;
+ fakeUnitsStart = heapStart + size1;
+ hiUnit = loUnit + realSize2;
+
+ for (i = 0, k = 1; i < N1; i++, k += 1)
+ {
+ indx2Units[i] = k & 0xff;
+ }
+ for (k++; i < N1 + N2; i++, k += 2)
+ {
+ indx2Units[i] = k & 0xff;
+ }
+ for (k++; i < N1 + N2 + N3; i++, k += 3)
+ {
+ indx2Units[i] = k & 0xff;
+ }
+ for (k++; i < (N1 + N2 + N3 + N4); i++, k += 4)
+ {
+ indx2Units[i] = k & 0xff;
+ }
+
+ for (glueCount = 0, k = 0, i = 0; k < 128; k++)
+ {
+ i += ((indx2Units[i] < (k + 1)) ? 1 : 0);
+ units2Indx[k] = i & 0xff;
+ }
+ }
+
+ private int sizeOfFreeList()
+ {
+ return freeList.Length * RarNode.size;
+ }
+
+ // Debug
+ // public void dumpHeap() {
+ // File file = new File("P:\\test\\heapdumpj");
+ // OutputStream out = null;
+ // try {
+ // out = new FileOutputStream(file);
+ // out.write(heap, heapStart, heapEnd - heapStart);
+ // out.flush();
+ // System.out.println("Heap dumped to " + file.getAbsolutePath());
+ // }
+ // catch (IOException e) {
+ // e.printStackTrace();
+ // }
+ // finally {
+ // FileUtil.close(out);
+ // }
+ // }
+
+ // Debug
+ public override System.String ToString()
+ {
+ StringBuilder buffer = new StringBuilder();
+ buffer.Append("SubAllocator[");
+ buffer.Append("\n subAllocatorSize=");
+ buffer.Append(subAllocatorSize);
+ buffer.Append("\n glueCount=");
+ buffer.Append(glueCount);
+ buffer.Append("\n heapStart=");
+ buffer.Append(heapStart);
+ buffer.Append("\n loUnit=");
+ buffer.Append(loUnit);
+ buffer.Append("\n hiUnit=");
+ buffer.Append(hiUnit);
+ buffer.Append("\n pText=");
+ buffer.Append(pText);
+ buffer.Append("\n unitsStart=");
+ buffer.Append(unitsStart);
+ buffer.Append("\n]");
+ return buffer.ToString();
+ }
+ static SubAllocator()
+ {
+ UNIT_SIZE = System.Math.Max(PPMContext.size, RarMemBlock.size);
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Allocator.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Allocator.cs
new file mode 100644
index 00000000..e85afa88
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Allocator.cs
@@ -0,0 +1,457 @@
+
+namespace Compress.SevenZip.Compress.PPmd.I1
+{
+
+ /// Allocate a single, large array and then provide sections of this array to callers. Callers are provided with
+ /// instances of Pointer (which simply contain a single address value, representing a location
+ /// in the large array). Callers can then cast a Pointer to one of the other pointer-like structures (all
+ /// of which also simply contain a single address value).
+ internal class Allocator
+ {
+ private const uint UnitSize = 12;
+ private const uint LocalOffset = 4; // reserve the first four bytes for Pointer.Zero
+ private const uint NodeOffset = LocalOffset + MemoryNode.Size; // reserve space for a single memory node
+ private const uint HeapOffset = NodeOffset + IndexCount * MemoryNode.Size; // reserve space for the array of memory nodes
+ private const uint N1 = 4;
+ private const uint N2 = 4;
+ private const uint N3 = 4;
+ private const uint N4 = (128 + 3 - 1 * N1 - 2 * N2 - 3 * N3) / 4;
+ private const uint IndexCount = N1 + N2 + N3 + N4;
+
+ private static readonly byte[] indexToUnits;
+ private static readonly byte[] unitsToIndex;
+
+ public uint AllocatorSize;
+ public uint GlueCount;
+ public Pointer BaseUnit;
+ public Pointer LowUnit;
+ public Pointer HighUnit;
+ public Pointer Text;
+ public Pointer Heap;
+ public MemoryNode[] MemoryNodes;
+
+ public byte[] Memory;
+
+ ///
+ /// Initializes static read-only arrays used by the Allocator.
+ ///
+ static Allocator()
+ {
+ // Construct the static index to units lookup array. It will contain the following values.
+ //
+ // 1 2 3 4 6 8 10 12 15 18 21 24 28 32 36 40 44 48 52 56 60 64 68 72 76 80 84 88 92 96 100 104 108
+ // 112 116 120 124 128
+
+ uint index;
+ uint unitCount;
+
+ indexToUnits = new byte[IndexCount];
+
+ for (index = 0, unitCount = 1; index < N1; index++, unitCount += 1)
+ indexToUnits[index] = (byte)unitCount;
+
+ for (unitCount++; index < N1 + N2; index++, unitCount += 2)
+ indexToUnits[index] = (byte)unitCount;
+
+ for (unitCount++; index < N1 + N2 + N3; index++, unitCount += 3)
+ indexToUnits[index] = (byte)unitCount;
+
+ for (unitCount++; index < N1 + N2 + N3 + N4; index++, unitCount += 4)
+ indexToUnits[index] = (byte)unitCount;
+
+ // Construct the static units to index lookup array. It will contain the following values.
+ //
+ // 00 01 02 03 04 04 05 05 06 06 07 07 08 08 08 09 09 09 10 10 10 11 11 11 12 12 12 12 13 13 13 13
+ // 14 14 14 14 15 15 15 15 16 16 16 16 17 17 17 17 18 18 18 18 19 19 19 19 20 20 20 20 21 21 21 21
+ // 22 22 22 22 23 23 23 23 24 24 24 24 25 25 25 25 26 26 26 26 27 27 27 27 28 28 28 28 29 29 29 29
+ // 30 30 30 30 31 31 31 31 32 32 32 32 33 33 33 33 34 34 34 34 35 35 35 35 36 36 36 36 37 37 37 37
+
+ unitsToIndex = new byte[128];
+
+ for (unitCount = index = 0; unitCount < 128; unitCount++)
+ {
+ index += (uint)((indexToUnits[index] < unitCount + 1) ? 1 : 0);
+ unitsToIndex[unitCount] = (byte)index;
+ }
+ }
+
+ #region Public Methods
+
+ public Allocator()
+ {
+ MemoryNodes = new MemoryNode[IndexCount];
+ }
+
+ ///
+ /// Initialize or reset the memory allocator (so that the single, large array can be re-used without destroying
+ /// and re-creating it).
+ ///
+ public void Initialize()
+ {
+ for (int index = 0; index < IndexCount; index++)
+ {
+ MemoryNodes[index] = new MemoryNode((uint)(NodeOffset + index * MemoryNode.Size), Memory);
+ MemoryNodes[index].Stamp = 0;
+ MemoryNodes[index].Next = MemoryNode.Zero;
+ MemoryNodes[index].UnitCount = 0;
+ }
+
+ Text = Heap;
+
+ uint difference = UnitSize * (AllocatorSize / 8 / UnitSize * 7);
+
+ HighUnit = Heap + AllocatorSize;
+ LowUnit = HighUnit - difference;
+ BaseUnit = HighUnit - difference;
+
+ GlueCount = 0;
+ }
+
+ ///
+ /// Start the allocator (create a single, large array of bytes).
+ ///
+ ///
+ /// Note that .NET will create that array on the large object heap (because it is so large).
+ ///
+ ///
+ public void Start(int allocatorSize)
+ {
+ uint size = (uint)allocatorSize;
+ if (AllocatorSize != size)
+ {
+ Stop();
+ Memory = new byte[HeapOffset + size]; // the single, large array of bytes
+ Heap = new Pointer(HeapOffset, Memory); // reserve bytes in the range 0 .. HeapOffset - 1
+ AllocatorSize = size;
+ }
+ }
+
+ ///
+ /// Stop the allocator (free the single, large array of bytes). This can safely be called multiple times (without
+ /// intervening calls to Start).
+ ///
+ ///
+ /// Because the array is on the large object heap it may not be freed immediately.
+ ///
+ public void Stop()
+ {
+ if (AllocatorSize != 0)
+ {
+ AllocatorSize = 0;
+ Memory = null;
+ Heap = Pointer.Zero;
+ }
+ }
+
+ ///
+ /// Determine how much memory (from the single, large array) is currently in use.
+ ///
+ ///
+ public uint GetMemoryUsed()
+ {
+ uint memoryUsed = AllocatorSize - (HighUnit - LowUnit) - (BaseUnit - Text);
+ for (uint index = 0; index < IndexCount; index++)
+ memoryUsed -= UnitSize * indexToUnits[index] * MemoryNodes[index].Stamp;
+ return memoryUsed;
+ }
+
+ ///
+ /// Allocate a given number of units from the single, large array. Each unit is UnitSize (12) bytes
+ /// in size.
+ ///
+ ///
+ ///
+ public Pointer AllocateUnits(uint unitCount)
+ {
+ uint index = unitsToIndex[unitCount - 1];
+ if (MemoryNodes[index].Available)
+ return MemoryNodes[index].Remove();
+
+ Pointer allocatedBlock = LowUnit;
+ LowUnit += indexToUnits[index] * UnitSize;
+ if (LowUnit <= HighUnit)
+ return allocatedBlock;
+
+ LowUnit -= indexToUnits[index] * UnitSize;
+ return AllocateUnitsRare(index);
+ }
+
+ ///
+ /// Allocate enough space for a PpmContext instance in the single, large array.
+ ///
+ ///
+ public Pointer AllocateContext()
+ {
+ if (HighUnit != LowUnit)
+ return (HighUnit -= UnitSize);
+ else if (MemoryNodes[0].Available)
+ return MemoryNodes[0].Remove();
+ else
+ return AllocateUnitsRare(0);
+ }
+
+ ///
+ /// Increase the size of an existing allocation (represented by a Pointer).
+ ///
+ ///
+ ///
+ ///
+ public Pointer ExpandUnits(Pointer oldPointer, uint oldUnitCount)
+ {
+ uint oldIndex = unitsToIndex[oldUnitCount - 1];
+ uint newIndex = unitsToIndex[oldUnitCount];
+
+ if (oldIndex == newIndex)
+ return oldPointer;
+
+ Pointer pointer = AllocateUnits(oldUnitCount + 1);
+
+ if (pointer != Pointer.Zero)
+ {
+ CopyUnits(pointer, oldPointer, oldUnitCount);
+ MemoryNodes[oldIndex].Insert(oldPointer, oldUnitCount);
+ }
+
+ return pointer;
+ }
+
+ ///
+ /// Decrease the size of an existing allocation (represented by a Pointer).
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Pointer ShrinkUnits(Pointer oldPointer, uint oldUnitCount, uint newUnitCount)
+ {
+ uint oldIndex = unitsToIndex[oldUnitCount - 1];
+ uint newIndex = unitsToIndex[newUnitCount - 1];
+
+ if (oldIndex == newIndex)
+ return oldPointer;
+
+ if (MemoryNodes[newIndex].Available)
+ {
+ Pointer pointer = MemoryNodes[newIndex].Remove();
+ CopyUnits(pointer, oldPointer, newUnitCount);
+ MemoryNodes[oldIndex].Insert(oldPointer, indexToUnits[oldIndex]);
+ return pointer;
+ }
+ else
+ {
+ SplitBlock(oldPointer, oldIndex, newIndex);
+ return oldPointer;
+ }
+ }
+
+ ///
+ /// Free previously allocated space (the location and amount of space to free must be specified by using
+ /// a Pointer to indicate the location and a number of units to indicate the amount).
+ ///
+ ///
+ ///
+ public void FreeUnits(Pointer pointer, uint unitCount)
+ {
+ uint index = unitsToIndex[unitCount - 1];
+ MemoryNodes[index].Insert(pointer, indexToUnits[index]);
+ }
+
+ public void SpecialFreeUnits(Pointer pointer)
+ {
+ if (pointer != BaseUnit)
+ {
+ MemoryNodes[0].Insert(pointer, 1);
+ }
+ else
+ {
+ MemoryNode memoryNode = pointer;
+ memoryNode.Stamp = uint.MaxValue;
+ BaseUnit += UnitSize;
+ }
+ }
+
+ public Pointer MoveUnitsUp(Pointer oldPointer, uint unitCount)
+ {
+ uint index = unitsToIndex[unitCount - 1];
+
+ if (oldPointer > BaseUnit + 16 * 1024 || oldPointer > MemoryNodes[index].Next)
+ return oldPointer;
+
+ Pointer pointer = MemoryNodes[index].Remove();
+ CopyUnits(pointer, oldPointer, unitCount);
+ unitCount = indexToUnits[index];
+
+ if (oldPointer != BaseUnit)
+ MemoryNodes[index].Insert(oldPointer, unitCount);
+ else
+ BaseUnit += unitCount * UnitSize;
+
+ return pointer;
+ }
+
+ ///
+ /// Expand the space allocated (in the single, large array) for the bytes of the data (ie. the "text") that is
+ /// being encoded or decoded.
+ ///
+ public void ExpandText()
+ {
+ MemoryNode memoryNode;
+ uint[] counts = new uint[IndexCount];
+
+ while ((memoryNode = BaseUnit).Stamp == uint.MaxValue)
+ {
+ BaseUnit = memoryNode + memoryNode.UnitCount;
+ counts[unitsToIndex[memoryNode.UnitCount - 1]]++;
+ memoryNode.Stamp = 0;
+ }
+
+ for (uint index = 0; index < IndexCount; index++)
+ {
+ for (memoryNode = MemoryNodes[index]; counts[index] != 0; memoryNode = memoryNode.Next)
+ {
+ while (memoryNode.Next.Stamp == 0)
+ {
+ memoryNode.Unlink();
+ MemoryNodes[index].Stamp--;
+ if (--counts[index] == 0)
+ break;
+ }
+ }
+ }
+ }
+
+ #endregion
+
+ #region Private Methods
+
+ private Pointer AllocateUnitsRare(uint index)
+ {
+ if (GlueCount == 0)
+ {
+ GlueFreeBlocks();
+ if (MemoryNodes[index].Available)
+ return MemoryNodes[index].Remove();
+ }
+
+ uint oldIndex = index;
+ do
+ {
+ if (++oldIndex == IndexCount)
+ {
+ GlueCount--;
+ oldIndex = indexToUnits[index] * UnitSize;
+ return (BaseUnit - Text > oldIndex) ? (BaseUnit -= oldIndex) : Pointer.Zero;
+ }
+ } while (!MemoryNodes[oldIndex].Available);
+
+ Pointer allocatedBlock = MemoryNodes[oldIndex].Remove();
+ SplitBlock(allocatedBlock, oldIndex, index);
+ return allocatedBlock;
+ }
+
+ private void SplitBlock(Pointer pointer, uint oldIndex, uint newIndex)
+ {
+ uint unitCountDifference = (uint)(indexToUnits[oldIndex] - indexToUnits[newIndex]);
+ Pointer newPointer = pointer + indexToUnits[newIndex] * UnitSize;
+
+ uint index = unitsToIndex[unitCountDifference - 1];
+ if (indexToUnits[index] != unitCountDifference)
+ {
+ uint unitCount = indexToUnits[--index];
+ MemoryNodes[index].Insert(newPointer, unitCount);
+ newPointer += unitCount * UnitSize;
+ unitCountDifference -= unitCount;
+ }
+
+ MemoryNodes[unitsToIndex[unitCountDifference - 1]].Insert(newPointer, unitCountDifference);
+ }
+
+ private void GlueFreeBlocks()
+ {
+ MemoryNode memoryNode = new MemoryNode(LocalOffset, Memory);
+ memoryNode.Stamp = 0;
+ memoryNode.Next = MemoryNode.Zero;
+ memoryNode.UnitCount = 0;
+
+ MemoryNode memoryNode0;
+ MemoryNode memoryNode1;
+ MemoryNode memoryNode2;
+
+ if (LowUnit != HighUnit)
+ LowUnit[0] = 0;
+
+ // Find all unused memory nodes.
+
+ memoryNode1 = memoryNode;
+ for (uint index = 0; index < IndexCount; index++)
+ {
+ while (MemoryNodes[index].Available)
+ {
+ memoryNode0 = MemoryNodes[index].Remove();
+ if (memoryNode0.UnitCount != 0)
+ {
+ while ((memoryNode2 = memoryNode0 + memoryNode0.UnitCount).Stamp == uint.MaxValue)
+ {
+ memoryNode0.UnitCount = memoryNode0.UnitCount + memoryNode2.UnitCount;
+ memoryNode2.UnitCount = 0;
+ }
+ memoryNode1.Link(memoryNode0);
+ memoryNode1 = memoryNode0;
+ }
+ }
+ }
+
+ // Coalesce the memory represented by the unused memory nodes.
+
+ while (memoryNode.Available)
+ {
+ memoryNode0 = memoryNode.Remove();
+ uint unitCount = memoryNode0.UnitCount;
+ if (unitCount != 0)
+ {
+ for (; unitCount > 128; unitCount -= 128, memoryNode0 += 128)
+ MemoryNodes[IndexCount - 1].Insert(memoryNode0, 128);
+
+ uint index = unitsToIndex[unitCount - 1];
+ if (indexToUnits[index] != unitCount)
+ {
+ uint unitCountDifference = unitCount - indexToUnits[--index];
+ MemoryNodes[unitCountDifference - 1].Insert(memoryNode0 + (unitCount - unitCountDifference), unitCountDifference);
+ }
+
+ MemoryNodes[index].Insert(memoryNode0, indexToUnits[index]);
+ }
+ }
+
+ GlueCount = 1 << 13;
+ }
+
+ private void CopyUnits(Pointer target, Pointer source, uint unitCount)
+ {
+ do
+ {
+ target[0] = source[0];
+ target[1] = source[1];
+ target[2] = source[2];
+ target[3] = source[3];
+ target[4] = source[4];
+ target[5] = source[5];
+ target[6] = source[6];
+ target[7] = source[7];
+ target[8] = source[8];
+ target[9] = source[9];
+ target[10] = source[10];
+ target[11] = source[11];
+ target += UnitSize;
+ source += UnitSize;
+ } while (--unitCount != 0);
+ }
+
+ #endregion
+ }
+}
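For reference, a minimal usage sketch of the allocator above (not part of the patch; it assumes the internal Allocator and Pointer types are visible to the caller, and the 16 MB size is arbitrary):

    var allocator = new Allocator();
    allocator.Start(16 << 20);                  // back the allocator with one large byte array (~16 MB plus header space)
    allocator.Initialize();                     // reset the free lists and the Text/BaseUnit/LowUnit/HighUnit boundaries
    Pointer block = allocator.AllocateUnits(4); // request 4 units = 4 * UnitSize = 48 bytes
    if (block != Pointer.Zero)
        allocator.FreeUnits(block, 4);          // return the block to its size-class free list
    allocator.Stop();                           // drop the large array so the GC can reclaim it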
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Coder.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Coder.cs
new file mode 100644
index 00000000..c243ae60
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Coder.cs
@@ -0,0 +1,100 @@
+#region Using
+
+using System.IO;
+
+#endregion
+
+namespace Compress.SevenZip.Compress.PPmd.I1
+{
+ ///
+ /// A simple range coder.
+ ///
+ ///
+ /// Note that in most cases fields are used rather than properties for performance reasons (for example,
+ /// LowCount is a field rather than a property).
+ ///
+ internal class Coder
+ {
+ private const uint RangeTop = 1 << 24;
+ private const uint RangeBottom = 1 << 15;
+ private uint low;
+ private uint code;
+ private uint range;
+
+ public uint LowCount;
+ public uint HighCount;
+ public uint Scale;
+
+ public void RangeEncoderInitialize()
+ {
+ low = 0;
+ range = uint.MaxValue;
+ }
+
+ public void RangeEncoderNormalize(Stream stream)
+ {
+ while ((low ^ (low + range)) < RangeTop || range < RangeBottom && ((range = (uint) -low & (RangeBottom - 1)) != 0 || true))
+ {
+ stream.WriteByte((byte) (low >> 24));
+ range <<= 8;
+ low <<= 8;
+ }
+ }
+
+ public void RangeEncodeSymbol()
+ {
+ low += LowCount * (range /= Scale);
+ range *= HighCount - LowCount;
+ }
+
+ public void RangeShiftEncodeSymbol(int rangeShift)
+ {
+ low += LowCount * (range >>= rangeShift);
+ range *= HighCount - LowCount;
+ }
+
+ public void RangeEncoderFlush(Stream stream)
+ {
+ for (uint index = 0; index < 4; index++)
+ {
+ stream.WriteByte((byte) (low >> 24));
+ low <<= 8;
+ }
+ }
+
+ public void RangeDecoderInitialize(Stream stream)
+ {
+ low = 0;
+ code = 0;
+ range = uint.MaxValue;
+ for (uint index = 0; index < 4; index++)
+ code = (code << 8) | (byte) stream.ReadByte();
+ }
+
+ public void RangeDecoderNormalize(Stream stream)
+ {
+ while ((low ^ (low + range)) < RangeTop || range < RangeBottom && ((range = (uint) -low & (RangeBottom - 1)) != 0 || true))
+ {
+ code = (code << 8) | (byte) stream.ReadByte();
+ range <<= 8;
+ low <<= 8;
+ }
+ }
+
+ public uint RangeGetCurrentCount()
+ {
+ return (code - low) / (range /= Scale);
+ }
+
+ public uint RangeGetCurrentShiftCount(int rangeShift)
+ {
+ return (code - low) / (range >>= rangeShift);
+ }
+
+ public void RangeRemoveSubrange()
+ {
+ low += range * LowCount;
+ range *= HighCount - LowCount;
+ }
+ }
+}
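As an illustration of how the model drives this coder, a sketch (not part of the patch; the frequencies are made up and stand in for the cumulative counts the model would supply):

    var output = new System.IO.MemoryStream();
    var coder = new Coder();
    coder.RangeEncoderInitialize();
    coder.LowCount = 10;                   // cumulative frequency of the symbols below the encoded symbol
    coder.HighCount = 15;                  // cumulative frequency up to and including the encoded symbol
    coder.Scale = 64;                      // total frequency of the context
    coder.RangeEncodeSymbol();             // narrow [low, low + range) to the symbol's sub-interval
    coder.RangeEncoderNormalize(output);   // emit bytes as the top bits of the interval settle
    coder.RangeEncoderFlush(output);       // at end of stream, flush the remaining four bytes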
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/MemoryNode.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/MemoryNode.cs
new file mode 100644
index 00000000..bde225f8
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/MemoryNode.cs
@@ -0,0 +1,246 @@
+#region Using
+
+
+
+#endregion
+
+namespace Compress.SevenZip.Compress.PPmd.I1
+{
+ ///
+ /// A structure containing a single address. The address represents a location in the Memory
+ /// array. That location in the array contains information itself describing a section
+ /// of the array (ie. a block of memory).
+ ///
+ ///
+ ///
+ /// This must be a structure rather than a class because several places in the associated code assume that
+ /// MemoryNode is a value type (meaning that assignment creates a completely new copy of
+ /// the instance rather than just copying a reference to the same instance).
+ ///
+ ///
+ /// MemoryNode
+ /// 4 Stamp
+ /// 4 Next
+ /// 4 UnitCount
+ ///
+ ///
+ /// Note that Address is a field rather than a property for performance reasons.
+ ///
+ ///
+ internal struct MemoryNode
+ {
+ public uint Address;
+ public byte[] Memory;
+ public static readonly MemoryNode Zero = new MemoryNode(0, null);
+ public const int Size = 12;
+
+ ///
+ /// Initializes a new instance of the MemoryNode structure.
+ ///
+ public MemoryNode(uint address, byte[] memory)
+ {
+ Address = address;
+ Memory = memory;
+ }
+
+ ///
+ /// Gets or sets the stamp.
+ ///
+ public uint Stamp
+ {
+ get { return ((uint) Memory[Address]) | ((uint) Memory[Address + 1]) << 8 | ((uint) Memory[Address + 2]) << 16 | ((uint) Memory[Address + 3]) << 24; }
+ set
+ {
+ Memory[Address] = (byte) value;
+ Memory[Address + 1] = (byte) (value >> 8);
+ Memory[Address + 2] = (byte) (value >> 16);
+ Memory[Address + 3] = (byte) (value >> 24);
+ }
+ }
+
+ ///
+ /// Gets or sets the next memory node.
+ ///
+ public MemoryNode Next
+ {
+ get { return new MemoryNode(((uint) Memory[Address + 4]) | ((uint) Memory[Address + 5]) << 8 | ((uint) Memory[Address + 6]) << 16 | ((uint) Memory[Address + 7]) << 24, Memory); }
+ set
+ {
+ Memory[Address + 4] = (byte) value.Address;
+ Memory[Address + 5] = (byte) (value.Address >> 8);
+ Memory[Address + 6] = (byte) (value.Address >> 16);
+ Memory[Address + 7] = (byte) (value.Address >> 24);
+ }
+ }
+
+ ///
+ /// Gets or sets the unit count.
+ ///
+ public uint UnitCount
+ {
+ get { return ((uint) Memory[Address + 8]) | ((uint) Memory[Address + 9]) << 8 | ((uint) Memory[Address + 10]) << 16 | ((uint) Memory[Address + 11]) << 24; }
+ set
+ {
+ Memory[Address + 8] = (byte) value;
+ Memory[Address + 9] = (byte) (value >> 8);
+ Memory[Address + 10] = (byte) (value >> 16);
+ Memory[Address + 11] = (byte) (value >> 24);
+ }
+ }
+
+ ///
+ /// Gets whether there is a next memory node available.
+ ///
+ public bool Available
+ {
+ get { return Next.Address != 0; }
+ }
+
+ ///
+ /// Link in the provided memory node.
+ ///
+ ///
+ public void Link(MemoryNode memoryNode)
+ {
+ memoryNode.Next = Next;
+ Next = memoryNode;
+ }
+
+ ///
+ /// Unlink this memory node.
+ ///
+ public void Unlink()
+ {
+ Next = Next.Next;
+ }
+
+ ///
+ /// Insert the memory node into the linked list.
+ ///
+ ///
+ ///
+ public void Insert(MemoryNode memoryNode, uint unitCount)
+ {
+ Link(memoryNode);
+ memoryNode.Stamp = uint.MaxValue;
+ memoryNode.UnitCount = unitCount;
+ Stamp++;
+ }
+
+ ///
+ /// Remove this memory node from the linked list.
+ ///
+ ///
+ public MemoryNode Remove()
+ {
+ MemoryNode next = Next;
+ Unlink();
+ Stamp--;
+ return next;
+ }
+
+ ///
+ /// Allow a pointer to be implicitly converted to a memory node.
+ ///
+ ///
+ ///
+ public static implicit operator MemoryNode(Pointer pointer)
+ {
+ return new MemoryNode(pointer.Address, pointer.Memory);
+ }
+
+ ///
+ /// Allow pointer-like addition on a memory node.
+ ///
+ ///
+ ///
+ ///
+ public static MemoryNode operator +(MemoryNode memoryNode, int offset)
+ {
+ memoryNode.Address = (uint) (memoryNode.Address + offset * Size);
+ return memoryNode;
+ }
+
+ ///
+ /// Allow pointer-like addition on a memory node.
+ ///
+ ///
+ ///
+ ///
+ public static MemoryNode operator +(MemoryNode memoryNode, uint offset)
+ {
+ memoryNode.Address += offset * Size;
+ return memoryNode;
+ }
+
+ ///
+ /// Allow pointer-like subtraction on a memory node.
+ ///
+ ///
+ ///
+ ///
+ public static MemoryNode operator -(MemoryNode memoryNode, int offset)
+ {
+ memoryNode.Address = (uint) (memoryNode.Address - offset * Size);
+ return memoryNode;
+ }
+
+ ///
+ /// Allow pointer-like subtraction on a memory node.
+ ///
+ ///
+ ///
+ ///
+ public static MemoryNode operator -(MemoryNode memoryNode, uint offset)
+ {
+ memoryNode.Address -= offset * Size;
+ return memoryNode;
+ }
+
+ ///
+ /// Compare two memory nodes.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator ==(MemoryNode memoryNode1, MemoryNode memoryNode2)
+ {
+ return memoryNode1.Address == memoryNode2.Address;
+ }
+
+ ///
+ /// Compare two memory nodes.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator !=(MemoryNode memoryNode1, MemoryNode memoryNode2)
+ {
+ return memoryNode1.Address != memoryNode2.Address;
+ }
+
+ ///
+ /// Indicates whether this instance and a specified object are equal.
+ ///
+ /// true if obj and this instance are the same type and represent the same value; otherwise, false.
+ /// Another object to compare to.
+ public override bool Equals(object obj)
+ {
+ if (obj is MemoryNode)
+ {
+ MemoryNode memoryNode = (MemoryNode) obj;
+ return memoryNode.Address == Address;
+ }
+ return base.Equals(obj);
+ }
+
+ ///
+ /// Returns the hash code for this instance.
+ ///
+ /// A 32-bit signed integer that is the hash code for this instance.
+ public override int GetHashCode()
+ {
+ return Address.GetHashCode();
+ }
+ }
+}
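To make the 12-byte node layout concrete, a small sketch (not part of the patch; the offsets are chosen arbitrarily):

    byte[] memory = new byte[64];
    var node = new MemoryNode(4, memory);  // the node lives at offset 4; offset 0 is reserved so that Address == 0 means "null"
    node.Stamp = 1;                        // memory[4..7]   = 01 00 00 00 (little-endian)
    node.UnitCount = 3;                    // memory[12..15] = 03 00 00 00
    node.Next = MemoryNode.Zero;           // memory[8..11]  = 00 00 00 00, so...
    bool hasNext = node.Available;         // ...Available is false because Next.Address == 0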
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Model.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Model.cs
new file mode 100644
index 00000000..f464c626
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Model.cs
@@ -0,0 +1,819 @@
+#region Using
+
+using System;
+using System.IO;
+
+#endregion
+
+// This is a port of Dmitry Shkarin's PPMd Variant I Revision 1.
+// Ported by Michael Bone (mjbone03@yahoo.com.au).
+namespace Compress.SevenZip.Compress.PPmd.I1
+{
+ ///
+ /// The model.
+ ///
+ internal partial class Model
+ {
+ public const uint Signature = 0x84acaf8fU;
+ public const char Variant = 'I';
+ public const int MaximumOrder = 16; // maximum allowed model order
+
+ private const byte UpperFrequency = 5;
+ private const byte IntervalBitCount = 7;
+ private const byte PeriodBitCount = 7;
+ private const byte TotalBitCount = IntervalBitCount + PeriodBitCount;
+ private const uint Interval = 1 << IntervalBitCount;
+ private const uint BinaryScale = 1 << TotalBitCount;
+ private const uint MaximumFrequency = 124;
+ private const uint OrderBound = 9;
+
+ private See2Context[,] see2Contexts;
+ private See2Context emptySee2Context;
+ private PpmContext maximumContext;
+ private ushort[,] binarySummary = new ushort[25, 64]; // binary SEE-contexts
+ private byte[] numberStatisticsToBinarySummaryIndex = new byte[256];
+ private byte[] probabilities = new byte[260];
+ private byte[] characterMask = new byte[256];
+ private byte escapeCount;
+ private int modelOrder;
+ private int orderFall;
+ private int initialEscape;
+ private int initialRunLength;
+ private int runLength;
+ private byte previousSuccess;
+ private byte numberMasked;
+ private ModelRestorationMethod method;
+ private PpmState foundState; // found next state transition
+
+ private Allocator Allocator;
+ private Coder Coder;
+ private PpmContext minimumContext;
+ private byte numberStatistics;
+ private PpmState[] decodeStates = new PpmState[256];
+
+ private static readonly ushort[] InitialBinaryEscapes = { 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051 };
+ private static readonly byte[] ExponentialEscapes = { 25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 };
+
+ #region Public Methods
+
+ public Model()
+ {
+ // Construct the conversion table for number statistics. Initially it will contain the following values.
+ //
+ // 0 2 4 4 4 4 4 4 4 4 4 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
+ // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
+ // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
+ // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
+ // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
+ // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
+ // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
+ // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
+
+ numberStatisticsToBinarySummaryIndex[0] = 2 * 0;
+ numberStatisticsToBinarySummaryIndex[1] = 2 * 1;
+ for (int index = 2; index < 11; index++)
+ numberStatisticsToBinarySummaryIndex[index] = 2 * 2;
+ for (int index = 11; index < 256; index++)
+ numberStatisticsToBinarySummaryIndex[index] = 2 * 3;
+
+ // Construct the probability table. Initially it will contain the following values (depending on the value of
+ // the upper frequency).
+ //
+ // 00 01 02 03 04 05 06 06 07 07 07 08 08 08 08 09 09 09 09 09 10 10 10 10 10 10 11 11 11 11 11 11
+ // 11 12 12 12 12 12 12 12 12 13 13 13 13 13 13 13 13 13 14 14 14 14 14 14 14 14 14 14 15 15 15 15
+ // 15 15 15 15 15 15 15 16 16 16 16 16 16 16 16 16 16 16 16 17 17 17 17 17 17 17 17 17 17 17 17 17
+ // 18 18 18 18 18 18 18 18 18 18 18 18 18 18 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 20 20 20
+ // 20 20 20 20 20 20 20 20 20 20 20 20 20 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 22 22
+ // 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23
+ // 23 23 23 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 25 25 25 25 25 25 25 25 25
+ // 25 25 25 25 25 25 25 25 25 25 25 25 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26
+ // 26 26 27 27
+
+ uint count = 1;
+ uint step = 1;
+ uint probability = UpperFrequency;
+
+ for (int index = 0; index < UpperFrequency; index++)
+ probabilities[index] = (byte)index;
+
+ for (int index = UpperFrequency; index < 260; index++)
+ {
+ probabilities[index] = (byte)probability;
+ count--;
+ if (count == 0)
+ {
+ step++;
+ count = step;
+ probability++;
+ }
+ }
+
+ // Create the context array.
+
+ see2Contexts = new See2Context[24, 32];
+ for (int index1 = 0; index1 < 24; index1++)
+ for (int index2 = 0; index2 < 32; index2++)
+ see2Contexts[index1, index2] = new See2Context();
+
+ // Set the signature (identifying the algorithm).
+
+ emptySee2Context = new See2Context();
+ emptySee2Context.Summary = (ushort)(Signature & 0x0000ffff);
+ emptySee2Context.Shift = (byte)((Signature >> 16) & 0x000000ff);
+ emptySee2Context.Count = (byte)(Signature >> 24);
+ }
+
+ ///
+ /// Encode (ie. compress) a given source stream, writing the encoded result to the target stream.
+ ///
+ public void Encode(Stream target, Stream source, PpmdProperties properties)
+ {
+ if (target == null)
+ throw new ArgumentNullException("target");
+
+ if (source == null)
+ throw new ArgumentNullException("source");
+
+ EncodeStart(properties);
+ EncodeBlock(target, source, true);
+ }
+
+ internal Coder EncodeStart(PpmdProperties properties)
+ {
+ Allocator = properties.Allocator;
+ Coder = new Coder();
+ Coder.RangeEncoderInitialize();
+ StartModel(properties.ModelOrder, properties.ModelRestorationMethod);
+ return Coder;
+ }
+
+ internal void EncodeBlock(Stream target, Stream source, bool final)
+ {
+ while (true)
+ {
+ minimumContext = maximumContext;
+ numberStatistics = minimumContext.NumberStatistics;
+
+ int c = source.ReadByte();
+ if (c < 0 && !final)
+ return;
+
+ if (numberStatistics != 0)
+ {
+ EncodeSymbol1(c, minimumContext);
+ Coder.RangeEncodeSymbol();
+ }
+ else
+ {
+ EncodeBinarySymbol(c, minimumContext);
+ Coder.RangeShiftEncodeSymbol(TotalBitCount);
+ }
+
+ while (foundState == PpmState.Zero)
+ {
+ Coder.RangeEncoderNormalize(target);
+ do
+ {
+ orderFall++;
+ minimumContext = minimumContext.Suffix;
+ if (minimumContext == PpmContext.Zero)
+ goto StopEncoding;
+ } while (minimumContext.NumberStatistics == numberMasked);
+ EncodeSymbol2(c, minimumContext);
+ Coder.RangeEncodeSymbol();
+ }
+
+ if (orderFall == 0 && (Pointer)foundState.Successor >= Allocator.BaseUnit)
+ {
+ maximumContext = foundState.Successor;
+ }
+ else
+ {
+ UpdateModel(minimumContext);
+ if (escapeCount == 0)
+ ClearMask();
+ }
+
+ Coder.RangeEncoderNormalize(target);
+ }
+
+ StopEncoding:
+ Coder.RangeEncoderFlush(target);
+ }
+
+
+ ///
+ /// Decode (ie. decompress) a given source stream, writing the decoded result to the target stream.
+ ///
+ public void Decode(Stream target, Stream source, PpmdProperties properties)
+ {
+ if (target == null)
+ throw new ArgumentNullException("target");
+
+ if (source == null)
+ throw new ArgumentNullException("source");
+
+ DecodeStart(source, properties);
+ byte[] buffer = new byte[65536];
+ int read;
+ while ((read = DecodeBlock(source, buffer, 0, buffer.Length)) != 0)
+ target.Write(buffer, 0, read);
+
+ return;
+ }
+
+ internal Coder DecodeStart(Stream source, PpmdProperties properties)
+ {
+ Allocator = properties.Allocator;
+ Coder = new Coder();
+ Coder.RangeDecoderInitialize(source);
+ StartModel(properties.ModelOrder, properties.ModelRestorationMethod);
+ minimumContext = maximumContext;
+ numberStatistics = minimumContext.NumberStatistics;
+ return Coder;
+ }
+
+ internal int DecodeBlock(Stream source, byte[] buffer, int offset, int count)
+ {
+ if (minimumContext == PpmContext.Zero)
+ return 0;
+
+ int total = 0;
+ while (total < count)
+ {
+ if (numberStatistics != 0)
+ DecodeSymbol1(minimumContext);
+ else
+ DecodeBinarySymbol(minimumContext);
+
+ Coder.RangeRemoveSubrange();
+
+ while (foundState == PpmState.Zero)
+ {
+ Coder.RangeDecoderNormalize(source);
+ do
+ {
+ orderFall++;
+ minimumContext = minimumContext.Suffix;
+ if (minimumContext == PpmContext.Zero)
+ goto StopDecoding;
+ } while (minimumContext.NumberStatistics == numberMasked);
+ DecodeSymbol2(minimumContext);
+ Coder.RangeRemoveSubrange();
+ }
+
+ buffer[offset] = foundState.Symbol;
+ offset++;
+ total++;
+
+ if (orderFall == 0 && (Pointer)foundState.Successor >= Allocator.BaseUnit)
+ {
+ maximumContext = foundState.Successor;
+ }
+ else
+ {
+ UpdateModel(minimumContext);
+ if (escapeCount == 0)
+ ClearMask();
+ }
+
+ minimumContext = maximumContext;
+ numberStatistics = minimumContext.NumberStatistics;
+ Coder.RangeDecoderNormalize(source);
+ }
+
+ StopDecoding:
+ return total;
+ }
+
+ #endregion
+
+ #region Private Methods
+
+ ///
+ /// Initialise the model (unless the model order is less than 2, in which case only the character mask is
+ /// cleared so that the existing statistics are carried over, allowing "solid" mode compression).
+ ///
+ private void StartModel(int modelOrder, ModelRestorationMethod modelRestorationMethod)
+ {
+ Array.Clear(characterMask, 0, characterMask.Length);
+ escapeCount = 1;
+
+ // Compress in "solid" mode if the model order value is set to 1 (this will examine the current PPM context
+ // structures to determine the value of orderFall).
+
+ if (modelOrder < 2)
+ {
+ orderFall = this.modelOrder;
+ for (PpmContext context = maximumContext; context.Suffix != PpmContext.Zero; context = context.Suffix)
+ orderFall--;
+ return;
+ }
+
+ this.modelOrder = modelOrder;
+ orderFall = modelOrder;
+ method = modelRestorationMethod;
+ Allocator.Initialize();
+ initialRunLength = -((modelOrder < 12) ? modelOrder : 12) - 1;
+ runLength = initialRunLength;
+
+ // Allocate the context structure.
+
+ maximumContext = Allocator.AllocateContext();
+ maximumContext.Suffix = PpmContext.Zero;
+ maximumContext.NumberStatistics = 255;
+ maximumContext.SummaryFrequency = (ushort)(maximumContext.NumberStatistics + 2);
+ maximumContext.Statistics = Allocator.AllocateUnits(256 / 2); // allocates enough space for 256 PPM states (each is 6 bytes)
+
+ previousSuccess = 0;
+ for (int index = 0; index < 256; index++)
+ {
+ PpmState state = maximumContext.Statistics[index];
+ state.Symbol = (byte)index;
+ state.Frequency = 1;
+ state.Successor = PpmContext.Zero;
+ }
+
+ uint probability = 0;
+ for (int index1 = 0; probability < 25; probability++)
+ {
+ while (probabilities[index1] == probability)
+ index1++;
+ for (int index2 = 0; index2 < 8; index2++)
+ binarySummary[probability, index2] = (ushort)(BinaryScale - InitialBinaryEscapes[index2] / (index1 + 1));
+ for (int index2 = 8; index2 < 64; index2 += 8)
+ for (int index3 = 0; index3 < 8; index3++)
+ binarySummary[probability, index2 + index3] = binarySummary[probability, index3];
+ }
+
+ probability = 0;
+ for (uint index1 = 0; probability < 24; probability++)
+ {
+ while (probabilities[index1 + 3] == probability + 3)
+ index1++;
+ for (int index2 = 0; index2 < 32; index2++)
+ see2Contexts[probability, index2].Initialize(2 * index1 + 5);
+ }
+ }
+
+ private void UpdateModel(PpmContext minimumContext)
+ {
+ PpmState state = PpmState.Zero;
+ PpmContext Successor;
+ PpmContext currentContext = maximumContext;
+ uint numberStatistics;
+ uint ns1;
+ uint cf;
+ uint sf;
+ uint s0;
+ uint foundStateFrequency = foundState.Frequency;
+ byte foundStateSymbol = foundState.Symbol;
+ byte symbol;
+ byte flag;
+
+ PpmContext foundStateSuccessor = foundState.Successor;
+ PpmContext context = minimumContext.Suffix;
+
+ if ((foundStateFrequency < MaximumFrequency / 4) && (context != PpmContext.Zero))
+ {
+ if (context.NumberStatistics != 0)
+ {
+ state = context.Statistics;
+ if (state.Symbol != foundStateSymbol)
+ {
+ do
+ {
+ symbol = state[1].Symbol;
+ state++;
+ } while (symbol != foundStateSymbol);
+ if (state[0].Frequency >= state[-1].Frequency)
+ {
+ Swap(state[0], state[-1]);
+ state--;
+ }
+ }
+ cf = (uint)((state.Frequency < MaximumFrequency - 9) ? 2 : 0);
+ state.Frequency += (byte)cf;
+ context.SummaryFrequency += (byte)cf;
+ }
+ else
+ {
+ state = context.FirstState;
+ state.Frequency += (byte)((state.Frequency < 32) ? 1 : 0);
+ }
+ }
+
+ if (orderFall == 0 && foundStateSuccessor != PpmContext.Zero)
+ {
+ foundState.Successor = CreateSuccessors(true, state, minimumContext);
+ if (foundState.Successor == PpmContext.Zero)
+ goto RestartModel;
+ maximumContext = foundState.Successor;
+ return;
+ }
+
+ Allocator.Text[0] = foundStateSymbol;
+ Allocator.Text++;
+ Successor = Allocator.Text;
+
+ if (Allocator.Text >= Allocator.BaseUnit)
+ goto RestartModel;
+
+ if (foundStateSuccessor != PpmContext.Zero)
+ {
+ if (foundStateSuccessor < Allocator.BaseUnit)
+ foundStateSuccessor = CreateSuccessors(false, state, minimumContext);
+ }
+ else
+ {
+ foundStateSuccessor = ReduceOrder(state, minimumContext);
+ }
+
+ if (foundStateSuccessor == PpmContext.Zero)
+ goto RestartModel;
+
+ if (--orderFall == 0)
+ {
+ Successor = foundStateSuccessor;
+ Allocator.Text -= (maximumContext != minimumContext) ? 1 : 0;
+ }
+ else if (method > ModelRestorationMethod.Freeze)
+ {
+ Successor = foundStateSuccessor;
+ Allocator.Text = Allocator.Heap;
+ orderFall = 0;
+ }
+
+ numberStatistics = minimumContext.NumberStatistics;
+ s0 = minimumContext.SummaryFrequency - numberStatistics - foundStateFrequency;
+ flag = (byte)((foundStateSymbol >= 0x40) ? 0x08 : 0x00);
+ for (; currentContext != minimumContext; currentContext = currentContext.Suffix)
+ {
+ ns1 = currentContext.NumberStatistics;
+ if (ns1 != 0)
+ {
+ if ((ns1 & 1) != 0)
+ {
+ state = Allocator.ExpandUnits(currentContext.Statistics, (ns1 + 1) >> 1);
+ if (state == PpmState.Zero)
+ goto RestartModel;
+ currentContext.Statistics = state;
+ }
+ currentContext.SummaryFrequency += (ushort)((3 * ns1 + 1 < numberStatistics) ? 1 : 0);
+ }
+ else
+ {
+ state = Allocator.AllocateUnits(1);
+ if (state == PpmState.Zero)
+ goto RestartModel;
+ Copy(state, currentContext.FirstState);
+ currentContext.Statistics = state;
+ if (state.Frequency < MaximumFrequency / 4 - 1)
+ state.Frequency += state.Frequency;
+ else
+ state.Frequency = (byte)(MaximumFrequency - 4);
+ currentContext.SummaryFrequency = (ushort)(state.Frequency + initialEscape + ((numberStatistics > 2) ? 1 : 0));
+ }
+
+ cf = (uint)(2 * foundStateFrequency * (currentContext.SummaryFrequency + 6));
+ sf = s0 + currentContext.SummaryFrequency;
+
+ if (cf < 6 * sf)
+ {
+ cf = (uint)(1 + ((cf > sf) ? 1 : 0) + ((cf >= 4 * sf) ? 1 : 0));
+ currentContext.SummaryFrequency += 4;
+ }
+ else
+ {
+ cf = (uint)(4 + ((cf > 9 * sf) ? 1 : 0) + ((cf > 12 * sf) ? 1 : 0) + ((cf > 15 * sf) ? 1 : 0));
+ currentContext.SummaryFrequency += (ushort)cf;
+ }
+
+ state = currentContext.Statistics + (++currentContext.NumberStatistics);
+ state.Successor = Successor;
+ state.Symbol = foundStateSymbol;
+ state.Frequency = (byte)cf;
+ currentContext.Flags |= flag;
+ }
+
+ maximumContext = foundStateSuccessor;
+ return;
+
+ RestartModel:
+ RestoreModel(currentContext, minimumContext, foundStateSuccessor);
+ }
+
+ private PpmContext CreateSuccessors(bool skip, PpmState state, PpmContext context)
+ {
+ PpmContext upBranch = foundState.Successor;
+ PpmState[] states = new PpmState[MaximumOrder];
+ uint stateIndex = 0;
+ byte symbol = foundState.Symbol;
+
+ if (!skip)
+ {
+ states[stateIndex++] = foundState;
+ if (context.Suffix == PpmContext.Zero)
+ goto NoLoop;
+ }
+
+ bool gotoLoopEntry = false;
+ if (state != PpmState.Zero)
+ {
+ context = context.Suffix;
+ gotoLoopEntry = true;
+ }
+
+ do
+ {
+ if (gotoLoopEntry)
+ {
+ gotoLoopEntry = false;
+ goto LoopEntry;
+ }
+
+ context = context.Suffix;
+ if (context.NumberStatistics != 0)
+ {
+ byte temporary;
+ state = context.Statistics;
+ if (state.Symbol != symbol)
+ {
+ do
+ {
+ temporary = state[1].Symbol;
+ state++;
+ } while (temporary != symbol);
+ }
+ temporary = (byte)((state.Frequency < MaximumFrequency - 9) ? 1 : 0);
+ state.Frequency += temporary;
+ context.SummaryFrequency += temporary;
+ }
+ else
+ {
+ state = context.FirstState;
+ state.Frequency += (byte)(((context.Suffix.NumberStatistics == 0) ? 1 : 0) & ((state.Frequency < 24) ? 1 : 0));
+ }
+
+ LoopEntry:
+ if (state.Successor != upBranch)
+ {
+ context = state.Successor;
+ break;
+ }
+ states[stateIndex++] = state;
+ } while (context.Suffix != PpmContext.Zero);
+
+ NoLoop:
+ if (stateIndex == 0)
+ return context;
+
+ byte localNumberStatistics = 0;
+ byte localFlags = (byte)((symbol >= 0x40) ? 0x10 : 0x00);
+ symbol = upBranch.NumberStatistics;
+ byte localSymbol = symbol;
+ byte localFrequency;
+ PpmContext localSuccessor = ((Pointer)upBranch) + 1;
+ localFlags |= (byte)((symbol >= 0x40) ? 0x08 : 0x00);
+
+ if (context.NumberStatistics != 0)
+ {
+ state = context.Statistics;
+ if (state.Symbol != symbol)
+ {
+ byte temporary;
+ do
+ {
+ temporary = state[1].Symbol;
+ state++;
+ } while (temporary != symbol);
+ }
+ uint cf = (uint)(state.Frequency - 1);
+ uint s0 = (uint)(context.SummaryFrequency - context.NumberStatistics - cf);
+ localFrequency = (byte)(1 + ((2 * cf <= s0) ? (uint)((5 * cf > s0) ? 1 : 0) : ((cf + 2 * s0 - 3) / s0)));
+ }
+ else
+ {
+ localFrequency = context.FirstStateFrequency;
+ }
+
+ do
+ {
+ PpmContext currentContext = Allocator.AllocateContext();
+ if (currentContext == PpmContext.Zero)
+ return PpmContext.Zero;
+ currentContext.NumberStatistics = localNumberStatistics;
+ currentContext.Flags = localFlags;
+ currentContext.FirstStateSymbol = localSymbol;
+ currentContext.FirstStateFrequency = localFrequency;
+ currentContext.FirstStateSuccessor = localSuccessor;
+ currentContext.Suffix = context;
+ context = currentContext;
+ states[--stateIndex].Successor = context;
+ } while (stateIndex != 0);
+
+ return context;
+ }
+
+ private PpmContext ReduceOrder(PpmState state, PpmContext context)
+ {
+ PpmState currentState;
+ PpmState[] states = new PpmState[MaximumOrder];
+ uint stateIndex = 0;
+ PpmContext currentContext = context;
+ PpmContext UpBranch = Allocator.Text;
+ byte temporary;
+ byte symbol = foundState.Symbol;
+
+ states[stateIndex++] = foundState;
+ foundState.Successor = UpBranch;
+ orderFall++;
+
+ bool gotoLoopEntry = false;
+ if (state != PpmState.Zero)
+ {
+ context = context.Suffix;
+ gotoLoopEntry = true;
+ }
+
+ while (true)
+ {
+ if (gotoLoopEntry)
+ {
+ gotoLoopEntry = false;
+ goto LoopEntry;
+ }
+
+ if (context.Suffix == PpmContext.Zero)
+ {
+ if (method > ModelRestorationMethod.Freeze)
+ {
+ do
+ {
+ states[--stateIndex].Successor = context;
+ } while (stateIndex != 0);
+ Allocator.Text = Allocator.Heap + 1;
+ orderFall = 1;
+ }
+ return context;
+ }
+
+ context = context.Suffix;
+ if (context.NumberStatistics != 0)
+ {
+ state = context.Statistics;
+ if (state.Symbol != symbol)
+ {
+ do
+ {
+ temporary = state[1].Symbol;
+ state++;
+ } while (temporary != symbol);
+ }
+ temporary = (byte)((state.Frequency < MaximumFrequency - 9) ? 2 : 0);
+ state.Frequency += temporary;
+ context.SummaryFrequency += temporary;
+ }
+ else
+ {
+ state = context.FirstState;
+ state.Frequency += (byte)((state.Frequency < 32) ? 1 : 0);
+ }
+
+ LoopEntry:
+ if (state.Successor != PpmContext.Zero)
+ break;
+ states[stateIndex++] = state;
+ state.Successor = UpBranch;
+ orderFall++;
+ }
+
+ if (method > ModelRestorationMethod.Freeze)
+ {
+ context = state.Successor;
+ do
+ {
+ states[--stateIndex].Successor = context;
+ } while (stateIndex != 0);
+ Allocator.Text = Allocator.Heap + 1;
+ orderFall = 1;
+ return context;
+ }
+ else if (state.Successor <= UpBranch)
+ {
+ currentState = foundState;
+ foundState = state;
+ state.Successor = CreateSuccessors(false, PpmState.Zero, context);
+ foundState = currentState;
+ }
+
+ if (orderFall == 1 && currentContext == maximumContext)
+ {
+ foundState.Successor = state.Successor;
+ Allocator.Text--;
+ }
+
+ return state.Successor;
+ }
+
+ private void RestoreModel(PpmContext context, PpmContext minimumContext, PpmContext foundStateSuccessor)
+ {
+ PpmContext currentContext;
+
+ Allocator.Text = Allocator.Heap;
+ for (currentContext = maximumContext; currentContext != context; currentContext = currentContext.Suffix)
+ {
+ if (--currentContext.NumberStatistics == 0)
+ {
+ currentContext.Flags = (byte)((currentContext.Flags & 0x10) + ((currentContext.Statistics.Symbol >= 0x40) ? 0x08 : 0x00));
+ PpmState state = currentContext.Statistics;
+ Copy(currentContext.FirstState, state);
+ Allocator.SpecialFreeUnits(state);
+ currentContext.FirstStateFrequency = (byte)((currentContext.FirstStateFrequency + 11) >> 3);
+ }
+ else
+ {
+ Refresh((uint)((currentContext.NumberStatistics + 3) >> 1), false, currentContext);
+ }
+ }
+
+ for (; currentContext != minimumContext; currentContext = currentContext.Suffix)
+ {
+ if (currentContext.NumberStatistics == 0)
+ currentContext.FirstStateFrequency -= (byte)(currentContext.FirstStateFrequency >> 1);
+ else if ((currentContext.SummaryFrequency += 4) > 128 + 4 * currentContext.NumberStatistics)
+ Refresh((uint)((currentContext.NumberStatistics + 2) >> 1), true, currentContext);
+ }
+
+ if (method > ModelRestorationMethod.Freeze)
+ {
+ maximumContext = foundStateSuccessor;
+ Allocator.GlueCount += (uint)(((Allocator.MemoryNodes[1].Stamp & 1) == 0) ? 1 : 0);
+ }
+ else if (method == ModelRestorationMethod.Freeze)
+ {
+ while (maximumContext.Suffix != PpmContext.Zero)
+ maximumContext = maximumContext.Suffix;
+
+ RemoveBinaryContexts(0, maximumContext);
+ method = (ModelRestorationMethod)(method + 1);
+ Allocator.GlueCount = 0;
+ orderFall = modelOrder;
+ }
+ else if (method == ModelRestorationMethod.Restart || Allocator.GetMemoryUsed() < (Allocator.AllocatorSize >> 1))
+ {
+ StartModel(modelOrder, method);
+ escapeCount = 0;
+ }
+ else
+ {
+ while (maximumContext.Suffix != PpmContext.Zero)
+ maximumContext = maximumContext.Suffix;
+
+ do
+ {
+ CutOff(0, maximumContext);
+ Allocator.ExpandText();
+ } while (Allocator.GetMemoryUsed() > 3 * (Allocator.AllocatorSize >> 2));
+
+ Allocator.GlueCount = 0;
+ orderFall = modelOrder;
+ }
+ }
+
+ private static void Swap(PpmState state1, PpmState state2)
+ {
+ byte swapSymbol = state1.Symbol;
+ byte swapFrequency = state1.Frequency;
+ PpmContext swapSuccessor = state1.Successor;
+
+ state1.Symbol = state2.Symbol;
+ state1.Frequency = state2.Frequency;
+ state1.Successor = state2.Successor;
+
+ state2.Symbol = swapSymbol;
+ state2.Frequency = swapFrequency;
+ state2.Successor = swapSuccessor;
+ }
+
+ private static void Copy(PpmState state1, PpmState state2)
+ {
+ state1.Symbol = state2.Symbol;
+ state1.Frequency = state2.Frequency;
+ state1.Successor = state2.Successor;
+ }
+
+ private static int Mean(int sum, int shift, int round)
+ {
+ return (sum + (1 << (shift - round))) >> shift;
+ }
+
+ private void ClearMask()
+ {
+ escapeCount = 1;
+ Array.Clear(characterMask, 0, characterMask.Length);
+ }
+
+ #endregion
+ }
+}
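A hedged usage sketch for the model (not part of the patch). It assumes an already-configured PpmdProperties instance, defined elsewhere in this library, carrying the ModelOrder, ModelRestorationMethod and Allocator consumed by EncodeStart/DecodeStart; the file names are placeholders:

    var model = new Model();
    using (var source = System.IO.File.OpenRead("input.bin"))
    using (var target = System.IO.File.Create("output.ppmd"))
    {
        model.Encode(target, source, properties);   // 'properties' is an assumed, pre-configured PpmdProperties
    }
    // Decoding mirrors this: model.Decode(target, source, properties) reads the range-coded
    // bytes from 'source' and writes the reconstructed data to 'target'.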
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/ModelRestorationMethod.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/ModelRestorationMethod.cs
new file mode 100644
index 00000000..e7a7cdef
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/ModelRestorationMethod.cs
@@ -0,0 +1,29 @@
+#region Using
+
+
+
+#endregion
+
+namespace Compress.SevenZip.Compress.PPmd.I1
+{
+ ///
+ /// The method used to adjust the model when the memory limit is reached.
+ ///
+ internal enum ModelRestorationMethod
+ {
+ ///
+ /// Restart the model from scratch (this is the default).
+ ///
+ Restart = 0,
+
+ ///
+ /// Cut off the model (nearly twice as slow).
+ ///
+ CutOff = 1,
+
+ ///
+ /// Freeze the context tree (in some cases may result in poor compression).
+ ///
+ Freeze = 2
+ }
+}
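The restoration method reaches the model through PpmdProperties (see EncodeStart/DecodeStart in Model.cs above). A brief sketch, assuming PpmdProperties has a parameterless constructor and exposes a settable ModelRestorationMethod property:

    var properties = new PpmdProperties();                              // assumed constructor
    properties.ModelRestorationMethod = ModelRestorationMethod.CutOff;  // trade speed for keeping more of the model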
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Pointer.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Pointer.cs
new file mode 100644
index 00000000..2df401a0
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/Pointer.cs
@@ -0,0 +1,319 @@
+#region Using
+
+using System;
+
+#endregion
+
+namespace Compress.SevenZip.Compress.PPmd.I1
+{
+ ///
+ /// A structure containing a single address representing a position in the Memory array. This
+ /// is intended to mimic the behaviour of a pointer in C/C++.
+ ///
+ ///
+ ///
+ /// This must be a structure rather than a class because several places in the associated code assume that
+ /// Pointer is a value type (meaning that assignment creates a completely new copy of the
+ /// instance rather than just copying a reference to the same instance).
+ ///
+ ///
+ /// Note that Address is a field rather than a property for performance reasons.
+ ///
+ ///
+ internal struct Pointer
+ {
+ public uint Address;
+ public byte[] Memory;
+ public static readonly Pointer Zero = new Pointer(0, null);
+ public const int Size = 1;
+
+ ///
+ /// Initializes a new instance of the Pointer structure.
+ ///
+ public Pointer(uint address, byte[] memory)
+ {
+ Address = address;
+ Memory = memory;
+ }
+
+ ///
+ /// Gets or sets the byte at the given offset.
+ ///
+ ///
+ ///
+ public byte this[int offset]
+ {
+ get
+ {
+ #if DEBUG
+ if (Address == 0)
+ throw new InvalidOperationException("The pointer being indexed is a null pointer.");
+ #endif
+ return Memory[Address + offset];
+ }
+ set
+ {
+ #if DEBUG
+ if (Address == 0)
+ throw new InvalidOperationException("The pointer being indexed is a null pointer.");
+ #endif
+ Memory[Address + offset] = value;
+ }
+ }
+
+ ///
+ /// Allow a MemoryNode to be implicitly converted to a Pointer.
+ ///
+ ///
+ ///
+ public static implicit operator Pointer(MemoryNode memoryNode)
+ {
+ return new Pointer(memoryNode.Address, memoryNode.Memory);
+ }
+
+ ///
+ /// Allow a PpmContext to be implicitly converted to a Pointer.
+ ///
+ ///
+ ///
+ public static implicit operator Pointer(Model.PpmContext context)
+ {
+ return new Pointer(context.Address, context.Memory);
+ }
+
+ ///
+ /// Allow a PpmState to be implicitly converted to a Pointer.
+ ///
+ ///
+ ///
+ public static implicit operator Pointer(PpmState state)
+ {
+ return new Pointer(state.Address, state.Memory);
+ }
+
+ ///
+ /// Increase the address of a pointer by the given number of bytes.
+ ///
+ ///
+ ///
+ ///
+ public static Pointer operator +(Pointer pointer, int offset)
+ {
+ #if DEBUG
+ if (pointer.Address == 0)
+ throw new InvalidOperationException("The pointer is a null pointer.");
+ #endif
+ pointer.Address = (uint) (pointer.Address + offset);
+ return pointer;
+ }
+
+ ///
+ /// Increase the address of a pointer by the given number of bytes.
+ ///
+ ///
+ ///
+ ///
+ public static Pointer operator +(Pointer pointer, uint offset)
+ {
+ #if DEBUG
+ if (pointer.Address == 0)
+ throw new InvalidOperationException("The pointer is a null pointer.");
+ #endif
+ pointer.Address += offset;
+ return pointer;
+ }
+
+ ///
+ /// Increment the address of a pointer.
+ ///
+ ///
+ ///
+ public static Pointer operator ++(Pointer pointer)
+ {
+ #if DEBUG
+ if (pointer.Address == 0)
+ throw new InvalidOperationException("The pointer being incremented is a null pointer.");
+ #endif
+ pointer.Address++;
+ return pointer;
+ }
+
+ ///
+ /// Decrease the address of a pointer by the given number of bytes.
+ ///
+ ///
+ ///
+ ///
+ public static Pointer operator -(Pointer pointer, int offset)
+ {
+ #if DEBUG
+ if (pointer.Address == 0)
+ throw new InvalidOperationException("The pointer is a null pointer.");
+ #endif
+ pointer.Address = (uint) (pointer.Address - offset);
+ return pointer;
+ }
+
+ ///
+ /// Decrease the address of a pointer by the given number of bytes.
+ ///
+ ///
+ ///
+ ///
+ public static Pointer operator -(Pointer pointer, uint offset)
+ {
+ #if DEBUG
+ if (pointer.Address == 0)
+ throw new InvalidOperationException("The pointer is a null pointer.");
+ #endif
+ pointer.Address -= offset;
+ return pointer;
+ }
+
+ ///
+ /// Decrement the address of a pointer.
+ ///
+ ///
+ ///
+ public static Pointer operator --(Pointer pointer)
+ {
+ #if DEBUG
+ if (pointer.Address == 0)
+ throw new InvalidOperationException("The pointer being decremented is a null pointer.");
+ #endif
+ pointer.Address--;
+ return pointer;
+ }
+
+ ///
+ /// Subtract two pointers.
+ ///
+ ///
+ ///
+ /// The number of bytes between the two pointers.
+ public static uint operator -(Pointer pointer1, Pointer pointer2)
+ {
+ #if DEBUG
+ if (pointer1.Address == 0)
+ throw new InvalidOperationException("The pointer to the left of the subtraction operator is a null pointer.");
+ if (pointer2.Address == 0)
+ throw new InvalidOperationException("The pointer to the right of the subtraction operator is a null pointer.");
+ #endif
+ return pointer1.Address - pointer2.Address;
+ }
+
+ ///
+ /// Compare pointers.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator <(Pointer pointer1, Pointer pointer2)
+ {
+ #if DEBUG
+ if (pointer1.Address == 0)
+ throw new InvalidOperationException("The pointer to the left of the less than operator is a null pointer.");
+ if (pointer2.Address == 0)
+ throw new InvalidOperationException("The pointer to the right of the less than operator is a null pointer.");
+ #endif
+ return pointer1.Address < pointer2.Address;
+ }
+
+ ///
+ /// Compare two pointers.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator <=(Pointer pointer1, Pointer pointer2)
+ {
+ #if DEBUG
+ if (pointer1.Address == 0)
+ throw new InvalidOperationException("The pointer to the left of the less than or equal to operator is a null pointer.");
+ if (pointer2.Address == 0)
+ throw new InvalidOperationException("The pointer to the right of the less than or equal to operator is a null pointer.");
+ #endif
+ return pointer1.Address <= pointer2.Address;
+ }
+
+ ///
+ /// Compare two pointers.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator >(Pointer pointer1, Pointer pointer2)
+ {
+ #if DEBUG
+ if (pointer1.Address == 0)
+ throw new InvalidOperationException("The pointer to the left of the greater than operator is a null pointer.");
+ if (pointer2.Address == 0)
+ throw new InvalidOperationException("The pointer to the right of the greater than operator is a null pointer.");
+ #endif
+ return pointer1.Address > pointer2.Address;
+ }
+
+ ///
+ /// Compare two pointers.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator >=(Pointer pointer1, Pointer pointer2)
+ {
+ #if DEBUG
+ if (pointer1.Address == 0)
+ throw new InvalidOperationException("The pointer to the left of the greater than or equal to operator is a null pointer.");
+ if (pointer2.Address == 0)
+ throw new InvalidOperationException("The pointer to the right of the greater than or equal to operator is a null pointer.");
+ #endif
+ return pointer1.Address >= pointer2.Address;
+ }
+
+ ///
+ /// Compare two pointers.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator ==(Pointer pointer1, Pointer pointer2)
+ {
+ return pointer1.Address == pointer2.Address;
+ }
+
+ ///
+ /// Compare two pointers.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator !=(Pointer pointer1, Pointer pointer2)
+ {
+ return pointer1.Address != pointer2.Address;
+ }
+
+ ///
+ /// Indicates whether this instance and a specified object are equal.
+ ///
+ /// true if obj and this instance are the same type and represent the same value; otherwise, false.
+ /// Another object to compare to.
+ public override bool Equals(object obj)
+ {
+ if (obj is Pointer)
+ {
+ Pointer pointer = (Pointer) obj;
+ return pointer.Address == Address;
+ }
+ return base.Equals(obj);
+ }
+
+ ///
+ /// Returns the hash code for this instance.
+ ///
+ /// A 32-bit signed integer that is the hash code for this instance.
+ public override int GetHashCode()
+ {
+ return Address.GetHashCode();
+ }
+ }
+}
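A minimal sketch of the pointer emulation (not part of the patch): Pointer is just an (Address, Memory) pair, so arithmetic moves the address and the indexer reads and writes the shared byte array directly.

    byte[] memory = new byte[32];
    var p = new Pointer(4, memory);               // offset 0 is reserved so that Address == 0 can act as a null pointer
    p[0] = 0xAB;                                  // writes memory[4]
    p += 2;                                       // value-type copy semantics: only this local's Address changes
    p[0] = 0xCD;                                  // writes memory[6]
    uint distance = p - new Pointer(4, memory);   // 2 (the number of bytes between the two addresses)
    bool isNull = p == Pointer.Zero;              // false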
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/PpmContext.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/PpmContext.cs
new file mode 100644
index 00000000..81aad5e1
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/PpmContext.cs
@@ -0,0 +1,787 @@
+#region Using
+
+
+
+#endregion
+
+namespace Compress.SevenZip.Compress.PPmd.I1
+{
+ ///
+ /// The PPM context structure. This is tightly coupled with the Model.
+ ///
+ ///
+ ///
+ /// This must be a structure rather than a class because several places in the associated code assume that
+ /// PpmContext is a value type (meaning that assignment creates a completely new copy of
+ /// the instance rather than just copying a reference to the same instance).
+ ///
+ ///
+ internal partial class Model
+ {
+ ///
+ /// The structure which represents the current PPM context. This is 12 bytes in size.
+ ///
+ internal struct PpmContext
+ {
+ public uint Address;
+ public byte[] Memory;
+ public static readonly PpmContext Zero = new PpmContext(0, null);
+ public const int Size = 12;
+
+ ///
+ /// Initializes a new instance of the PpmContext structure.
+ ///
+ public PpmContext(uint address, byte[] memory)
+ {
+ Address = address;
+ Memory = memory;
+ }
+
+ ///
+ /// Gets or sets the number statistics.
+ ///
+ public byte NumberStatistics
+ {
+ get { return Memory[Address]; }
+ set { Memory[Address] = value; }
+ }
+
+ ///
+ /// Gets or sets the flags.
+ ///
+ public byte Flags
+ {
+ get { return Memory[Address + 1]; }
+ set { Memory[Address + 1] = value; }
+ }
+
+ ///
+ /// Gets or sets the summary frequency.
+ ///
+ public ushort SummaryFrequency
+ {
+ get { return (ushort)(((ushort)Memory[Address + 2]) | ((ushort)Memory[Address + 3]) << 8); }
+ set
+ {
+ Memory[Address + 2] = (byte)value;
+ Memory[Address + 3] = (byte)(value >> 8);
+ }
+ }
+
+ ///
+ /// Gets or sets the statistics.
+ ///
+ public PpmState Statistics
+ {
+ get { return new PpmState(((uint)Memory[Address + 4]) | ((uint)Memory[Address + 5]) << 8 | ((uint)Memory[Address + 6]) << 16 | ((uint)Memory[Address + 7]) << 24, Memory); }
+ set
+ {
+ Memory[Address + 4] = (byte)value.Address;
+ Memory[Address + 5] = (byte)(value.Address >> 8);
+ Memory[Address + 6] = (byte)(value.Address >> 16);
+ Memory[Address + 7] = (byte)(value.Address >> 24);
+ }
+ }
+
+ ///
+ /// Gets or sets the suffix.
+ ///
+ public PpmContext Suffix
+ {
+ get { return new PpmContext(((uint)Memory[Address + 8]) | ((uint)Memory[Address + 9]) << 8 | ((uint)Memory[Address + 10]) << 16 | ((uint)Memory[Address + 11]) << 24, Memory); }
+ set
+ {
+ Memory[Address + 8] = (byte)value.Address;
+ Memory[Address + 9] = (byte)(value.Address >> 8);
+ Memory[Address + 10] = (byte)(value.Address >> 16);
+ Memory[Address + 11] = (byte)(value.Address >> 24);
+ }
+ }
+
+ ///
+ /// The first PPM state associated with the PPM context.
+ ///
+ ///
+ ///
+ /// The first PPM state overlaps this PPM context instance (the context.SummaryFrequency and context.Statistics members
+ /// of PpmContext use 6 bytes and can therefore share the space used by the Symbol, Frequency and
+ /// Successor members of PpmState, since those also add up to 6 bytes).
+ ///
+ ///
+ /// PpmContext (context.SummaryFrequency and context.Statistics use 6 bytes)
+ /// 1 context.NumberStatistics
+ /// 1 context.Flags
+ /// 2 context.SummaryFrequency
+ /// 4 context.Statistics (pointer to PpmState)
+ /// 4 context.Suffix (pointer to PpmContext)
+ ///
+ ///
+ /// PpmState (total of 6 bytes)
+ /// 1 Symbol
+ /// 1 Frequency
+ /// 4 Successor (pointer to PpmContext)
+ ///
+ ///
+ ///
+ public PpmState FirstState
+ {
+ get { return new PpmState(Address + 2, Memory); }
+ }
+
+ ///
+ /// Gets or sets the symbol of the first PPM state. This is provided for convenience. The same
+ /// information can be obtained using the Symbol property on the PPM state provided by the
+ /// FirstState property.
+ ///
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode", Justification = "The property getter is provided for completeness.")]
+ public byte FirstStateSymbol
+ {
+ get { return Memory[Address + 2]; }
+ set { Memory[Address + 2] = value; }
+ }
+
+ ///
+ /// Gets or sets the frequency of the first PPM state. This is provided for convenience. The same
+ /// information can be obtained using the Frequency property on the PPM state provided by the
+ /// context.FirstState property.
+ ///
+ public byte FirstStateFrequency
+ {
+ get { return Memory[Address + 3]; }
+ set { Memory[Address + 3] = value; }
+ }
+
+ ///
+ /// Gets or sets the successor of the first PPM state. This is provided for convenience. The same
+ /// information can be obtained using the Successor property on the PPM state provided by the FirstState property.
+ ///
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode", Justification = "The property getter is provided for completeness.")]
+ public PpmContext FirstStateSuccessor
+ {
+ get { return new PpmContext(((uint)Memory[Address + 4]) | ((uint)Memory[Address + 5]) << 8 | ((uint)Memory[Address + 6]) << 16 | ((uint)Memory[Address + 7]) << 24, Memory); }
+ set
+ {
+ Memory[Address + 4] = (byte)value.Address;
+ Memory[Address + 5] = (byte)(value.Address >> 8);
+ Memory[Address + 6] = (byte)(value.Address >> 16);
+ Memory[Address + 7] = (byte)(value.Address >> 24);
+ }
+ }
+
+ ///
+ /// Allow a pointer to be implicitly converted to a PPM context.
+ ///
+ ///
+ ///
+ public static implicit operator PpmContext(Pointer pointer)
+ {
+ return new PpmContext(pointer.Address, pointer.Memory);
+ }
+
+ ///
+ /// Allow pointer-like addition on a PPM context.
+ ///
+ ///
+ ///
+ ///
+ public static PpmContext operator +(PpmContext context, int offset)
+ {
+ context.Address = (uint)(context.Address + offset * Size);
+ return context;
+ }
+
+ ///
+ /// Allow pointer-like subtraction on a PPM context.
+ ///
+ ///
+ ///
+ ///
+ public static PpmContext operator -(PpmContext context, int offset)
+ {
+ context.Address = (uint)(context.Address - offset * Size);
+ return context;
+ }
+
+ ///
+ /// Compare two PPM contexts.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator <=(PpmContext context1, PpmContext context2)
+ {
+ return context1.Address <= context2.Address;
+ }
+
+ ///
+ /// Compare two PPM contexts.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator >=(PpmContext context1, PpmContext context2)
+ {
+ return context1.Address >= context2.Address;
+ }
+
+ ///
+ /// Compare two PPM contexts.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator ==(PpmContext context1, PpmContext context2)
+ {
+ return context1.Address == context2.Address;
+ }
+
+ ///
+ /// Compare two PPM contexts.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator !=(PpmContext context1, PpmContext context2)
+ {
+ return context1.Address != context2.Address;
+ }
+
+ ///
+ /// Indicates whether this instance and a specified object are equal.
+ ///
+ /// true if obj and this instance are the same type and represent the same value; otherwise, false.
+ /// Another object to compare to.
+ public override bool Equals(object obj)
+ {
+ if (obj is PpmContext)
+ {
+ PpmContext context = (PpmContext)obj;
+ return context.Address == Address;
+ }
+ return base.Equals(obj);
+ }
+
+ ///
+ /// Returns the hash code for this instance.
+ ///
+ /// A 32-bit signed integer that is the hash code for this instance.
+ public override int GetHashCode()
+ {
+ return Address.GetHashCode();
+ }
+ }
+
+ private void EncodeBinarySymbol(int symbol, PpmContext context)
+ {
+ PpmState state = context.FirstState;
+ int index1 = probabilities[state.Frequency - 1];
+ int index2 = numberStatisticsToBinarySummaryIndex[context.Suffix.NumberStatistics] + previousSuccess + context.Flags + ((runLength >> 26) & 0x20);
+
+ if (state.Symbol == symbol)
+ {
+ foundState = state;
+ state.Frequency += (byte)((state.Frequency < 196) ? 1 : 0);
+ Coder.LowCount = 0;
+ Coder.HighCount = binarySummary[index1, index2];
+ binarySummary[index1, index2] += (ushort)(Interval - Mean(binarySummary[index1, index2], PeriodBitCount, 2));
+ previousSuccess = 1;
+ runLength++;
+ }
+ else
+ {
+ Coder.LowCount = binarySummary[index1, index2];
+ binarySummary[index1, index2] -= (ushort)Mean(binarySummary[index1, index2], PeriodBitCount, 2);
+ Coder.HighCount = BinaryScale;
+ initialEscape = ExponentialEscapes[binarySummary[index1, index2] >> 10];
+ characterMask[state.Symbol] = escapeCount;
+ previousSuccess = 0;
+ numberMasked = 0;
+ foundState = PpmState.Zero;
+ }
+ }
+
+ private void EncodeSymbol1(int symbol, PpmContext context)
+ {
+ uint lowCount;
+ uint index = context.Statistics.Symbol;
+ PpmState state = context.Statistics;
+ Coder.Scale = context.SummaryFrequency;
+ if (index == symbol)
+ {
+ Coder.HighCount = state.Frequency;
+ previousSuccess = (byte)((2 * Coder.HighCount >= Coder.Scale) ? 1 : 0);
+ foundState = state;
+ foundState.Frequency += 4;
+ context.SummaryFrequency += 4;
+ runLength += previousSuccess;
+ if (state.Frequency > MaximumFrequency)
+ Rescale(context);
+ Coder.LowCount = 0;
+ return;
+ }
+
+ lowCount = state.Frequency;
+ index = context.NumberStatistics;
+ previousSuccess = 0;
+ while ((++state).Symbol != symbol)
+ {
+ lowCount += state.Frequency;
+ if (--index == 0)
+ {
+ Coder.LowCount = lowCount;
+ characterMask[state.Symbol] = escapeCount;
+ numberMasked = context.NumberStatistics;
+ index = context.NumberStatistics;
+ foundState = PpmState.Zero;
+ do
+ {
+ characterMask[(--state).Symbol] = escapeCount;
+ } while (--index != 0);
+ Coder.HighCount = Coder.Scale;
+ return;
+ }
+ }
+ Coder.HighCount = (Coder.LowCount = lowCount) + state.Frequency;
+ Update1(state, context);
+ }
+
+ private void EncodeSymbol2(int symbol, PpmContext context)
+ {
+ See2Context see2Context = MakeEscapeFrequency(context);
+ uint currentSymbol;
+ uint lowCount = 0;
+ uint index = (uint)(context.NumberStatistics - numberMasked);
+ PpmState state = context.Statistics - 1;
+
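+ // Escape level: symbols already masked (tried at a higher order) are skipped via characterMask,
+ // and the SEE2 context supplies the estimated escape frequency for this masked set.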
+ do
+ {
+ do
+ {
+ currentSymbol = state[1].Symbol;
+ state++;
+ } while (characterMask[currentSymbol] == escapeCount);
+ characterMask[currentSymbol] = escapeCount;
+ if (currentSymbol == symbol)
+ goto SymbolFound;
+ lowCount += state.Frequency;
+ } while (--index != 0);
+
+ Coder.LowCount = lowCount;
+ Coder.Scale += Coder.LowCount;
+ Coder.HighCount = Coder.Scale;
+ see2Context.Summary += (ushort)Coder.Scale;
+ numberMasked = context.NumberStatistics;
+ return;
+
+ SymbolFound:
+ Coder.LowCount = lowCount;
+ lowCount += state.Frequency;
+ Coder.HighCount = lowCount;
+ for (PpmState p1 = state; --index != 0; )
+ {
+ do
+ {
+ currentSymbol = p1[1].Symbol;
+ p1++;
+ } while (characterMask[currentSymbol] == escapeCount);
+ lowCount += p1.Frequency;
+ }
+ Coder.Scale += lowCount;
+ see2Context.Update();
+ Update2(state, context);
+ }
+
+ private void DecodeBinarySymbol(PpmContext context)
+ {
+ PpmState state = context.FirstState;
+ int index1 = probabilities[state.Frequency - 1];
+ int index2 = numberStatisticsToBinarySummaryIndex[context.Suffix.NumberStatistics] + previousSuccess + context.Flags + ((runLength >> 26) & 0x20);
+
+ if (Coder.RangeGetCurrentShiftCount(TotalBitCount) < binarySummary[index1, index2])
+ {
+ foundState = state;
+ state.Frequency += (byte)((state.Frequency < 196) ? 1 : 0);
+ Coder.LowCount = 0;
+ Coder.HighCount = binarySummary[index1, index2];
+ binarySummary[index1, index2] += (ushort)(Interval - Mean(binarySummary[index1, index2], PeriodBitCount, 2));
+ previousSuccess = 1;
+ runLength++;
+ }
+ else
+ {
+ Coder.LowCount = binarySummary[index1, index2];
+ binarySummary[index1, index2] -= (ushort)Mean(binarySummary[index1, index2], PeriodBitCount, 2);
+ Coder.HighCount = BinaryScale;
+ initialEscape = ExponentialEscapes[binarySummary[index1, index2] >> 10];
+ characterMask[state.Symbol] = escapeCount;
+ previousSuccess = 0;
+ numberMasked = 0;
+ foundState = PpmState.Zero;
+ }
+ }
+
+ private void DecodeSymbol1(PpmContext context)
+ {
+ uint index;
+ uint count;
+ uint highCount = context.Statistics.Frequency;
+ PpmState state = context.Statistics;
+ Coder.Scale = context.SummaryFrequency;
+
+ count = Coder.RangeGetCurrentCount();
+ if (count < highCount)
+ {
+ Coder.HighCount = highCount;
+ previousSuccess = (byte)((2 * Coder.HighCount >= Coder.Scale) ? 1 : 0);
+ foundState = state;
+ highCount += 4;
+ foundState.Frequency = (byte)highCount;
+ context.SummaryFrequency += 4;
+ runLength += previousSuccess;
+ if (highCount > MaximumFrequency)
+ Rescale(context);
+ Coder.LowCount = 0;
+ return;
+ }
+
+ index = context.NumberStatistics;
+ previousSuccess = 0;
+ while ((highCount += (++state).Frequency) <= count)
+ {
+ if (--index == 0)
+ {
+ Coder.LowCount = highCount;
+ characterMask[state.Symbol] = escapeCount;
+ numberMasked = context.NumberStatistics;
+ index = context.NumberStatistics;
+ foundState = PpmState.Zero;
+ do
+ {
+ characterMask[(--state).Symbol] = escapeCount;
+ } while (--index != 0);
+ Coder.HighCount = Coder.Scale;
+ return;
+ }
+ }
+ Coder.HighCount = highCount;
+ Coder.LowCount = Coder.HighCount - state.Frequency;
+ Update1(state, context);
+ }
+
+ private void DecodeSymbol2(PpmContext context)
+ {
+ See2Context see2Context = MakeEscapeFrequency(context);
+ uint currentSymbol;
+ uint count;
+ uint highCount = 0;
+ uint index = (uint)(context.NumberStatistics - numberMasked);
+ uint stateIndex = 0;
+ PpmState state = context.Statistics - 1;
+
+ do
+ {
+ do
+ {
+ currentSymbol = state[1].Symbol;
+ state++;
+ } while (characterMask[currentSymbol] == escapeCount);
+ highCount += state.Frequency;
+ decodeStates[stateIndex++] = state; // note that decodeStates is a static array that is re-used on each call to this method (for performance reasons)
+ } while (--index != 0);
+
+ Coder.Scale += highCount;
+ count = Coder.RangeGetCurrentCount();
+ stateIndex = 0;
+ state = decodeStates[stateIndex];
+ if (count < highCount)
+ {
+ highCount = 0;
+ while ((highCount += state.Frequency) <= count)
+ state = decodeStates[++stateIndex];
+ Coder.HighCount = highCount;
+ Coder.LowCount = Coder.HighCount - state.Frequency;
+ see2Context.Update();
+ Update2(state, context);
+ }
+ else
+ {
+ Coder.LowCount = highCount;
+ Coder.HighCount = Coder.Scale;
+ index = (uint)(context.NumberStatistics - numberMasked);
+ numberMasked = context.NumberStatistics;
+ do
+ {
+ characterMask[decodeStates[stateIndex].Symbol] = escapeCount;
+ stateIndex++;
+ } while (--index != 0);
+ see2Context.Summary += (ushort)Coder.Scale;
+ }
+ }
+
+ private void Update1(PpmState state, PpmContext context)
+ {
+ foundState = state;
+ foundState.Frequency += 4;
+ context.SummaryFrequency += 4;
+ if (state[0].Frequency > state[-1].Frequency)
+ {
+ Swap(state[0], state[-1]);
+ foundState = --state;
+ if (state.Frequency > MaximumFrequency)
+ Rescale(context);
+ }
+ }
+
+ private void Update2(PpmState state, PpmContext context)
+ {
+ foundState = state;
+ foundState.Frequency += 4;
+ context.SummaryFrequency += 4;
+ if (state.Frequency > MaximumFrequency)
+ Rescale(context);
+ escapeCount++;
+ runLength = initialRunLength;
+ }
+
+ private See2Context MakeEscapeFrequency(PpmContext context)
+ {
+ uint numberStatistics = (uint)2 * context.NumberStatistics;
+ See2Context see2Context;
+
+ if (context.NumberStatistics != 0xff)
+ {
+ // Note that context.Flags is always in the range 0 .. 28 (this ensures that the index used for the second
+ // dimension of the see2Contexts array is always in the range 0 .. 31).
+
+ numberStatistics = context.Suffix.NumberStatistics;
+ int index1 = probabilities[context.NumberStatistics + 2] - 3;
+ int index2 = ((context.SummaryFrequency > 11 * (context.NumberStatistics + 1)) ? 1 : 0) + ((2 * context.NumberStatistics < numberStatistics + numberMasked) ? 2 : 0) + context.Flags;
+ see2Context = see2Contexts[index1, index2];
+ Coder.Scale = see2Context.Mean();
+ }
+ else
+ {
+ see2Context = emptySee2Context;
+ Coder.Scale = 1;
+ }
+
+ return see2Context;
+ }
+
+ private void Rescale(PpmContext context)
+ {
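+ // Invoked when a frequency exceeds MaximumFrequency: moves the found state to the front, roughly
+ // halves all frequencies while keeping the state list sorted, and drops states whose frequency
+ // decays to zero.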
+ uint oldUnitCount;
+ int adder;
+ uint escapeFrequency;
+ uint index = context.NumberStatistics;
+
+ byte localSymbol;
+ byte localFrequency;
+ PpmContext localSuccessor;
+ PpmState p1;
+ PpmState state;
+
+ for (state = foundState; state != context.Statistics; state--)
+ Swap(state[0], state[-1]);
+
+ state.Frequency += 4;
+ context.SummaryFrequency += 4;
+ escapeFrequency = (uint)(context.SummaryFrequency - state.Frequency);
+ adder = (orderFall != 0 || method > ModelRestorationMethod.Freeze) ? 1 : 0;
+ state.Frequency = (byte)((state.Frequency + adder) >> 1);
+ context.SummaryFrequency = state.Frequency;
+
+ do
+ {
+ escapeFrequency -= (++state).Frequency;
+ state.Frequency = (byte)((state.Frequency + adder) >> 1);
+ context.SummaryFrequency += state.Frequency;
+ if (state[0].Frequency > state[-1].Frequency)
+ {
+ p1 = state;
+ localSymbol = p1.Symbol;
+ localFrequency = p1.Frequency;
+ localSuccessor = p1.Successor;
+ do
+ {
+ Copy(p1[0], p1[-1]);
+ } while (localFrequency > (--p1)[-1].Frequency);
+ p1.Symbol = localSymbol;
+ p1.Frequency = localFrequency;
+ p1.Successor = localSuccessor;
+ }
+ } while (--index != 0);
+
+ if (state.Frequency == 0)
+ {
+ do
+ {
+ index++;
+ } while ((--state).Frequency == 0);
+
+ escapeFrequency += index;
+ oldUnitCount = (uint)((context.NumberStatistics + 2) >> 1);
+ context.NumberStatistics -= (byte)index;
+ if (context.NumberStatistics == 0)
+ {
+ localSymbol = context.Statistics.Symbol;
+ localFrequency = context.Statistics.Frequency;
+ localSuccessor = context.Statistics.Successor;
+ localFrequency = (byte)((2 * localFrequency + escapeFrequency - 1) / escapeFrequency);
+ if (localFrequency > MaximumFrequency / 3)
+ localFrequency = (byte)(MaximumFrequency / 3);
+ Allocator.FreeUnits(context.Statistics, oldUnitCount);
+ context.FirstStateSymbol = localSymbol;
+ context.FirstStateFrequency = localFrequency;
+ context.FirstStateSuccessor = localSuccessor;
+ context.Flags = (byte)((context.Flags & 0x10) + ((localSymbol >= 0x40) ? 0x08 : 0x00));
+ foundState = context.FirstState;
+ return;
+ }
+
+ context.Statistics = Allocator.ShrinkUnits(context.Statistics, oldUnitCount, (uint)((context.NumberStatistics + 2) >> 1));
+ context.Flags &= 0xf7;
+ index = context.NumberStatistics;
+ state = context.Statistics;
+ context.Flags |= (byte)((state.Symbol >= 0x40) ? 0x08 : 0x00);
+ do
+ {
+ context.Flags |= (byte)(((++state).Symbol >= 0x40) ? 0x08 : 0x00);
+ } while (--index != 0);
+ }
+
+ escapeFrequency -= (escapeFrequency >> 1);
+ context.SummaryFrequency += (ushort)escapeFrequency;
+ context.Flags |= 0x04;
+ foundState = context.Statistics;
+ }
+
+ private void Refresh(uint oldUnitCount, bool scale, PpmContext context)
+ {
+ int index = context.NumberStatistics;
+ int escapeFrequency;
+ int scaleValue = (scale ? 1 : 0);
+
+ context.Statistics = Allocator.ShrinkUnits(context.Statistics, oldUnitCount, (uint)((index + 2) >> 1));
+ PpmState statistics = context.Statistics;
+ context.Flags = (byte)((context.Flags & (0x10 + (scale ? 0x04 : 0x00))) + ((statistics.Symbol >= 0x40) ? 0x08 : 0x00));
+ escapeFrequency = context.SummaryFrequency - statistics.Frequency;
+ statistics.Frequency = (byte)((statistics.Frequency + scaleValue) >> scaleValue);
+ context.SummaryFrequency = statistics.Frequency;
+
+ do
+ {
+ escapeFrequency -= (++statistics).Frequency;
+ statistics.Frequency = (byte)((statistics.Frequency + scaleValue) >> scaleValue);
+ context.SummaryFrequency += statistics.Frequency;
+ context.Flags |= (byte)((statistics.Symbol >= 0x40) ? 0x08 : 0x00);
+ } while (--index != 0);
+
+ escapeFrequency = (escapeFrequency + scaleValue) >> scaleValue;
+ context.SummaryFrequency += (ushort)escapeFrequency;
+ }
+
+ private PpmContext CutOff(int order, PpmContext context)
+ {
+ int index;
+ PpmState state;
+
+ if (context.NumberStatistics == 0)
+ {
+ state = context.FirstState;
+ if ((Pointer)state.Successor >= Allocator.BaseUnit)
+ {
+ if (order < modelOrder)
+ state.Successor = CutOff(order + 1, state.Successor);
+ else
+ state.Successor = PpmContext.Zero;
+
+ if (state.Successor == PpmContext.Zero && order > OrderBound)
+ {
+ Allocator.SpecialFreeUnits(context);
+ return PpmContext.Zero;
+ }
+
+ return context;
+ }
+ else
+ {
+ Allocator.SpecialFreeUnits(context);
+ return PpmContext.Zero;
+ }
+ }
+
+ uint unitCount = (uint)((context.NumberStatistics + 2) >> 1);
+ context.Statistics = Allocator.MoveUnitsUp(context.Statistics, unitCount);
+ index = context.NumberStatistics;
+ for (state = context.Statistics + index; state >= context.Statistics; state--)
+ {
+ if (state.Successor < Allocator.BaseUnit)
+ {
+ state.Successor = PpmContext.Zero;
+ Swap(state, context.Statistics[index--]);
+ }
+ else if (order < modelOrder)
+ state.Successor = CutOff(order + 1, state.Successor);
+ else
+ state.Successor = PpmContext.Zero;
+ }
+
+ if (index != context.NumberStatistics && order != 0)
+ {
+ context.NumberStatistics = (byte)index;
+ state = context.Statistics;
+ if (index < 0)
+ {
+ Allocator.FreeUnits(state, unitCount);
+ Allocator.SpecialFreeUnits(context);
+ return PpmContext.Zero;
+ }
+ else if (index == 0)
+ {
+ context.Flags = (byte)((context.Flags & 0x10) + ((state.Symbol >= 0x40) ? 0x08 : 0x00));
+ Copy(context.FirstState, state);
+ Allocator.FreeUnits(state, unitCount);
+ context.FirstStateFrequency = (byte)((context.FirstStateFrequency + 11) >> 3);
+ }
+ else
+ {
+ Refresh(unitCount, context.SummaryFrequency > 16 * index, context);
+ }
+ }
+
+ return context;
+ }
+
+ private PpmContext RemoveBinaryContexts(int order, PpmContext context)
+ {
+ if (context.NumberStatistics == 0)
+ {
+ PpmState state = context.FirstState;
+ if ((Pointer)state.Successor >= Allocator.BaseUnit && order < modelOrder)
+ state.Successor = RemoveBinaryContexts(order + 1, state.Successor);
+ else
+ state.Successor = PpmContext.Zero;
+ if ((state.Successor == PpmContext.Zero) && (context.Suffix.NumberStatistics == 0 || context.Suffix.Flags == 0xff))
+ {
+ Allocator.FreeUnits(context, 1);
+ return PpmContext.Zero;
+ }
+ else
+ {
+ return context;
+ }
+ }
+
+ for (PpmState state = context.Statistics + context.NumberStatistics; state >= context.Statistics; state--)
+ {
+ if ((Pointer)state.Successor >= Allocator.BaseUnit && order < modelOrder)
+ state.Successor = RemoveBinaryContexts(order + 1, state.Successor);
+ else
+ state.Successor = PpmContext.Zero;
+ }
+
+ return context;
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/PpmState.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/PpmState.cs
new file mode 100644
index 00000000..79b509f6
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/PpmState.cs
@@ -0,0 +1,206 @@
+#region Using
+
+
+
+#endregion
+
+namespace Compress.SevenZip.Compress.PPmd.I1
+{
+ ///
+ /// PPM state.
+ ///
+ ///
+ ///
+ /// This must be a structure rather than a class because several places in the associated code assume that
+ /// PpmState is a value type (meaning that assignment creates a completely new copy of the
+ /// instance rather than just copying a reference to the same instance).
+ ///
+ ///
+ /// Note that Address is a field rather than a property for performance reasons.
+ ///
+ ///
+ internal struct PpmState
+ {
+ public uint Address;
+ public byte[] Memory;
+ public static readonly PpmState Zero = new PpmState(0, null);
+ public const int Size = 6;
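+ // Layout of the Size (6) bytes at Address: [0] Symbol, [1] Frequency, [2..5] Successor context address (little endian).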
+
+ ///
+ /// Initializes a new instance of the PpmState structure.
+ ///
+ public PpmState(uint address, byte[] memory)
+ {
+ Address = address;
+ Memory = memory;
+ }
+
+ ///
+ /// Gets or sets the symbol.
+ ///
+ public byte Symbol
+ {
+ get { return Memory[Address]; }
+ set { Memory[Address] = value; }
+ }
+
+ ///
+ /// Gets or sets the frequency.
+ ///
+ public byte Frequency
+ {
+ get { return Memory[Address + 1]; }
+ set { Memory[Address + 1] = value; }
+ }
+
+ ///
+ /// Gets or sets the successor.
+ ///
+ public Model.PpmContext Successor
+ {
+ get { return new Model.PpmContext(((uint) Memory[Address + 2]) | ((uint) Memory[Address + 3]) << 8 | ((uint) Memory[Address + 4]) << 16 | ((uint) Memory[Address + 5]) << 24, Memory); }
+ set
+ {
+ Memory[Address + 2] = (byte) value.Address;
+ Memory[Address + 3] = (byte) (value.Address >> 8);
+ Memory[Address + 4] = (byte) (value.Address >> 16);
+ Memory[Address + 5] = (byte) (value.Address >> 24);
+ }
+ }
+
+ ///
+ /// Gets the PpmState at the given offset relative to this PpmState.
+ ///
+ ///
+ ///
+ public PpmState this[int offset]
+ {
+ get { return new PpmState((uint) (Address + offset * Size), Memory); }
+ }
+
+ ///
+ /// Allow a pointer to be implicitly converted to a PPM state.
+ ///
+ ///
+ ///
+ public static implicit operator PpmState(Pointer pointer)
+ {
+ return new PpmState(pointer.Address, pointer.Memory);
+ }
+
+ ///
+ /// Allow pointer-like addition on a PPM state.
+ ///
+ ///
+ ///
+ ///
+ public static PpmState operator +(PpmState state, int offset)
+ {
+ state.Address = (uint) (state.Address + offset * Size);
+ return state;
+ }
+
+ ///
+ /// Allow pointer-like incrementing on a PPM state.
+ ///
+ ///
+ ///
+ public static PpmState operator ++(PpmState state)
+ {
+ state.Address += Size;
+ return state;
+ }
+
+ ///
+ /// Allow pointer-like subtraction on a PPM state.
+ ///
+ ///
+ ///
+ ///
+ public static PpmState operator -(PpmState state, int offset)
+ {
+ state.Address = (uint) (state.Address - offset * Size);
+ return state;
+ }
+
+ ///
+ /// Allow pointer-like decrementing on a PPM state.
+ ///
+ ///
+ ///
+ public static PpmState operator --(PpmState state)
+ {
+ state.Address -= Size;
+ return state;
+ }
+
+ ///
+ /// Compare two PPM states.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator <=(PpmState state1, PpmState state2)
+ {
+ return state1.Address <= state2.Address;
+ }
+
+ ///
+ /// Compare two PPM states.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator >=(PpmState state1, PpmState state2)
+ {
+ return state1.Address >= state2.Address;
+ }
+
+ ///
+ /// Compare two PPM states.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator ==(PpmState state1, PpmState state2)
+ {
+ return state1.Address == state2.Address;
+ }
+
+ ///
+ /// Compare two PPM states.
+ ///
+ ///
+ ///
+ ///
+ public static bool operator !=(PpmState state1, PpmState state2)
+ {
+ return state1.Address != state2.Address;
+ }
+
+ ///
+ /// Indicates whether this instance and a specified object are equal.
+ ///
+ /// true if obj and this instance are the same type and represent the same value; otherwise, false.
+ /// Another object to compare to.
+ public override bool Equals(object obj)
+ {
+ if (obj is PpmState)
+ {
+ PpmState state = (PpmState) obj;
+ return state.Address == Address;
+ }
+ return base.Equals(obj);
+ }
+
+ ///
+ /// Returns the hash code for this instance.
+ ///
+ /// A 32-bit signed integer that is the hash code for this instance.
+ public override int GetHashCode()
+ {
+ return Address.GetHashCode();
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/See2Context.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/See2Context.cs
new file mode 100644
index 00000000..970a0889
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/I1/See2Context.cs
@@ -0,0 +1,55 @@
+#region Using
+
+
+
+#endregion
+
+namespace Compress.SevenZip.Compress.PPmd.I1
+{
+ ///
+ /// SEE2 (secondary escape estimation) contexts for PPM contexts with masked symbols.
+ ///
+ ///
+ ///
+ /// This must be a class rather than a structure because MakeEscapeFrequency returns a See2Context
+ /// instance from the see2Contexts array. The caller (for example, EncodeSymbol2) then updates the
+ /// returned See2Context instance and expects the updates to be reflected in the see2Contexts array.
+ /// This would not happen if this were a structure.
+ ///
+ ///
+ /// Note that in most cases fields are used rather than properties for performance reasons (for example,
+ /// Summary is a field rather than a property).
+ ///
+ ///
+ internal class See2Context
+ {
+ private const byte PeriodBitCount = 7;
+
+ public ushort Summary;
+ public byte Shift;
+ public byte Count;
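+ // Summary holds the escape-frequency estimate scaled by 2^Shift; Mean() extracts the current
+ // estimate and decays Summary, while Update() doubles Summary (and bumps Shift) every few calls
+ // until Shift reaches PeriodBitCount.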
+
+ public void Initialize(uint initialValue)
+ {
+ Shift = PeriodBitCount - 4;
+ Summary = (ushort) (initialValue << Shift);
+ Count = 7;
+ }
+
+ public uint Mean()
+ {
+ uint value = (uint) (Summary >> Shift);
+ Summary = (ushort) (Summary - value);
+ return (uint) (value + ((value == 0) ? 1 : 0));
+ }
+
+ public void Update()
+ {
+ if (Shift < PeriodBitCount && --Count == 0)
+ {
+ Summary += Summary;
+ Count = (byte) (3 << Shift++);
+ }
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/PpmdProperties.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/PpmdProperties.cs
new file mode 100644
index 00000000..2e5adbfd
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/PpmdProperties.cs
@@ -0,0 +1,81 @@
+using System;
+
+namespace Compress.SevenZip.Compress.PPmd
+{
+ public enum PpmdVersion
+ {
+ H,
+ H7z,
+ I1,
+ }
+
+ public class PpmdProperties
+ {
+ public PpmdVersion Version = PpmdVersion.I1;
+ public int ModelOrder;
+ internal I1.ModelRestorationMethod ModelRestorationMethod;
+
+ private int allocatorSize;
+ internal I1.Allocator Allocator;
+
+ public PpmdProperties()
+ : this(16 << 20, 6)
+ {
+ }
+
+ public PpmdProperties(int allocatorSize, int modelOrder)
+ : this(allocatorSize, modelOrder, I1.ModelRestorationMethod.Restart)
+ {
+ }
+
+ internal PpmdProperties(int allocatorSize, int modelOrder, I1.ModelRestorationMethod modelRestorationMethod)
+ {
+ AllocatorSize = allocatorSize;
+ ModelOrder = modelOrder;
+ ModelRestorationMethod = modelRestorationMethod;
+ }
+
+ public PpmdProperties(byte[] properties)
+ {
+ if (properties.Length == 2)
+ {
+ ushort props = BitConverter.ToUInt16(properties, 0);
+ AllocatorSize = (((props >> 4) & 0xff) + 1) << 20;
+ ModelOrder = (props & 0x0f) + 1;
+ ModelRestorationMethod = (I1.ModelRestorationMethod)(props >> 12);
+ }
+ else if (properties.Length == 5)
+ {
+ Version = PpmdVersion.H7z;
+ AllocatorSize = BitConverter.ToInt32(properties, 1);
+ ModelOrder = properties[0];
+ }
+ }
+
+ public int AllocatorSize
+ {
+ get
+ {
+ return allocatorSize;
+ }
+ set
+ {
+ allocatorSize = value;
+ if (Version == PpmdVersion.I1)
+ {
+ if (Allocator == null)
+ Allocator = new I1.Allocator();
+ Allocator.Start(allocatorSize);
+ }
+ }
+ }
+
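+ // Packed 2-byte property word (mirrors the byte[] constructor above): bits 0-3 = model order - 1,
+ // bits 4-11 = allocator size in MB - 1, bits 12-15 = model restoration method.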
+ public byte[] Properties
+ {
+ get
+ {
+ return BitConverter.GetBytes((ushort)((ModelOrder - 1) + (((AllocatorSize >> 20) - 1) << 4) + ((ushort)ModelRestorationMethod << 12)));
+ }
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/PpmdStream.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/PpmdStream.cs
new file mode 100644
index 00000000..f5db7540
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/PpmdStream.cs
@@ -0,0 +1,154 @@
+using System;
+using System.IO;
+using Compress.SevenZip.Compress.RangeCoder;
+
+namespace Compress.SevenZip.Compress.PPmd
+{
+ public class PpmdStream : Stream
+ {
+ private PpmdProperties properties;
+ private Stream stream;
+ private bool compress;
+ private I1.Model model;
+ private H.ModelPPM modelH;
+ private Decoder decoder;
+ private long position = 0;
+
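+ // Wraps either the I1 model or the H model depending on PpmdProperties.Version; compression is
+ // only implemented for the I1 variant (the H paths throw NotImplementedException when compressing).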
+ public PpmdStream(PpmdProperties properties, Stream stream, bool compress)
+ {
+ this.properties = properties;
+ this.stream = stream;
+ this.compress = compress;
+
+ if (properties.Version == PpmdVersion.I1)
+ {
+ model = new I1.Model();
+ if (compress)
+ model.EncodeStart(properties);
+ else
+ model.DecodeStart(stream, properties);
+ }
+ if (properties.Version == PpmdVersion.H)
+ {
+ modelH = new H.ModelPPM();
+ if (compress)
+ throw new NotImplementedException();
+ else
+ modelH.decodeInit(stream, properties.ModelOrder, properties.AllocatorSize);
+ }
+ if (properties.Version == PpmdVersion.H7z)
+ {
+ modelH = new H.ModelPPM();
+ if (compress)
+ throw new NotImplementedException();
+ else
+ modelH.decodeInit(null, properties.ModelOrder, properties.AllocatorSize);
+ decoder = new Decoder();
+ decoder.Init(stream);
+ }
+ }
+
+ public override bool CanRead
+ {
+ get { return !compress; }
+ }
+
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+ public override bool CanWrite
+ {
+ get { return compress; }
+ }
+
+ public override void Flush()
+ {
+ }
+
+ protected override void Dispose(bool isDisposing)
+ {
+ if (isDisposing)
+ {
+ if (compress)
+ model.EncodeBlock(stream, new MemoryStream(), true);
+ }
+ base.Dispose(isDisposing);
+ }
+
+ public override long Length
+ {
+ get { throw new NotImplementedException(); }
+ }
+
+ public override long Position
+ {
+ get
+ {
+ return position;
+ }
+ set
+ {
+ throw new NotImplementedException();
+ }
+ }
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ if (compress)
+ return 0;
+ int size = 0;
+ if (properties.Version == PpmdVersion.I1)
+ size = model.DecodeBlock(stream, buffer, offset, count);
+ if (properties.Version == PpmdVersion.H)
+ {
+ int c;
+ while (size < count && (c = modelH.decodeChar()) >= 0)
+ {
+ buffer[offset++] = (byte)c;
+ size++;
+ }
+ }
+ if (properties.Version == PpmdVersion.H7z)
+ {
+ int c;
+ while (size < count && (c = modelH.decodeChar(decoder)) >= 0)
+ {
+ buffer[offset++] = (byte)c;
+ size++;
+ }
+ }
+ position += size;
+ return size;
+ }
+
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ if (origin != SeekOrigin.Current)
+ throw new NotImplementedException();
+
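+ // Only forward seeks relative to the current position are supported; they are emulated by
+ // decoding and discarding the requested number of bytes.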
+ byte[] tmpBuff = new byte[1024];
+ long sizeToGo = offset;
+ while (sizeToGo > 0)
+ {
+ int sizenow = sizeToGo > 1024 ? 1024 : (int)sizeToGo;
+ Read(tmpBuff, 0, sizenow);
+ sizeToGo -= sizenow;
+ }
+
+ return offset;
+ }
+
+ public override void SetLength(long value)
+ {
+ throw new NotImplementedException();
+ }
+
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ if (compress)
+ model.EncodeBlock(stream, new MemoryStream(buffer, offset, count), false);
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/Utility.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/Utility.cs
new file mode 100644
index 00000000..934f4fbb
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/PPmd/Utility.cs
@@ -0,0 +1,467 @@
+using System;
+using System.Collections.Generic;
+using System.Collections.ObjectModel;
+using System.IO;
+using System.Linq;
+
+namespace Compress.SevenZip.Compress.PPmd
+{
+ internal static class Utility
+ {
+ public static ReadOnlyCollection<T> ToReadOnly<T>(this IEnumerable<T> items)
+ {
+ return new ReadOnlyCollection<T>(items.ToList());
+ }
+
+ ///
+ /// Performs an unsigned bitwise right shift with the specified number
+ ///
+ /// Number to operate on
+ /// Amount of bits to shift
+ /// The resulting number from the shift operation
+ public static int URShift(int number, int bits)
+ {
+ if (number >= 0)
+ return number >> bits;
+ else
+ return (number >> bits) + (2 << ~bits);
+ }
+
+ ///
+ /// Performs an unsigned bitwise right shift with the specified number
+ ///
+ /// Number to operate on
+ /// Amount of bits to shift
+ /// The resulting number from the shift operation
+ public static int URShift(int number, long bits)
+ {
+ return URShift(number, (int)bits);
+ }
+
+ ///
+ /// Performs an unsigned bitwise right shift with the specified number
+ ///
+ /// Number to operate on
+ /// Amount of bits to shift
+ /// The resulting number from the shift operation
+ public static long URShift(long number, int bits)
+ {
+ if (number >= 0)
+ return number >> bits;
+ else
+ return (number >> bits) + (2L << ~bits);
+ }
+
+ ///
+ /// Performs an unsigned bitwise right shift with the specified number
+ ///
+ /// Number to operate on
+ /// Amount of bits to shift
+ /// The resulting number from the shift operation
+ public static long URShift(long number, long bits)
+ {
+ return URShift(number, (int)bits);
+ }
+
+ ///
+ /// Fills the array with a specific value from a start index to an end index.
+ ///
+ /// The array to be filled.
+ /// The first index to be filled.
+ /// The last index to be filled.
+ /// The value to fill the array with.
+ public static void Fill<T>(T[] array, int fromindex, int toindex, T val) where T : struct
+ {
+ if (array.Length == 0)
+ {
+ throw new NullReferenceException();
+ }
+ if (fromindex > toindex)
+ {
+ throw new ArgumentException();
+ }
+ if ((fromindex < 0) || ((System.Array)array).Length < toindex)
+ {
+ throw new IndexOutOfRangeException();
+ }
+ for (int index = (fromindex > 0) ? fromindex-- : fromindex; index < toindex; index++)
+ {
+ array[index] = val;
+ }
+ }
+
+ ///
+ /// Fills the array with a specific value.
+ ///
+ /// The array to be filled.
+ /// The value to fill the array with.
+ public static void Fill<T>(T[] array, T val) where T : struct
+ {
+ Fill(array, 0, array.Length, val);
+ }
+
+ public static void SetSize(this List<byte> list, int count)
+ {
+ if (count > list.Count)
+ {
+ for (int i = list.Count; i < count; i++)
+ {
+ list.Add(0x0);
+ }
+ }
+ else
+ {
+ byte[] temp = new byte[count];
+ list.CopyTo(temp, 0);
+ list.Clear();
+ list.AddRange(temp);
+ }
+ }
+
+ /// Read an int value from the byte array at the given position (Big Endian)
+ ///
+ ///
+ /// the array to read from
+ ///
+ /// the offset
+ ///
+ /// the value
+ ///
+ public static int readIntBigEndian(byte[] array, int pos)
+ {
+ int temp = 0;
+ temp |= array[pos] & 0xff;
+ temp <<= 8;
+ temp |= array[pos + 1] & 0xff;
+ temp <<= 8;
+ temp |= array[pos + 2] & 0xff;
+ temp <<= 8;
+ temp |= array[pos + 3] & 0xff;
+ return temp;
+ }
+
+ /// Read a short value from the byte array at the given position (little
+ /// Endian)
+ ///
+ ///
+ /// the array to read from
+ ///
+ /// the offset
+ ///
+ /// the value
+ ///
+ public static short readShortLittleEndian(byte[] array, int pos)
+ {
+ return BitConverter.ToInt16(array, pos);
+ }
+
+ /// Read an int value from the byte array at the given position (little
+ /// Endian)
+ ///
+ ///
+ /// the array to read from
+ ///
+ /// the offset
+ ///
+ /// the value
+ ///
+ public static int readIntLittleEndian(byte[] array, int pos)
+ {
+ return BitConverter.ToInt32(array, pos);
+ }
+
+ /// Write an int value into the byte array at the given position (Big endian)
+ ///
+ ///
+ /// the array
+ ///
+ /// the offset
+ ///
+ /// the value to write
+ ///
+ public static void writeIntBigEndian(byte[] array, int pos, int value)
+ {
+ array[pos] = (byte)((Utility.URShift(value, 24)) & 0xff);
+ array[pos + 1] = (byte)((Utility.URShift(value, 16)) & 0xff);
+ array[pos + 2] = (byte)((Utility.URShift(value, 8)) & 0xff);
+ array[pos + 3] = (byte)((value) & 0xff);
+ }
+
+ /// Write a short value into the byte array at the given position (little
+ /// endian)
+ ///
+ ///
+ /// the array
+ ///
+ /// the offset
+ ///
+ /// the value to write
+ ///
+ public static void WriteLittleEndian(byte[] array, int pos, short value)
+ {
+ byte[] newBytes = BitConverter.GetBytes(value);
+ Array.Copy(newBytes, 0, array, pos, newBytes.Length);
+ }
+
+ /// Increment a short value at the specified position by the specified amount
+ /// (little endian).
+ ///
+ public static void incShortLittleEndian(byte[] array, int pos, short incrementValue)
+ {
+ short existingValue = BitConverter.ToInt16(array, pos);
+ existingValue += incrementValue;
+ WriteLittleEndian(array, pos, existingValue);
+ //int c = Utility.URShift(((array[pos] & 0xff) + (dv & 0xff)), 8);
+ //array[pos] = (byte)(array[pos] + (dv & 0xff));
+ //if ((c > 0) || ((dv & 0xff00) != 0))
+ //{
+ // array[pos + 1] = (byte)(array[pos + 1] + ((Utility.URShift(dv, 8)) & 0xff) + c);
+ //}
+ }
+
+ /// Write an int value into the byte array at the given position (little
+ /// endian)
+ ///
+ ///
+ /// the array
+ ///
+ /// the offset
+ ///
+ /// the value to write
+ ///
+ public static void WriteLittleEndian(byte[] array, int pos, int value)
+ {
+ byte[] newBytes = BitConverter.GetBytes(value);
+ Array.Copy(newBytes, 0, array, pos, newBytes.Length);
+ }
+
+ public static void Initialize<T>(this T[] array, Func<T> func)
+ {
+ for (int i = 0; i < array.Length; i++)
+ {
+ array[i] = func();
+ }
+ }
+
+ public static void AddRange<T>(this ICollection<T> destination, IEnumerable<T> source)
+ {
+ foreach (T item in source)
+ {
+ destination.Add(item);
+ }
+ }
+
+ public static void ForEach<T>(this IEnumerable<T> items, Action<T> action)
+ {
+ foreach (T item in items)
+ {
+ action(item);
+ }
+ }
+
+ public static IEnumerable<T> AsEnumerable<T>(this T item)
+ {
+ yield return item;
+ }
+
+ public static void CheckNotNull(this object obj, string name)
+ {
+ if (obj == null)
+ {
+ throw new ArgumentNullException(name);
+ }
+ }
+
+ public static void CheckNotNullOrEmpty(this string obj, string name)
+ {
+ obj.CheckNotNull(name);
+ if (obj.Length == 0)
+ {
+ throw new ArgumentException("String is empty.");
+ }
+ }
+
+ public static void Skip(this Stream source, long advanceAmount)
+ {
+ byte[] buffer = new byte[32 * 1024];
+ int read = 0;
+ int readCount = 0;
+ do
+ {
+ readCount = buffer.Length;
+ if (readCount > advanceAmount)
+ {
+ readCount = (int)advanceAmount;
+ }
+ read = source.Read(buffer, 0, readCount);
+ if (read < 0)
+ {
+ break;
+ }
+ advanceAmount -= read;
+ if (advanceAmount == 0)
+ {
+ break;
+ }
+ } while (true);
+ }
+
+ public static void SkipAll(this Stream source)
+ {
+ byte[] buffer = new byte[32 * 1024];
+ do
+ {
+ } while (source.Read(buffer, 0, buffer.Length) == buffer.Length);
+ }
+
+
+ public static byte[] UInt32ToBigEndianBytes(uint x)
+ {
+ return new byte[] {
+ (byte)((x >> 24) & 0xff),
+ (byte)((x >> 16) & 0xff),
+ (byte)((x >> 8) & 0xff),
+ (byte)(x & 0xff) };
+ }
+
+ public static DateTime DosDateToDateTime(UInt16 iDate, UInt16 iTime)
+ {
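+ // MS-DOS packed date: bits 15-9 = year - 1980, bits 8-5 = month, bits 4-0 = day.
+ // MS-DOS packed time: bits 15-11 = hour, bits 10-5 = minute, bits 4-0 = seconds / 2.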
+ int year = iDate / 512 + 1980;
+ int month = iDate % 512 / 32;
+ int day = iDate % 512 % 32;
+ int hour = iTime / 2048;
+ int minute = iTime % 2048 / 32;
+ int second = iTime % 2048 % 32 * 2;
+
+ if (iDate == UInt16.MaxValue || month == 0 || day == 0)
+ {
+ year = 1980;
+ month = 1;
+ day = 1;
+ }
+
+ if (iTime == UInt16.MaxValue)
+ {
+ hour = minute = second = 0;
+ }
+
+ DateTime dt;
+ try
+ {
+ dt = new DateTime(year, month, day, hour, minute, second);
+ }
+ catch
+ {
+ dt = new DateTime();
+ }
+ return dt;
+ }
+
+ public static uint DateTimeToDosTime(this DateTime? dateTime)
+ {
+ if (dateTime == null)
+ {
+ return 0;
+ }
+ return (uint)(
+ (dateTime.Value.Second / 2) | (dateTime.Value.Minute << 5) | (dateTime.Value.Hour << 11) |
+ (dateTime.Value.Day << 16) | (dateTime.Value.Month << 21) | ((dateTime.Value.Year - 1980) << 25));
+ }
+
+
+ public static DateTime DosDateToDateTime(UInt32 iTime)
+ {
+ return DosDateToDateTime((UInt16)(iTime / 65536),
+ (UInt16)(iTime % 65536));
+ }
+
+ public static DateTime DosDateToDateTime(Int32 iTime)
+ {
+ return DosDateToDateTime((UInt32)iTime);
+ }
+
+ public static long TransferTo(this Stream source, Stream destination)
+ {
+ byte[] array = new byte[4096];
+ int count;
+ long total = 0;
+ while ((count = source.Read(array, 0, array.Length)) != 0)
+ {
+ total += count;
+ destination.Write(array, 0, count);
+ }
+ return total;
+ }
+
+ public static bool ReadFully(this Stream stream, byte[] buffer)
+ {
+ int total = 0;
+ int read;
+ while ((read = stream.Read(buffer, total, buffer.Length - total)) > 0)
+ {
+ total += read;
+ if (total >= buffer.Length)
+ {
+ return true;
+ }
+ }
+ return (total >= buffer.Length);
+ }
+
+ public static string TrimNulls(this string source)
+ {
+ return source.Replace('\0', ' ').Trim();
+ }
+
+ public static bool BinaryEquals(this byte[] source, byte[] target)
+ {
+ if (source.Length != target.Length)
+ {
+ return false;
+ }
+ for (int i = 0; i < source.Length; ++i)
+ {
+ if (source[i] != target[i])
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+
+
+#if PORTABLE
+ public static void CopyTo(this byte[] array, byte[] destination, int index)
+ {
+ Array.Copy(array, 0, destination, index, array.Length);
+ }
+
+ public static long HostToNetworkOrder(long host)
+ {
+ return (int)((long)HostToNetworkOrder((int)host)
+ & unchecked((long)(unchecked((ulong)-1))) << 32
+ | ((long)HostToNetworkOrder((int)((int)host >> 32)) & unchecked((long)(unchecked((ulong)-1)))));
+ }
+ public static int HostToNetworkOrder(int host)
+ {
+ return (int)((int)(HostToNetworkOrder((short)host) & -1) << 16 | (HostToNetworkOrder((short)(host >> 16)) & -1));
+ }
+ public static short HostToNetworkOrder(short host)
+ {
+ return (short)((int)(host & 255) << 8 | ((int)host >> 8 & 255));
+ }
+ public static long NetworkToHostOrder(long network)
+ {
+ return HostToNetworkOrder(network);
+ }
+ public static int NetworkToHostOrder(int network)
+ {
+ return HostToNetworkOrder(network);
+ }
+ public static short NetworkToHostOrder(short network)
+ {
+ return HostToNetworkOrder(network);
+ }
+#endif
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/RangeCoder/RangeCoder.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/RangeCoder/RangeCoder.cs
new file mode 100644
index 00000000..10f2e09f
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/RangeCoder/RangeCoder.cs
@@ -0,0 +1,247 @@
+using System;
+
+namespace Compress.SevenZip.Compress.RangeCoder
+{
+ internal class Encoder
+ {
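+ // Byte-oriented range encoder in the style of the LZMA SDK: whenever Range drops below
+ // kTopValue (2^24), ShiftLow() flushes the top byte of Low and Range is scaled up by 8 bits.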
+ public const uint kTopValue = (1 << 24);
+
+ System.IO.Stream Stream;
+
+ public UInt64 Low;
+ public uint Range;
+ uint _cacheSize;
+ byte _cache;
+
+ //long StartPosition;
+
+ public void SetStream(System.IO.Stream stream)
+ {
+ Stream = stream;
+ }
+
+ public void ReleaseStream()
+ {
+ Stream = null;
+ }
+
+ public void Init()
+ {
+ //StartPosition = Stream.Position;
+
+ Low = 0;
+ Range = 0xFFFFFFFF;
+ _cacheSize = 1;
+ _cache = 0;
+ }
+
+ public void FlushData()
+ {
+ for (int i = 0; i < 5; i++)
+ ShiftLow();
+ }
+
+ public void FlushStream()
+ {
+ Stream.Flush();
+ }
+
+ public void CloseStream()
+ {
+ Stream.Dispose();
+ }
+
+ public void Encode(uint start, uint size, uint total)
+ {
+ Low += start * (Range /= total);
+ Range *= size;
+ while (Range < kTopValue)
+ {
+ Range <<= 8;
+ ShiftLow();
+ }
+ }
+
+ public void ShiftLow()
+ {
+ if ((uint)Low < (uint)0xFF000000 || (uint)(Low >> 32) == 1)
+ {
+ byte temp = _cache;
+ do
+ {
+ Stream.WriteByte((byte)(temp + (Low >> 32)));
+ temp = 0xFF;
+ }
+ while (--_cacheSize != 0);
+ _cache = (byte)(((uint)Low) >> 24);
+ }
+ _cacheSize++;
+ Low = ((uint)Low) << 8;
+ }
+
+ public void EncodeDirectBits(uint v, int numTotalBits)
+ {
+ for (int i = numTotalBits - 1; i >= 0; i--)
+ {
+ Range >>= 1;
+ if (((v >> i) & 1) == 1)
+ Low += Range;
+ if (Range < kTopValue)
+ {
+ Range <<= 8;
+ ShiftLow();
+ }
+ }
+ }
+
+ public void EncodeBit(uint size0, int numTotalBits, uint symbol)
+ {
+ uint newBound = (Range >> numTotalBits) * size0;
+ if (symbol == 0)
+ Range = newBound;
+ else
+ {
+ Low += newBound;
+ Range -= newBound;
+ }
+ while (Range < kTopValue)
+ {
+ Range <<= 8;
+ ShiftLow();
+ }
+ }
+
+ public long GetProcessedSizeAdd()
+ {
+ return -1;
+ //return _cacheSize + Stream.Position - StartPosition + 4;
+ // (long)Stream.GetProcessedSize();
+ }
+ }
+
+ internal class Decoder
+ {
+ public const uint kTopValue = (1 << 24);
+ public uint Range;
+ public uint Code = 0;
+ // public Buffer.InBuffer Stream = new Buffer.InBuffer(1 << 16);
+ public System.IO.Stream Stream;
+ public long Total;
+
+ public void Init(System.IO.Stream stream)
+ {
+ // Stream.Init(stream);
+ Stream = stream;
+
+ Code = 0;
+ Range = 0xFFFFFFFF;
+ for (int i = 0; i < 5; i++)
+ Code = (Code << 8) | (byte)Stream.ReadByte();
+ Total = 5;
+ }
+
+ public void ReleaseStream()
+ {
+ // Stream.ReleaseStream();
+ Stream = null;
+ }
+
+ public void CloseStream()
+ {
+ Stream.Dispose();
+ }
+
+ public void Normalize()
+ {
+ while (Range < kTopValue)
+ {
+ Code = (Code << 8) | (byte)Stream.ReadByte();
+ Range <<= 8;
+ Total++;
+ }
+ }
+
+ public void Normalize2()
+ {
+ if (Range < kTopValue)
+ {
+ Code = (Code << 8) | (byte)Stream.ReadByte();
+ Range <<= 8;
+ Total++;
+ }
+ }
+
+ public uint GetThreshold(uint total)
+ {
+ return Code / (Range /= total);
+ }
+
+ public void Decode(uint start, uint size)
+ {
+ Code -= start * Range;
+ Range *= size;
+ Normalize();
+ }
+
+ public uint DecodeDirectBits(int numTotalBits)
+ {
+ uint range = Range;
+ uint code = Code;
+ uint result = 0;
+ for (int i = numTotalBits; i > 0; i--)
+ {
+ range >>= 1;
+ /*
+ result <<= 1;
+ if (code >= range)
+ {
+ code -= range;
+ result |= 1;
+ }
+ */
+ uint t = (code - range) >> 31;
+ code -= range & (t - 1);
+ result = (result << 1) | (1 - t);
+
+ if (range < kTopValue)
+ {
+ code = (code << 8) | (byte)Stream.ReadByte();
+ range <<= 8;
+ Total++;
+ }
+ }
+ Range = range;
+ Code = code;
+ return result;
+ }
+
+ public uint DecodeBit(uint size0, int numTotalBits)
+ {
+ uint newBound = (Range >> numTotalBits) * size0;
+ uint symbol;
+ if (Code < newBound)
+ {
+ symbol = 0;
+ Range = newBound;
+ }
+ else
+ {
+ symbol = 1;
+ Code -= newBound;
+ Range -= newBound;
+ }
+ Normalize();
+ return symbol;
+ }
+
+ public bool IsFinished
+ {
+ get
+ {
+ return Code == 0;
+ }
+ }
+
+ // ulong GetProcessedSize() {return Stream.GetProcessedSize(); }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/RangeCoder/RangeCoderBit.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/RangeCoder/RangeCoderBit.cs
new file mode 100644
index 00000000..357a3e79
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/RangeCoder/RangeCoderBit.cs
@@ -0,0 +1,119 @@
+using System;
+
+namespace Compress.SevenZip.Compress.RangeCoder
+{
+ internal struct BitEncoder
+ {
+ public const int kNumBitModelTotalBits = 11;
+ public const uint kBitModelTotal = (1 << kNumBitModelTotalBits);
+ const int kNumMoveBits = 5;
+ const int kNumMoveReducingBits = 2;
+ public const int kNumBitPriceShiftBits = 6;
+
+ uint Prob;
+
+ public void Init() { Prob = kBitModelTotal >> 1; }
+
+ public void UpdateModel(uint symbol)
+ {
+ if (symbol == 0)
+ Prob += (kBitModelTotal - Prob) >> kNumMoveBits;
+ else
+ Prob -= (Prob) >> kNumMoveBits;
+ }
+
+ public void Encode(Encoder encoder, uint symbol)
+ {
+ // encoder.EncodeBit(Prob, kNumBitModelTotalBits, symbol);
+ // UpdateModel(symbol);
+ uint newBound = (encoder.Range >> kNumBitModelTotalBits) * Prob;
+ if (symbol == 0)
+ {
+ encoder.Range = newBound;
+ Prob += (kBitModelTotal - Prob) >> kNumMoveBits;
+ }
+ else
+ {
+ encoder.Low += newBound;
+ encoder.Range -= newBound;
+ Prob -= (Prob) >> kNumMoveBits;
+ }
+ if (encoder.Range < Encoder.kTopValue)
+ {
+ encoder.Range <<= 8;
+ encoder.ShiftLow();
+ }
+ }
+
+ private static UInt32[] ProbPrices = new UInt32[kBitModelTotal >> kNumMoveReducingBits];
+
+ static BitEncoder()
+ {
+ const int kNumBits = (kNumBitModelTotalBits - kNumMoveReducingBits);
+ for (int i = kNumBits - 1; i >= 0; i--)
+ {
+ UInt32 start = (UInt32)1 << (kNumBits - i - 1);
+ UInt32 end = (UInt32)1 << (kNumBits - i);
+ for (UInt32 j = start; j < end; j++)
+ ProbPrices[j] = ((UInt32)i << kNumBitPriceShiftBits) +
+ (((end - j) << kNumBitPriceShiftBits) >> (kNumBits - i - 1));
+ }
+ }
+
+ public uint GetPrice(uint symbol)
+ {
+ return ProbPrices[(((Prob - symbol) ^ ((-(int)symbol))) & (kBitModelTotal - 1)) >> kNumMoveReducingBits];
+ }
+ public uint GetPrice0() { return ProbPrices[Prob >> kNumMoveReducingBits]; }
+ public uint GetPrice1() { return ProbPrices[(kBitModelTotal - Prob) >> kNumMoveReducingBits]; }
+ }
+
+ internal struct BitDecoder
+ {
+ public const int kNumBitModelTotalBits = 11;
+ public const uint kBitModelTotal = (1 << kNumBitModelTotalBits);
+ const int kNumMoveBits = 5;
+
+ uint Prob;
+
+ public void UpdateModel(int numMoveBits, uint symbol)
+ {
+ if (symbol == 0)
+ Prob += (kBitModelTotal - Prob) >> numMoveBits;
+ else
+ Prob -= (Prob) >> numMoveBits;
+ }
+
+ public void Init() { Prob = kBitModelTotal >> 1; }
+
+ public uint Decode(RangeCoder.Decoder rangeDecoder)
+ {
+ uint newBound = (uint)(rangeDecoder.Range >> kNumBitModelTotalBits) * (uint)Prob;
+ if (rangeDecoder.Code < newBound)
+ {
+ rangeDecoder.Range = newBound;
+ Prob += (kBitModelTotal - Prob) >> kNumMoveBits;
+ if (rangeDecoder.Range < Decoder.kTopValue)
+ {
+ rangeDecoder.Code = (rangeDecoder.Code << 8) | (byte)rangeDecoder.Stream.ReadByte();
+ rangeDecoder.Range <<= 8;
+ rangeDecoder.Total++;
+ }
+ return 0;
+ }
+ else
+ {
+ rangeDecoder.Range -= newBound;
+ rangeDecoder.Code -= newBound;
+ Prob -= (Prob) >> kNumMoveBits;
+ if (rangeDecoder.Range < Decoder.kTopValue)
+ {
+ rangeDecoder.Code = (rangeDecoder.Code << 8) | (byte)rangeDecoder.Stream.ReadByte();
+ rangeDecoder.Range <<= 8;
+ rangeDecoder.Total++;
+ }
+ return 1;
+ }
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Compress/RangeCoder/RangeCoderBitTree.cs b/SabreTools.Library/External/Compress/SevenZip/Compress/RangeCoder/RangeCoderBitTree.cs
new file mode 100644
index 00000000..06a814ef
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Compress/RangeCoder/RangeCoderBitTree.cs
@@ -0,0 +1,157 @@
+using System;
+
+namespace Compress.SevenZip.Compress.RangeCoder
+{
+ internal struct BitTreeEncoder
+ {
+ BitEncoder[] Models;
+ int NumBitLevels;
+
+ public BitTreeEncoder(int numBitLevels)
+ {
+ NumBitLevels = numBitLevels;
+ Models = new BitEncoder[1 << numBitLevels];
+ }
+
+ public void Init()
+ {
+ for (uint i = 1; i < (1 << NumBitLevels); i++)
+ Models[i].Init();
+ }
+
+ public void Encode(Encoder rangeEncoder, UInt32 symbol)
+ {
+ UInt32 m = 1;
+ for (int bitIndex = NumBitLevels; bitIndex > 0; )
+ {
+ bitIndex--;
+ UInt32 bit = (symbol >> bitIndex) & 1;
+ Models[m].Encode(rangeEncoder, bit);
+ m = (m << 1) | bit;
+ }
+ }
+
+ public void ReverseEncode(Encoder rangeEncoder, UInt32 symbol)
+ {
+ UInt32 m = 1;
+ for (UInt32 i = 0; i < NumBitLevels; i++)
+ {
+ UInt32 bit = symbol & 1;
+ Models[m].Encode(rangeEncoder, bit);
+ m = (m << 1) | bit;
+ symbol >>= 1;
+ }
+ }
+
+ public UInt32 GetPrice(UInt32 symbol)
+ {
+ UInt32 price = 0;
+ UInt32 m = 1;
+ for (int bitIndex = NumBitLevels; bitIndex > 0; )
+ {
+ bitIndex--;
+ UInt32 bit = (symbol >> bitIndex) & 1;
+ price += Models[m].GetPrice(bit);
+ m = (m << 1) + bit;
+ }
+ return price;
+ }
+
+ public UInt32 ReverseGetPrice(UInt32 symbol)
+ {
+ UInt32 price = 0;
+ UInt32 m = 1;
+ for (int i = NumBitLevels; i > 0; i--)
+ {
+ UInt32 bit = symbol & 1;
+ symbol >>= 1;
+ price += Models[m].GetPrice(bit);
+ m = (m << 1) | bit;
+ }
+ return price;
+ }
+
+ public static UInt32 ReverseGetPrice(BitEncoder[] Models, UInt32 startIndex,
+ int NumBitLevels, UInt32 symbol)
+ {
+ UInt32 price = 0;
+ UInt32 m = 1;
+ for (int i = NumBitLevels; i > 0; i--)
+ {
+ UInt32 bit = symbol & 1;
+ symbol >>= 1;
+ price += Models[startIndex + m].GetPrice(bit);
+ m = (m << 1) | bit;
+ }
+ return price;
+ }
+
+ public static void ReverseEncode(BitEncoder[] Models, UInt32 startIndex,
+ Encoder rangeEncoder, int NumBitLevels, UInt32 symbol)
+ {
+ UInt32 m = 1;
+ for (int i = 0; i < NumBitLevels; i++)
+ {
+ UInt32 bit = symbol & 1;
+ Models[startIndex + m].Encode(rangeEncoder, bit);
+ m = (m << 1) | bit;
+ symbol >>= 1;
+ }
+ }
+ }
+
+ internal struct BitTreeDecoder
+ {
+ BitDecoder[] Models;
+ int NumBitLevels;
+
+ public BitTreeDecoder(int numBitLevels)
+ {
+ NumBitLevels = numBitLevels;
+ Models = new BitDecoder[1 << numBitLevels];
+ }
+
+ public void Init()
+ {
+ for (uint i = 1; i < (1 << NumBitLevels); i++)
+ Models[i].Init();
+ }
+
+ public uint Decode(RangeCoder.Decoder rangeDecoder)
+ {
+ uint m = 1;
+ for (int bitIndex = NumBitLevels; bitIndex > 0; bitIndex--)
+ m = (m << 1) + Models[m].Decode(rangeDecoder);
+ return m - ((uint)1 << NumBitLevels);
+ }
+
+ public uint ReverseDecode(RangeCoder.Decoder rangeDecoder)
+ {
+ uint m = 1;
+ uint symbol = 0;
+ for (int bitIndex = 0; bitIndex < NumBitLevels; bitIndex++)
+ {
+ uint bit = Models[m].Decode(rangeDecoder);
+ m <<= 1;
+ m += bit;
+ symbol |= (bit << bitIndex);
+ }
+ return symbol;
+ }
+
+ public static uint ReverseDecode(BitDecoder[] Models, UInt32 startIndex,
+ RangeCoder.Decoder rangeDecoder, int NumBitLevels)
+ {
+ uint m = 1;
+ uint symbol = 0;
+ for (int bitIndex = 0; bitIndex < NumBitLevels; bitIndex++)
+ {
+ uint bit = Models[startIndex + m].Decode(rangeDecoder);
+ m <<= 1;
+ m += bit;
+ symbol |= (bit << bitIndex);
+ }
+ return symbol;
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Filters/BCJ2Filter.cs b/SabreTools.Library/External/Compress/SevenZip/Filters/BCJ2Filter.cs
new file mode 100644
index 00000000..09664ee5
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Filters/BCJ2Filter.cs
@@ -0,0 +1,204 @@
+using System;
+using System.IO;
+
+namespace Compress.SevenZip.Filters
+{
+ public class BCJ2Filter : Stream
+ {
+ private Stream baseStream;
+
+ private long position = 0;
+ private byte[] output = new byte[4];
+ private int outputOffset = 0;
+ private int outputCount = 0;
+
+ private Stream control;
+ private Stream data1;
+ private Stream data2;
+
+ private ushort[] p = new ushort[256 + 2];
+ private uint range, code;
+ private byte prevByte = 0;
+
+ private const int kNumTopBits = 24;
+ private const int kTopValue = 1 << kNumTopBits;
+
+ private const int kNumBitModelTotalBits = 11;
+ private const int kBitModelTotal = 1 << kNumBitModelTotalBits;
+ private const int kNumMoveBits = 5;
+
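+ // BCJ2 decoding: for every E8/E9/Jcc opcode copied from the main stream, one bit is decoded from
+ // the range-coded control stream; if set, the 32-bit absolute target is read from data1 (E8 calls)
+ // or data2 (E9 jumps and Jcc) and converted back to a relative offset before being emitted.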
+ private static bool IsJ(byte b0, byte b1)
+ {
+ return (b1 & 0xFE) == 0xE8 || IsJcc(b0, b1);
+ }
+
+ private static bool IsJcc(byte b0, byte b1)
+ {
+ return b0 == 0x0F && (b1 & 0xF0) == 0x80;
+ }
+
+ public BCJ2Filter(Stream baseStream, Stream data1, Stream data2, Stream control)
+ {
+ this.control = control;
+ this.data1 = data1;
+ this.data2 = data2;
+ this.baseStream = baseStream;
+
+ int i;
+ for (i = 0; i < p.Length; i++)
+ p[i] = kBitModelTotal >> 1;
+
+ code = 0;
+ range = 0xFFFFFFFF;
+
+ byte[] controlbuf = new byte[5];
+ control.Read(controlbuf, 0, 5);
+
+ for (i = 0; i < 5; i++)
+ code = (code << 8) | controlbuf[i];
+ }
+
+ public override bool CanRead
+ {
+ get { return true; }
+ }
+
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+ public override bool CanWrite
+ {
+ get { return false; }
+ }
+
+ public override void Flush()
+ {
+ throw new NotImplementedException();
+ }
+
+ public override long Length
+ {
+ get { return baseStream.Length + data1.Length + data2.Length; }
+ }
+
+ public override long Position
+ {
+ get
+ {
+ return position;
+ }
+ set
+ {
+ throw new NotImplementedException();
+ }
+ }
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ int size = 0;
+ byte b = 0;
+
+ while (size < count)
+ {
+ while (outputOffset < outputCount)
+ {
+ b = output[outputOffset++];
+ buffer[offset++] = b;
+ size++;
+ position++;
+
+ prevByte = b;
+ if (size == count)
+ return size;
+ }
+
+ b = (byte)baseStream.ReadByte();
+ buffer[offset++] = b;
+ size++;
+ position++;
+
+ if (!IsJ(prevByte, b))
+ prevByte = b;
+ else
+ {
+ int prob;
+ if (b == 0xE8)
+ prob = prevByte;
+ else if (b == 0xE9)
+ prob = 256;
+ else
+ prob = 257;
+
+ uint bound = (range >> kNumBitModelTotalBits) * p[prob];
+ if (code < bound)
+ {
+ range = bound;
+ p[prob] += (ushort)((kBitModelTotal - p[prob]) >> kNumMoveBits);
+ if (range < kTopValue)
+ {
+ range <<= 8;
+ code = (code << 8) | (byte)control.ReadByte();
+ }
+ prevByte = b;
+ }
+ else
+ {
+ range -= bound;
+ code -= bound;
+ p[prob] -= (ushort)(p[prob] >> kNumMoveBits);
+ if (range < kTopValue)
+ {
+ range <<= 8;
+ code = (code << 8) | (byte)control.ReadByte();
+ }
+
+ uint dest;
+ if (b == 0xE8)
+ dest = (uint)((data1.ReadByte() << 24) | (data1.ReadByte() << 16) | (data1.ReadByte() << 8) | data1.ReadByte());
+ else
+ dest = (uint)((data2.ReadByte() << 24) | (data2.ReadByte() << 16) | (data2.ReadByte() << 8) | data2.ReadByte());
+ dest -= (uint)(position + 4);
+
+ output[0] = (byte)dest;
+ output[1] = (byte)(dest >> 8);
+ output[2] = (byte)(dest >> 16);
+ output[3] = (byte)(dest >> 24);
+ outputOffset = 0;
+ outputCount = 4;
+ }
+ }
+ }
+
+ return size;
+ }
+
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ if (origin != SeekOrigin.Current)
+ throw new NotImplementedException();
+
+ const int bufferSize = 10240;
+ byte[] seekBuffer = new byte[bufferSize];
+ long seekToGo = offset;
+ while (seekToGo > 0)
+ {
+ long get = seekToGo > bufferSize ? bufferSize : seekToGo;
+ Read(seekBuffer, 0, (int)get);
+ seekToGo -= get;
+ }
+ return position;
+ }
+
+ public override void SetLength(long value)
+ {
+ throw new NotImplementedException();
+ }
+
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ throw new NotImplementedException();
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Filters/BCJFilter.cs b/SabreTools.Library/External/Compress/SevenZip/Filters/BCJFilter.cs
new file mode 100644
index 00000000..666ce0ce
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Filters/BCJFilter.cs
@@ -0,0 +1,90 @@
+using System.IO;
+
+namespace Compress.SevenZip.Filters
+{
+ public class BCJFilter : Filter
+ {
+ private static readonly bool[] MASK_TO_ALLOWED_STATUS = new bool[] {true, true, true, false, true, false, false, false};
+ private static readonly int[] MASK_TO_BIT_NUMBER = new int[] { 0, 1, 2, 2, 3, 3, 3, 3 };
+
+ private int pos;
+ private int prevMask = 0;
+
+ public BCJFilter(bool isEncoder, Stream baseStream) : base(isEncoder, baseStream, 5)
+ {
+ pos = 5;
+ }
+
+ private static bool test86MSByte(byte b)
+ {
+ return b == 0x00 || b == 0xFF;
+ }
+
+ protected override int Transform(byte[] buffer, int offset, int count)
+ {
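+ // x86 BCJ filter: rewrites the 32-bit operands of 0xE8/0xE9 call/jump instructions between
+ // relative and absolute form (adding the stream position when encoding, subtracting when
+ // decoding); prevMask tracks recent matches so operands unlikely to be code addresses are skipped.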
+ int prevPos = offset - 1;
+ int end = offset + count - 5;
+ int i;
+
+ for (i = offset; i <= end; ++i) {
+ if ((buffer[i] & 0xFE) != 0xE8)
+ continue;
+
+ prevPos = i - prevPos;
+ if ((prevPos & ~3) != 0) { // (unsigned)prevPos > 3
+ prevMask = 0;
+ } else {
+ prevMask = (prevMask << (prevPos - 1)) & 7;
+ if (prevMask != 0) {
+ if (!MASK_TO_ALLOWED_STATUS[prevMask] || test86MSByte(
+ buffer[i + 4 - MASK_TO_BIT_NUMBER[prevMask]])) {
+ prevPos = i;
+ prevMask = (prevMask << 1) | 1;
+ continue;
+ }
+ }
+ }
+
+ prevPos = i;
+
+ if (test86MSByte(buffer[i + 4])) {
+ int src = buffer[i + 1]
+ | (buffer[i + 2] << 8)
+ | (buffer[i + 3] << 16)
+ | (buffer[i + 4] << 24);
+ int dest;
+ while (true) {
+ if (isEncoder)
+ dest = src + (pos + i - offset);
+ else
+ dest = src - (pos + i - offset);
+
+ if (prevMask == 0)
+ break;
+
+ int index = MASK_TO_BIT_NUMBER[prevMask] * 8;
+ if (!test86MSByte((byte)(dest >> (24 - index))))
+ break;
+
+ src = dest ^ ((1 << (32 - index)) - 1);
+ }
+
+ buffer[i + 1] = (byte)dest;
+ buffer[i + 2] = (byte)(dest >> 8);
+ buffer[i + 3] = (byte)(dest >> 16);
+ buffer[i + 4] = (byte)(~(((dest >> 24) & 1) - 1));
+ i += 4;
+ } else {
+ prevMask = (prevMask << 1) | 1;
+ }
+ }
+
+ prevPos = i - prevPos;
+ prevMask = ((prevPos & ~3) != 0) ? 0 : prevMask << (prevPos - 1);
+
+ i -= offset;
+ pos += i;
+ return i;
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Filters/Delta.cs b/SabreTools.Library/External/Compress/SevenZip/Filters/Delta.cs
new file mode 100644
index 00000000..400e03a4
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Filters/Delta.cs
@@ -0,0 +1,100 @@
+using System;
+using System.IO;
+
+namespace Compress.SevenZip.Filters
+{
+ public class Delta : Stream
+ {
+ private readonly Stream _baseStream;
+ private long _position;
+ private readonly byte[] _bVal;
+ private readonly int _dSize;
+ private int _bIndex;
+
+ // properties values are 0,1,3
+ public Delta(byte[] properties, Stream inputStream)
+ {
+ _dSize = properties[0] + 1;
+ _bVal = new byte[_dSize];
+
+ _baseStream = inputStream;
+ }
+
+ public override void Flush()
+ {
+ throw new NotImplementedException();
+ }
+
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ if (origin != SeekOrigin.Current)
+ throw new NotImplementedException();
+
+ const int bufferSize = 10240;
+ byte[] seekBuffer = new byte[bufferSize];
+ long seekToGo = offset;
+ while (seekToGo > 0)
+ {
+ long get = seekToGo > bufferSize ? bufferSize : seekToGo;
+ Read(seekBuffer, 0, (int)get);
+ seekToGo -= get;
+ }
+ return _position;
+ }
+
+ public override void SetLength(long value)
+ {
+ throw new NotImplementedException();
+ }
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ int read = _baseStream.Read(buffer, offset, count);
+
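+ // Delta decode: each output byte is the stored difference plus the byte emitted _dSize positions
+ // earlier, tracked in the circular _bVal buffer.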
+ for (int i = 0; i < read; i++)
+ {
+ buffer[i] = _bVal[_bIndex] = (byte)(buffer[i] + _bVal[_bIndex]);
+ _bIndex = (_bIndex + 1) % _dSize;
+ }
+
+ _position += read;
+
+ return read;
+ }
+
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ throw new NotImplementedException();
+ }
+
+ public override bool CanRead
+ {
+ get { return true; }
+ }
+
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+ public override bool CanWrite
+ {
+ get { return false; }
+ }
+ public override long Length
+ {
+ get { return _baseStream.Length; }
+ }
+ public override long Position
+ {
+ get
+ {
+ return _position;
+ }
+ set
+ {
+ throw new NotImplementedException();
+ }
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/Filters/Filter.cs b/SabreTools.Library/External/Compress/SevenZip/Filters/Filter.cs
new file mode 100644
index 00000000..cd1f80d9
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Filters/Filter.cs
@@ -0,0 +1,171 @@
+using System;
+using System.IO;
+
+namespace Compress.SevenZip.Filters
+{
+ public abstract class Filter : Stream
+ {
+ protected bool isEncoder;
+ protected Stream baseStream;
+
+ private byte[] tail;
+ private byte[] window;
+ private int transformed = 0;
+ private int read = 0;
+ private bool endReached = false;
+ private long position = 0;
+
+ protected Filter(bool isEncoder, Stream baseStream, int lookahead)
+ {
+ this.isEncoder = isEncoder;
+ this.baseStream = baseStream;
+ tail = new byte[lookahead - 1];
+ window = new byte[tail.Length * 2];
+ }
+
+ public Stream BaseStream
+ { get { return baseStream; } }
+
+ public override bool CanRead
+ {
+ get { return !isEncoder; }
+ }
+
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+ public override bool CanWrite
+ {
+ get { return isEncoder; }
+ }
+
+ public override void Flush()
+ {
+ throw new NotImplementedException();
+ }
+
+ public override long Length
+ {
+ get { return baseStream.Length; }
+ }
+
+ public override long Position
+ {
+ get
+ {
+ return position;
+ }
+ set
+ {
+ throw new NotImplementedException();
+ }
+ }
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ int size = 0;
+
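+ // first return any bytes that were already transformed on a previous call and are still waiting in the tail buffer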
+ if (transformed > 0)
+ {
+ int copySize = transformed;
+ if (copySize > count)
+ copySize = count;
+ Buffer.BlockCopy(tail, 0, buffer, offset, copySize);
+ transformed -= copySize;
+ read -= copySize;
+ offset += copySize;
+ count -= copySize;
+ size += copySize;
+ Buffer.BlockCopy(tail, copySize, tail, 0, read);
+ }
+ if (count == 0)
+ {
+ position += size;
+ return size;
+ }
+
+ int inSize = read;
+ if (inSize > count)
+ inSize = count;
+ Buffer.BlockCopy(tail, 0, buffer, offset, inSize);
+ read -= inSize;
+ Buffer.BlockCopy(tail, inSize, tail, 0, read);
+ while (!endReached && inSize < count)
+ {
+ int baseRead = baseStream.Read(buffer, offset + inSize, count - inSize);
+ inSize += baseRead;
+ if (baseRead == 0)
+ endReached = true;
+ }
+ while (!endReached && read < tail.Length)
+ {
+ int baseRead = baseStream.Read(tail, read, tail.Length - read);
+ read += baseRead;
+ if (baseRead == 0)
+ endReached = true;
+ }
+
+ if (inSize > tail.Length)
+ {
+ transformed = Transform(buffer, offset, inSize);
+ offset += transformed;
+ count -= transformed;
+ size += transformed;
+ inSize -= transformed;
+ transformed = 0;
+ }
+
+ if (count == 0)
+ {
+ position += size;
+ return size;
+ }
+
+ Buffer.BlockCopy(buffer, offset, window, 0, inSize);
+ Buffer.BlockCopy(tail, 0, window, inSize, read);
+ if (inSize + read > tail.Length)
+ transformed = Transform(window, 0, inSize + read);
+ else
+ transformed = inSize + read;
+ Buffer.BlockCopy(window, 0, buffer, offset, inSize);
+ Buffer.BlockCopy(window, inSize, tail, 0, read);
+ size += inSize;
+ transformed -= inSize;
+
+ position += size;
+ return size;
+ }
+
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ if (origin != SeekOrigin.Current)
+ throw new NotImplementedException();
+
+ const int bufferSize = 10240;
+ byte[] seekBuffer = new byte[bufferSize];
+ long seekToGo = offset;
+ while (seekToGo > 0)
+ {
+ long get = seekToGo > bufferSize ? bufferSize : seekToGo;
+ Read(seekBuffer, 0, (int) get);
+ seekToGo -= get;
+ }
+ return position;
+ }
+
+ public override void SetLength(long value)
+ {
+ throw new NotImplementedException();
+ }
+
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ Transform(buffer, offset, count);
+ baseStream.Write(buffer, offset, count);
+ }
+
+ protected abstract int Transform(byte[] buffer, int offset, int count);
+ }
+}
diff --git a/SabreTools.Library/External/Compress/SevenZip/SevenZip.cs b/SabreTools.Library/External/Compress/SevenZip/SevenZip.cs
new file mode 100644
index 00000000..d5693afb
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/SevenZip.cs
@@ -0,0 +1,1082 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.IO;
+using System.Text;
+using Compress.SevenZip.Compress.BZip2;
+using Compress.SevenZip.Compress.LZMA;
+using Compress.SevenZip.Compress.PPmd;
+using Compress.SevenZip.Filters;
+using Compress.SevenZip.Structure;
+using Compress.Utils;
+using FileInfo = RVIO.FileInfo;
+using FileStream = RVIO.FileStream;
+
+namespace Compress.SevenZip
+{
+ public class SevenZ : ICompress
+ {
+ private List<LocalFile> _localFiles = new List<LocalFile>();
+
+ private FileInfo _zipFileInfo;
+
+ private Stream _zipFs;
+
+ private SignatureHeader _signatureHeader;
+
+ private bool _compressed = true;
+
+
+ private long _baseOffset;
+
+ public string ZipFilename => _zipFileInfo != null ? _zipFileInfo.FullName : "";
+
+ public long TimeStamp => _zipFileInfo?.LastWriteTime ?? 0;
+
+ public ZipOpenType ZipOpen { get; private set; }
+ public ZipStatus ZipStatus { get; private set; }
+
+ public int LocalFilesCount()
+ {
+ return _localFiles.Count;
+ }
+
+ public string Filename(int i)
+ {
+ return _localFiles[i].FileName;
+ }
+
+ public ulong? LocalHeader(int i)
+ {
+ return 0;
+ }
+
+ public ulong UncompressedSize(int i)
+ {
+ return _localFiles[i].UncompressedSize;
+ }
+
+ public int StreamIndex(int i)
+ {
+ return _localFiles[i].StreamIndex;
+
+ }
+
+ public ZipReturn FileStatus(int i)
+ {
+ return _localFiles[i].FileStatus;
+ }
+
+ public byte[] CRC32(int i)
+ {
+ return _localFiles[i].CRC;
+ }
+
+ public void ZipFileCloseFailed()
+ {
+ switch (ZipOpen)
+ {
+ case ZipOpenType.Closed:
+ return;
+ case ZipOpenType.OpenRead:
+ ZipFileCloseReadStream();
+ if (_zipFs != null)
+ {
+ _zipFs.Close();
+ _zipFs.Dispose();
+ }
+ break;
+ case ZipOpenType.OpenWrite:
+ _zipFs.Flush();
+ _zipFs.Close();
+ _zipFs.Dispose();
+ if (_zipFileInfo != null)
+ RVIO.File.Delete(_zipFileInfo.FullName);
+ _zipFileInfo = null;
+ break;
+ }
+
+ ZipOpen = ZipOpenType.Closed;
+ }
+
+ public bool IsDirectory(int i)
+ {
+ return _localFiles[i].IsDirectory;
+ }
+
+
+ public void ZipFileAddDirectory(string filename)
+ {
+ string fName = filename;
+ if (fName.Substring(fName.Length - 1, 1) == @"/")
+ fName = fName.Substring(0, fName.Length - 1);
+
+ LocalFile lf = new LocalFile
+ {
+ FileName = fName,
+ UncompressedSize = 0,
+ IsDirectory = true,
+ StreamOffset = 0
+ };
+ _localFiles.Add(lf);
+ }
+
+ private class LocalFile
+ {
+ public string FileName;
+ public ulong UncompressedSize;
+ public bool IsDirectory;
+ public byte[] CRC;
+ public int StreamIndex;
+ public ulong StreamOffset;
+ public ZipReturn FileStatus = ZipReturn.ZipUntested;
+ }
+
+ #region open 7z files
+
+ public ZipReturn ZipFileOpen(string filename, long timestamp, bool readHeaders)
+ {
+ ZipFileClose();
+ Debug.WriteLine(filename);
+ #region open file stream
+
+ try
+ {
+ if (!RVIO.File.Exists(filename))
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorFileNotFound;
+ }
+ _zipFileInfo = new FileInfo(filename);
+ if ((timestamp != -1) && (_zipFileInfo.LastWriteTime != timestamp))
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorTimeStamp;
+ }
+ int errorCode = FileStream.OpenFileRead(filename, out _zipFs);
+ if (errorCode != 0)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ }
+ catch (PathTooLongException)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipFileNameToLong;
+ }
+ catch (IOException)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+
+ #endregion
+
+ ZipOpen = ZipOpenType.OpenRead;
+ ZipStatus = ZipStatus.None;
+
+ return ZipFileReadHeaders();
+ }
+
+ public ZipReturn ZipFileOpen(Stream inStream)
+ {
+ ZipFileClose();
+ _zipFileInfo = null;
+ _zipFs = inStream;
+ ZipOpen = ZipOpenType.OpenRead;
+ ZipStatus = ZipStatus.None;
+ return ZipFileReadHeaders();
+ }
+
+ private ZipReturn ZipFileReadHeaders()
+ {
+ try
+ {
+ SignatureHeader signatureHeader = new SignatureHeader();
+ if (!signatureHeader.Read(_zipFs))
+ {
+ return ZipReturn.ZipSignatureError;
+ }
+
+ _baseOffset = _zipFs.Position;
+
+ //_zipFs.Seek(_baseOffset + (long)signatureHeader.NextHeaderOffset, SeekOrigin.Begin);
+ //byte[] mainHeader = new byte[signatureHeader.NextHeaderSize];
+ //_zipFs.Read(mainHeader, 0, (int)signatureHeader.NextHeaderSize);
+ //if (!CRC.VerifyDigest(signatureHeader.NextHeaderCRC, mainHeader, 0, (uint)signatureHeader.NextHeaderSize))
+ // return ZipReturn.Zip64EndOfCentralDirError;
+
+ if (signatureHeader.NextHeaderSize != 0)
+ {
+ _zipFs.Seek(_baseOffset + (long)signatureHeader.NextHeaderOffset, SeekOrigin.Begin);
+ ZipReturn zr = Header.ReadHeaderOrPackedHeader(_zipFs, _baseOffset, out _header);
+ if (zr != ZipReturn.ZipGood)
+ {
+ return zr;
+ }
+ }
+
+ _zipFs.Seek(_baseOffset + (long)(signatureHeader.NextHeaderOffset + signatureHeader.NextHeaderSize), SeekOrigin.Begin);
+
+ ZipStatus = ZipStatus.None;
+
+ ZipStatus |= IsRomVault7Z() ? ZipStatus.TrrntZip : ZipStatus.None;
+ ZipStatus |= Istorrent7Z() ? ZipStatus.Trrnt7Zip : ZipStatus.None;
+ PopulateLocalFiles(out _localFiles);
+
+ return ZipReturn.ZipGood;
+ }
+ catch
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorReadingFile;
+ }
+ }
+
+
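+ // walk the header's file names, pairing each non-empty entry with its folder (solid block) and its running offset inside that folder's unpacked data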
+ private void PopulateLocalFiles(out List<LocalFile> localFiles)
+ {
+ int emptyFileIndex = 0;
+ int folderIndex = 0;
+ int unpackedStreamsIndex = 0;
+ ulong streamOffset = 0;
+ localFiles = new List<LocalFile>();
+
+ if (_header == null)
+ return;
+
+ for (int i = 0; i < _header.FileInfo.Names.Length; i++)
+ {
+ LocalFile lf = new LocalFile { FileName = _header.FileInfo.Names[i] };
+
+ if ((_header.FileInfo.EmptyStreamFlags == null) || !_header.FileInfo.EmptyStreamFlags[i])
+ {
+ lf.StreamIndex = folderIndex;
+ lf.StreamOffset = streamOffset;
+ lf.UncompressedSize = _header.StreamsInfo.Folders[folderIndex].UnpackedStreamInfo[unpackedStreamsIndex].UnpackedSize;
+ lf.CRC = Util.uinttobytes(_header.StreamsInfo.Folders[folderIndex].UnpackedStreamInfo[unpackedStreamsIndex].Crc);
+
+ streamOffset += lf.UncompressedSize;
+ unpackedStreamsIndex++;
+
+ if (unpackedStreamsIndex >= _header.StreamsInfo.Folders[folderIndex].UnpackedStreamInfo.Length)
+ {
+ folderIndex++;
+ unpackedStreamsIndex = 0;
+ streamOffset = 0;
+ }
+ }
+ else
+ {
+ lf.UncompressedSize = 0;
+ lf.CRC = new byte[] { 0, 0, 0, 0 };
+ lf.IsDirectory = (_header.FileInfo.EmptyFileFlags == null) || !_header.FileInfo.EmptyFileFlags[emptyFileIndex++];
+
+ if (lf.IsDirectory)
+ {
+ if (lf.FileName.Substring(lf.FileName.Length - 1, 1) != "/")
+ {
+ lf.FileName += "/";
+ }
+ }
+ }
+
+ localFiles.Add(lf);
+ }
+ }
+
+
+ public void ZipFileClose()
+ {
+ switch (ZipOpen)
+ {
+ case ZipOpenType.Closed:
+ return;
+ case ZipOpenType.OpenRead:
+ ZipFileCloseReadStream();
+ if (_zipFs != null)
+ {
+ _zipFs.Close();
+ _zipFs.Dispose();
+ }
+ ZipOpen = ZipOpenType.Closed;
+ return;
+ case ZipOpenType.OpenWrite:
+ CloseWriting7Zip();
+ if (_zipFileInfo != null)
+ _zipFileInfo = new FileInfo(_zipFileInfo.FullName);
+ break;
+ }
+
+ ZipOpen = ZipOpenType.Closed;
+ }
+
+ private Header _header;
+
+ public StringBuilder HeaderReport()
+ {
+ StringBuilder sb = new StringBuilder();
+
+ if (_header == null)
+ {
+ sb.AppendLine("Null Header");
+ return sb;
+ }
+
+ _header.Report(ref sb);
+
+ return sb;
+ }
+
+ // not finalized yet, so do not use
+ private void WriteRomVault7Zip(BinaryWriter bw, ulong headerPos, ulong headerLength, uint headerCRC)
+ {
+ const string sig = "RomVault7Z01";
+ byte[] RV7Zid = Util.Enc.GetBytes(sig);
+
+ // RomVault 7Zip torrent header
+ // 12 bytes : RomVault7Zip
+ // 4 bytes : HeaderCRC
+ // 8 bytes : HeaderPos
+ // 8 bytes : HeaderLength
+
+ bw.Write(RV7Zid);
+ bw.Write(headerCRC);
+ bw.Write(headerPos);
+ bw.Write(headerLength);
+
+ ZipStatus = ZipStatus.TrrntZip;
+ }
+
+
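+ // a RomVault 7z file ends with a 32 byte trailer: 12 byte "RomVault7Z01" id, 4 byte header CRC, 8 byte header offset and 8 byte header length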
+ private bool IsRomVault7Z()
+ {
+ long length = _zipFs.Length;
+ if (length < 32)
+ {
+ return false;
+ }
+
+ _zipFs.Seek(length - 32, SeekOrigin.Begin);
+
+ const string sig = "RomVault7Z01";
+ byte[] rv7Zid = Util.Enc.GetBytes(sig);
+
+ byte[] header = new byte[12];
+ _zipFs.Read(header, 0, 12);
+ for (int i = 0; i < 12; i++)
+ {
+ if (header[i] != rv7Zid[i])
+ {
+ return false;
+ }
+ }
+
+ uint headerCRC;
+ ulong headerOffset;
+ ulong headerSize;
+ using (BinaryReader br = new BinaryReader(_zipFs, Encoding.UTF8, true))
+ {
+ headerCRC = br.ReadUInt32();
+ headerOffset = br.ReadUInt64();
+ headerSize = br.ReadUInt64();
+ }
+
+ if ((ulong)length < headerOffset)
+ {
+ return false;
+ }
+
+ _zipFs.Seek((long)headerOffset, SeekOrigin.Begin);
+
+ byte[] mainHeader = new byte[headerSize];
+ int bytesread = _zipFs.Read(mainHeader, 0, (int)headerSize);
+
+ return ((ulong)bytesread == headerSize) &&
+ Utils.CRC.VerifyDigest(headerCRC, mainHeader, 0, (uint)headerSize);
+
+ }
+
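+ // torrent7z appends its own signature block; rebuild the buffer it checksums (first 128 bytes, last 128 bytes, end offset and signature block) and verify the stored CRC32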
+ private bool Istorrent7Z()
+ {
+ const int crcsz = 128;
+ const int t7ZsigSize = 16 + 1 + 9 + 4 + 4;
+ byte[] kSignature = { (byte)'7', (byte)'z', 0xBC, 0xAF, 0x27, 0x1C };
+ int kSignatureSize = kSignature.Length;
+ const string sig = "\xa9\x9f\xd1\x57\x08\xa9\xd7\xea\x29\x64\xb2\x36\x1b\x83\x52\x33\x01torrent7z_0.9beta";
+ byte[] t7Zid = Util.Enc.GetBytes(sig);
+ int t7ZidSize = t7Zid.Length;
+
+ const int tmpbufsize = 256 + t7ZsigSize + 8 + 4;
+ byte[] buffer = new byte[tmpbufsize];
+
+ // read the first 128 bytes, padding with zeros if fewer bytes are available
+ int bufferPos = 0;
+ _zipFs.Seek(0, SeekOrigin.Begin);
+ int ar = _zipFs.Read(buffer, bufferPos, crcsz);
+ if (ar < crcsz)
+ {
+ Util.memset(buffer, bufferPos + ar, 0, crcsz - ar);
+ }
+ bufferPos = crcsz;
+
+ long foffs = _zipFs.Length;
+ int endReadLength = crcsz + t7ZsigSize + 4;
+ foffs = foffs < endReadLength ? 0 : foffs - endReadLength;
+
+ _zipFs.Seek(foffs, SeekOrigin.Begin);
+
+ ar = _zipFs.Read(buffer, bufferPos, endReadLength);
+ if (ar < endReadLength)
+ {
+ if (ar >= t7ZsigSize + 4)
+ {
+ ar -= t7ZsigSize + 4;
+ }
+ if (ar < kSignatureSize)
+ {
+ ar = kSignatureSize;
+ }
+ Util.memset(buffer, bufferPos + ar, 0, crcsz - ar);
+ Util.memcpyr(buffer, crcsz * 2 + 8, buffer, bufferPos + ar, t7ZsigSize + 4);
+ }
+ else
+ {
+ Util.memcpyr(buffer, crcsz * 2 + 8, buffer, crcsz * 2, t7ZsigSize + 4);
+ }
+
+ foffs = _zipFs.Length;
+ foffs -= t7ZsigSize + 4;
+
+ //memcpy(buffer, crcsz * 2, &foffs, 8);
+ buffer[crcsz * 2 + 0] = (byte)((foffs >> 0) & 0xff);
+ buffer[crcsz * 2 + 1] = (byte)((foffs >> 8) & 0xff);
+ buffer[crcsz * 2 + 2] = (byte)((foffs >> 16) & 0xff);
+ buffer[crcsz * 2 + 3] = (byte)((foffs >> 24) & 0xff);
+ buffer[crcsz * 2 + 4] = 0;
+ buffer[crcsz * 2 + 5] = 0;
+ buffer[crcsz * 2 + 6] = 0;
+ buffer[crcsz * 2 + 7] = 0;
+
+ if (Util.memcmp(buffer, 0, kSignature, kSignatureSize))
+ {
+ t7Zid[16] = buffer[crcsz * 2 + 4 + 8 + 16];
+ if (Util.memcmp(buffer, crcsz * 2 + 4 + 8, t7Zid, t7ZidSize))
+ {
+ uint inCrc32 = (uint)(buffer[crcsz * 2 + 8 + 0] +
+ (buffer[crcsz * 2 + 8 + 1] << 8) +
+ (buffer[crcsz * 2 + 8 + 2] << 16) +
+ (buffer[crcsz * 2 + 8 + 3] << 24));
+
+ buffer[crcsz * 2 + 8 + 0] = 0xff;
+ buffer[crcsz * 2 + 8 + 1] = 0xff;
+ buffer[crcsz * 2 + 8 + 2] = 0xff;
+ buffer[crcsz * 2 + 8 + 3] = 0xff;
+
+ uint calcCrc32 = Utils.CRC.CalculateDigest(buffer, 0, crcsz * 2 + 8 + t7ZsigSize + 4);
+
+ if (inCrc32 == calcCrc32)
+ {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ #endregion
+
+ #region read 7z file
+
+ private int _streamIndex = -1;
+ private Stream _stream;
+
+ public ZipReturn ZipFileOpenReadStream(int index, out Stream stream, out ulong unCompressedSize)
+ {
+ Debug.WriteLine("Opening File " + _localFiles[index].FileName);
+ stream = null;
+ unCompressedSize = 0;
+
+ try
+ {
+ if (ZipOpen != ZipOpenType.OpenRead)
+ {
+ return ZipReturn.ZipErrorGettingDataStream;
+ }
+
+ if (IsDirectory(index))
+ {
+ return ZipReturn.ZipTryingToAccessADirectory;
+ }
+
+ unCompressedSize = _localFiles[index].UncompressedSize;
+ int thisStreamIndex = _localFiles[index].StreamIndex;
+ ulong streamOffset = _localFiles[index].StreamOffset;
+
+ if ((thisStreamIndex == _streamIndex) && (streamOffset >= (ulong)_stream.Position))
+ {
+ stream = _stream;
+ stream.Seek((long)_localFiles[index].StreamOffset - _stream.Position, SeekOrigin.Current);
+ return ZipReturn.ZipGood;
+ }
+
+ ZipFileCloseReadStream();
+ _streamIndex = thisStreamIndex;
+
+
+ Folder folder = _header.StreamsInfo.Folders[_streamIndex];
+
+ // first make the list of decompressor streams
+ int codersNeeded = folder.Coders.Length;
+
+ List<InStreamSourceInfo> allInputStreams = new List<InStreamSourceInfo>();
+ for (int i = 0; i < codersNeeded; i++)
+ {
+ folder.Coders[i].DecoderStream = null;
+ allInputStreams.AddRange(folder.Coders[i].InputStreamsSourceInfo);
+ }
+
+ // now use the bind pairs to link the outputs to the inputs
+ int bindPairsCount = folder.BindPairs.Length;
+ for (int i = 0; i < bindPairsCount; i++)
+ {
+ allInputStreams[(int)folder.BindPairs[i].InIndex].InStreamSource = InStreamSource.CompStreamOutput;
+ allInputStreams[(int)folder.BindPairs[i].InIndex].InStreamIndex = folder.BindPairs[i].OutIndex;
+ folder.Coders[(int)folder.BindPairs[i].OutIndex].OutputUsedInternally = true;
+ }
+
+ // next use the packed stream indices to connect the remaining input streams to the source file
+ int packedStreamsCount = folder.PackedStreamIndices.Length;
+ for (int i = 0; i < packedStreamsCount; i++)
+ {
+ ulong packedStreamIndex = (ulong)i + folder.PackedStreamIndexBase;
+
+ // create and open the source file stream if needed
+ if (_header.StreamsInfo.PackedStreams[packedStreamIndex].PackedStream == null)
+ {
+ _header.StreamsInfo.PackedStreams[packedStreamIndex].PackedStream = CloneStream(_zipFs);
+ }
+ _header.StreamsInfo.PackedStreams[packedStreamIndex].PackedStream.Seek(
+ _baseOffset + (long)_header.StreamsInfo.PackedStreams[packedStreamIndex].StreamPosition, SeekOrigin.Begin);
+
+
+ allInputStreams[(int)folder.PackedStreamIndices[i]].InStreamSource = InStreamSource.FileStream;
+ allInputStreams[(int)folder.PackedStreamIndices[i]].InStreamIndex = packedStreamIndex;
+ }
+
+ List<Stream> inputCoders = new List<Stream>();
+
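+ // keep sweeping the coders, building each decoder stream once all of its inputs (file streams or other coders' outputs) are available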
+ bool allCodersComplete = false;
+ while (!allCodersComplete)
+ {
+ allCodersComplete = true;
+ for (int i = 0; i < codersNeeded; i++)
+ {
+ Coder coder = folder.Coders[i];
+
+ // check if this decoder has already been processed
+ if (coder.DecoderStream != null)
+ {
+ continue;
+ }
+
+ inputCoders.Clear();
+ for (int j = 0; j < (int)coder.NumInStreams; j++)
+ {
+ if (coder.InputStreamsSourceInfo[j].InStreamSource == InStreamSource.FileStream)
+ {
+ inputCoders.Add(_header.StreamsInfo.PackedStreams[coder.InputStreamsSourceInfo[j].InStreamIndex].PackedStream);
+ }
+ else if (coder.InputStreamsSourceInfo[j].InStreamSource == InStreamSource.CompStreamOutput)
+ {
+ if (folder.Coders[coder.InputStreamsSourceInfo[j].InStreamIndex].DecoderStream == null)
+ {
+ break;
+ }
+ inputCoders.Add(folder.Coders[coder.InputStreamsSourceInfo[j].InStreamIndex].DecoderStream);
+ }
+ else
+ {
+ // unknown input type so error
+ return ZipReturn.ZipDecodeError;
+ }
+ }
+
+ if (inputCoders.Count == (int)coder.NumInStreams)
+ {
+ // all input streams are available, so the decoder stream can be built
+ switch (coder.DecoderType)
+ {
+ case DecompressType.Stored:
+ coder.DecoderStream = inputCoders[0];
+ break;
+ case DecompressType.Delta:
+ coder.DecoderStream = new Delta(folder.Coders[i].Properties, inputCoders[0]);
+ break;
+ case DecompressType.LZMA:
+ coder.DecoderStream = new LzmaStream(folder.Coders[i].Properties, inputCoders[0]);
+ break;
+ case DecompressType.LZMA2:
+ coder.DecoderStream = new LzmaStream(folder.Coders[i].Properties, inputCoders[0]);
+ break;
+ case DecompressType.PPMd:
+ coder.DecoderStream = new PpmdStream(new PpmdProperties(folder.Coders[i].Properties), inputCoders[0], false);
+ break;
+ case DecompressType.BZip2:
+ coder.DecoderStream = new CBZip2InputStream(inputCoders[0], false);
+ break;
+ case DecompressType.BCJ:
+ coder.DecoderStream = new BCJFilter(false, inputCoders[0]);
+ break;
+ case DecompressType.BCJ2:
+ coder.DecoderStream = new BCJ2Filter(inputCoders[0], inputCoders[1], inputCoders[2], inputCoders[3]);
+ break;
+ default:
+ return ZipReturn.ZipDecodeError;
+ }
+ }
+
+ // if a coder was skipped, loop round again
+ if (coder.DecoderStream == null)
+ {
+ allCodersComplete = false;
+ }
+ }
+ }
+ // find the final output stream and return it.
+ int outputStream = -1;
+ for (int i = 0; i < codersNeeded; i++)
+ {
+ Coder coder = folder.Coders[i];
+ if (!coder.OutputUsedInternally)
+ {
+ outputStream = i;
+ }
+ }
+
+ stream = folder.Coders[outputStream].DecoderStream;
+ stream.Seek((long)_localFiles[index].StreamOffset, SeekOrigin.Current);
+
+ _stream = stream;
+
+ return ZipReturn.ZipGood;
+
+ }
+ catch (Exception)
+ {
+ return ZipReturn.ZipErrorGettingDataStream;
+ }
+
+ }
+
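+ // give each packed stream an independent handle on the archive so it can keep its own read position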
+ private Stream CloneStream(Stream s)
+ {
+ switch (s)
+ {
+ case System.IO.FileStream _:
+ int errorCode = FileStream.OpenFileRead(ZipFilename, out Stream streamOut);
+ return errorCode != 0 ? null : streamOut;
+
+ case MemoryStream memStream:
+ long pos = memStream.Position;
+ memStream.Position = 0;
+ byte[] newStream = new byte[memStream.Length];
+ memStream.Read(newStream, 0, (int)memStream.Length);
+ MemoryStream ret = new MemoryStream(newStream, false);
+ memStream.Position = pos;
+ return ret;
+ }
+
+ return null;
+ }
+
+ public ZipReturn ZipFileCloseReadStream()
+ {
+ if (_streamIndex != -1)
+ {
+ Folder folder = _header.StreamsInfo.Folders[_streamIndex];
+
+ foreach (Coder c in folder.Coders)
+ {
+ Stream ds = c?.DecoderStream;
+ if (ds == null)
+ {
+ continue;
+ }
+ ds.Close();
+ ds.Dispose();
+ c.DecoderStream = null;
+ }
+ }
+ _streamIndex = -1;
+
+ if (_header?.StreamsInfo != null)
+ {
+ foreach (PackedStreamInfo psi in _header.StreamsInfo.PackedStreams)
+ {
+ if (psi?.PackedStream == null)
+ {
+ continue;
+ }
+ psi.PackedStream.Close();
+ psi.PackedStream.Dispose();
+ psi.PackedStream = null;
+ }
+ }
+ return ZipReturn.ZipGood;
+ }
+
+ #endregion
+
+ #region write 7z File
+
+ private LzmaStream _lzmaStream;
+ private ulong _packStreamStart;
+ private ulong _packStreamSize;
+ private ulong _unpackedStreamSize;
+ private byte[] _codeMSbytes;
+
+
+ public void ZipFileAddDirectory()
+ {
+ // do nothing here for 7zip
+ }
+
+ public ZipReturn ZipFileCreate(string newFilename)
+ {
+ return ZipFileCreate(newFilename, true);
+ }
+
+
+ public ZipReturn ZipFileCreateFromUncompressedSize(string newFilename, ulong unCompressedSize)
+ {
+ return ZipFileCreate(newFilename, true, GetDictionarySizeFromUncompressedSize(unCompressedSize));
+ }
+
+ public ZipReturn ZipFileCreate(string newFilename, bool compressOutput, int dictionarySize = 1 << 24, int numFastBytes = 64)
+ {
+ if (ZipOpen != ZipOpenType.Closed)
+ {
+ return ZipReturn.ZipFileAlreadyOpen;
+ }
+
+ DirUtil.CreateDirForFile(newFilename);
+ _zipFileInfo = new FileInfo(newFilename);
+
+ int errorCode = FileStream.OpenFileWrite(newFilename, out _zipFs);
+ if (errorCode != 0)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ ZipOpen = ZipOpenType.OpenWrite;
+
+ _signatureHeader = new SignatureHeader();
+ _header = new Header();
+
+ using (BinaryWriter bw = new BinaryWriter(_zipFs, Encoding.UTF8, true))
+ {
+ _signatureHeader.Write(bw);
+ }
+
+ _compressed = compressOutput;
+
+ _unpackedStreamSize = 0;
+ if (_compressed)
+ {
+ LzmaEncoderProperties ep = new LzmaEncoderProperties(true, dictionarySize, numFastBytes);
+ _lzmaStream = new LzmaStream(ep, false, _zipFs);
+ _codeMSbytes = _lzmaStream.Properties;
+ _packStreamStart = (ulong)_zipFs.Position;
+ }
+
+ return ZipReturn.ZipGood;
+ }
+
+ public ZipReturn ZipFileOpenWriteStream(bool raw, bool trrntzip, string filename, ulong uncompressedSize, ushort compressionMethod, out Stream stream)
+ {
+ return ZipFileOpenWriteStream(filename, uncompressedSize, out stream);
+ }
+
+ private ZipReturn ZipFileOpenWriteStream(string filename, ulong uncompressedSize, out Stream stream)
+ {
+ LocalFile lf = new LocalFile
+ {
+ FileName = filename,
+ UncompressedSize = uncompressedSize,
+ StreamOffset = (ulong)(_zipFs.Position - _signatureHeader.BaseOffset)
+ };
+
+ _unpackedStreamSize += uncompressedSize;
+
+ _localFiles.Add(lf);
+ stream = _compressed ? _lzmaStream : _zipFs;
+ return ZipReturn.ZipGood;
+ }
+
+
+ public ZipReturn ZipFileCloseWriteStream(byte[] crc32)
+ {
+ _localFiles[_localFiles.Count - 1].CRC = new[] { crc32[3], crc32[2], crc32[1], crc32[0] };
+ return ZipReturn.ZipGood;
+ }
+
+
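+ // finish the pack stream, build the 7z header in memory, then append it and backfill the signature header with its position, size and CRC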
+ private void CloseWriting7Zip()
+ {
+ if (_compressed)
+ {
+ _lzmaStream.Close();
+ }
+
+ _packStreamSize = (ulong)_zipFs.Position - _packStreamStart;
+
+ Create7ZStructure();
+
+ byte[] newHeaderByte;
+ using (Stream headerMem = new MemoryStream())
+ {
+ using (BinaryWriter headerBw = new BinaryWriter(headerMem, Encoding.UTF8, true))
+ {
+ _header.WriteHeader(headerBw);
+ newHeaderByte = new byte[headerMem.Length];
+ headerMem.Position = 0;
+ headerMem.Read(newHeaderByte, 0, newHeaderByte.Length);
+ }
+ }
+
+ uint mainHeaderCRC = Utils.CRC.CalculateDigest(newHeaderByte, 0, (uint)newHeaderByte.Length);
+
+ ulong headerpos = (ulong)_zipFs.Position;
+ _zipFs.Write(newHeaderByte, 0, newHeaderByte.Length);
+ using (BinaryWriter bw = new BinaryWriter(_zipFs, Encoding.UTF8, true))
+ {
+ _signatureHeader.WriteFinal(bw, headerpos, (ulong)newHeaderByte.Length, mainHeaderCRC);
+ WriteRomVault7Zip(bw, headerpos, (ulong)newHeaderByte.Length, mainHeaderCRC);
+ }
+ _zipFs.Flush();
+ _zipFs.Close();
+ _zipFs.Dispose();
+ }
+
+
+ private void Create7ZStructure()
+ {
+ int fileCount = _localFiles.Count;
+
+ //FileInfo
+ _header.FileInfo = new Structure.FileInfo
+ {
+ Names = new string[fileCount]
+ };
+
+ ulong emptyStreamCount = 0;
+ ulong emptyFileCount = 0;
+ for (int i = 0; i < fileCount; i++)
+ {
+ _header.FileInfo.Names[i] = _localFiles[i].FileName;
+
+ if (_localFiles[i].UncompressedSize != 0)
+ {
+ continue;
+ }
+
+ if (!_localFiles[i].IsDirectory)
+ {
+ emptyFileCount += 1;
+ }
+
+ emptyStreamCount += 1;
+ }
+ ulong outFileCount = (ulong)_localFiles.Count - emptyStreamCount;
+
+ _header.FileInfo.EmptyStreamFlags = null;
+ _header.FileInfo.EmptyFileFlags = null;
+ _header.FileInfo.Attributes = null;
+
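+ // directories and zero-length files carry no packed data; they are marked with empty stream / empty file flags and, for directories, the directory attribute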
+ if (emptyStreamCount > 0)
+ {
+ if (emptyStreamCount != emptyFileCount) //then we found directories and need to set the attributes
+ {
+ _header.FileInfo.Attributes = new uint[fileCount];
+ }
+
+ if (emptyFileCount > 0)
+ {
+ _header.FileInfo.EmptyFileFlags = new bool[emptyStreamCount];
+ }
+
+ emptyStreamCount = 0;
+ _header.FileInfo.EmptyStreamFlags = new bool[fileCount];
+ for (int i = 0; i < fileCount; i++)
+ {
+ if (_localFiles[i].UncompressedSize != 0)
+ {
+ continue;
+ }
+
+ if (_localFiles[i].IsDirectory)
+ {
+ if (_header.FileInfo.Attributes != null)
+ _header.FileInfo.Attributes[i] = 0x10; // set attributes to directory
+ }
+ else
+ {
+ if (_header.FileInfo.EmptyFileFlags != null)
+ _header.FileInfo.EmptyFileFlags[emptyStreamCount] = true; // set empty file flag
+ }
+
+ _header.FileInfo.EmptyStreamFlags[i] = true;
+ emptyStreamCount += 1;
+ }
+ }
+
+
+ //StreamsInfo
+ _header.StreamsInfo = new StreamsInfo { PackPosition = 0 };
+
+ //StreamsInfo.PackedStreamsInfo
+ if (_compressed)
+ {
+ _header.StreamsInfo.PackedStreams = new PackedStreamInfo[1];
+ _header.StreamsInfo.PackedStreams[0] = new PackedStreamInfo { PackedSize = _packStreamSize };
+ }
+ else
+ {
+ _header.StreamsInfo.PackedStreams = new PackedStreamInfo[outFileCount];
+ int fileIndex = 0;
+ for (int i = 0; i < fileCount; i++)
+ {
+ if (_localFiles[i].UncompressedSize == 0)
+ {
+ continue;
+ }
+ _header.StreamsInfo.PackedStreams[fileIndex++] = new PackedStreamInfo { PackedSize = _localFiles[i].UncompressedSize };
+ }
+ }
+ //StreamsInfo.PackedStreamsInfo, no CRC or StreamPosition required
+
+ if (_compressed)
+ {
+ //StreamsInfo.Folders
+ _header.StreamsInfo.Folders = new Folder[1];
+
+ Folder folder = new Folder { Coders = new Coder[1] };
+
+ //StreamsInfo.Folders.Coder
+ // flags 0x23
+ folder.Coders[0] = new Coder
+ {
+ Method = new byte[] { 3, 1, 1 },
+ NumInStreams = 1,
+ NumOutStreams = 1,
+ Properties = _codeMSbytes
+ };
+ folder.BindPairs = null;
+ folder.PackedStreamIndices = new[] { (ulong)0 };
+ folder.UnpackedStreamSizes = new[] { _unpackedStreamSize };
+ folder.UnpackCRC = null;
+
+ folder.UnpackedStreamInfo = new UnpackedStreamInfo[outFileCount];
+ int fileIndex = 0;
+ for (int i = 0; i < fileCount; i++)
+ {
+ if (_localFiles[i].UncompressedSize == 0)
+ {
+ continue;
+ }
+ UnpackedStreamInfo unpackedStreamInfo = new UnpackedStreamInfo
+ {
+ UnpackedSize = _localFiles[i].UncompressedSize,
+ Crc = Util.bytestouint(_localFiles[i].CRC)
+ };
+ folder.UnpackedStreamInfo[fileIndex++] = unpackedStreamInfo;
+ }
+ _header.StreamsInfo.Folders[0] = folder;
+ }
+ else
+ {
+ _header.StreamsInfo.Folders = new Folder[outFileCount];
+ int fileIndex = 0;
+ for (int i = 0; i < fileCount; i++)
+ {
+ if (_localFiles[i].UncompressedSize == 0)
+ {
+ continue;
+ }
+ Folder folder = new Folder { Coders = new Coder[1] };
+
+ //StreamsInfo.Folders.Coder
+ // flags 0x01
+ folder.Coders[0] = new Coder
+ {
+ Method = new byte[] { 0 },
+ NumInStreams = 1,
+ NumOutStreams = 1,
+ Properties = null
+ };
+
+ folder.BindPairs = null;
+ folder.PackedStreamIndices = new[] { (ulong)i };
+ folder.UnpackedStreamSizes = new[] { _localFiles[i].UncompressedSize };
+ folder.UnpackCRC = null;
+
+ folder.UnpackedStreamInfo = new UnpackedStreamInfo[1];
+ UnpackedStreamInfo unpackedStreamInfo = new UnpackedStreamInfo
+ {
+ UnpackedSize = _localFiles[i].UncompressedSize,
+ Crc = Util.bytestouint(_localFiles[i].CRC)
+ };
+ folder.UnpackedStreamInfo[0] = unpackedStreamInfo;
+
+ _header.StreamsInfo.Folders[fileIndex++] = folder;
+ }
+ }
+ }
+
+ #endregion
+
+
+ private static readonly int[] DictionarySizes =
+ {
+ 0x10000,
+ 0x18000,
+ 0x20000,
+ 0x30000,
+ 0x40000,
+ 0x60000,
+ 0x80000,
+ 0xc0000,
+
+ 0x100000,
+ 0x180000,
+ 0x200000,
+ 0x300000,
+ 0x400000,
+ 0x600000,
+ 0x800000,
+ 0xc00000,
+
+ 0x1000000,
+ 0x1800000,
+ 0x2000000,
+ 0x3000000,
+ 0x4000000,
+ 0x6000000
+ };
+
+
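+ // pick the smallest preset dictionary size that still covers the whole uncompressed input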
+ private static int GetDictionarySizeFromUncompressedSize(ulong unCompressedSize)
+ {
+ foreach (int v in DictionarySizes)
+ {
+ if ((ulong)v >= unCompressedSize)
+ return v;
+ }
+
+ return DictionarySizes[DictionarySizes.Length - 1];
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Structure/BindPair.cs b/SabreTools.Library/External/Compress/SevenZip/Structure/BindPair.cs
new file mode 100644
index 00000000..6ff38f17
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Structure/BindPair.cs
@@ -0,0 +1,30 @@
+using System.IO;
+using System.Text;
+
+namespace Compress.SevenZip.Structure
+{
+ public class BindPair
+ {
+ public ulong InIndex;
+ public ulong OutIndex;
+
+ public void Read(BinaryReader br)
+ {
+ InIndex = br.ReadEncodedUInt64();
+ OutIndex = br.ReadEncodedUInt64();
+ }
+
+ public void Write(BinaryWriter bw)
+ {
+ bw.WriteEncodedUInt64(InIndex);
+ bw.WriteEncodedUInt64(OutIndex);
+ }
+
+
+ public void Report(ref StringBuilder sb)
+ {
+ sb.AppendLine(" InIndex = " + InIndex);
+ sb.AppendLine(" OutIndex = " + OutIndex);
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Structure/Coder.cs b/SabreTools.Library/External/Compress/SevenZip/Structure/Coder.cs
new file mode 100644
index 00000000..9aabcc45
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Structure/Coder.cs
@@ -0,0 +1,159 @@
+using System;
+using System.IO;
+using System.Text;
+using Compress.Utils;
+
+namespace Compress.SevenZip.Structure
+{
+ public enum InStreamSource
+ {
+ Unknown,
+ FileStream,
+ CompStreamOutput
+ }
+
+
+ public class InStreamSourceInfo
+ {
+ public InStreamSource InStreamSource = InStreamSource.Unknown;
+ public ulong InStreamIndex;
+ }
+
+ public enum DecompressType
+ {
+ Unknown,
+ Stored,
+ Delta,
+ LZMA,
+ BCJ,
+ BCJ2,
+ PPMd,
+ BZip2,
+ LZMA2
+ }
+
+
+ public class Coder
+ {
+ public byte[] Method;
+ public ulong NumInStreams;
+ public ulong NumOutStreams;
+ public byte[] Properties;
+
+ /************Local Variables***********/
+ public DecompressType DecoderType;
+ public bool OutputUsedInternally = false;
+ public InStreamSourceInfo[] InputStreamsSourceInfo;
+ public Stream DecoderStream;
+
+ public void Read(BinaryReader br)
+ {
+ byte flags = br.ReadByte();
+ int decompressionMethodIdSize = flags & 0xf;
+ Method = br.ReadBytes(decompressionMethodIdSize);
+ if ((flags & 0x10) != 0)
+ {
+ NumInStreams = br.ReadEncodedUInt64();
+ NumOutStreams = br.ReadEncodedUInt64();
+ }
+ else
+ {
+ NumInStreams = 1;
+ NumOutStreams = 1;
+ }
+
+ if ((flags & 0x20) != 0)
+ {
+ ulong propSize = br.ReadEncodedUInt64();
+ Properties = br.ReadBytes((int)propSize);
+ }
+
+ if ((flags & 0x80) != 0)
+ {
+ throw new NotSupportedException("External flag");
+ }
+
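+ // map the raw 7z method id bytes onto the decoder types supported here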
+ if ((Method.Length == 1) && (Method[0] == 0))
+ {
+ DecoderType = DecompressType.Stored;
+ }
+ else if ((Method.Length == 1) && (Method[0] == 3))
+ {
+ DecoderType = DecompressType.Delta;
+ }
+ else if ((Method.Length == 3) && (Method[0] == 3) && (Method[1] == 1) && (Method[2] == 1))
+ {
+ DecoderType = DecompressType.LZMA;
+ }
+ else if ((Method.Length == 4) && (Method[0] == 3) && (Method[1] == 3) && (Method[2] == 1) &&
+ (Method[3] == 3))
+ {
+ DecoderType = DecompressType.BCJ;
+ }
+ else if ((Method.Length == 4) && (Method[0] == 3) && (Method[1] == 3) && (Method[2] == 1) &&
+ (Method[3] == 27))
+ {
+ DecoderType = DecompressType.BCJ2;
+ }
+ else if ((Method.Length == 3) && (Method[0] == 3) && (Method[1] == 4) && (Method[2] == 1))
+ {
+ DecoderType = DecompressType.PPMd;
+ }
+ else if ((Method.Length == 3) && (Method[0] == 4) && (Method[1] == 2) && (Method[2] == 2))
+ {
+ DecoderType = DecompressType.BZip2;
+ }
+ else if ((Method.Length == 1) && (Method[0] == 33))
+ {
+ DecoderType = DecompressType.LZMA2;
+ }
+
+ InputStreamsSourceInfo = new InStreamSourceInfo[NumInStreams];
+ for (uint i = 0; i < NumInStreams; i++)
+ {
+ InputStreamsSourceInfo[i] = new InStreamSourceInfo();
+ }
+ }
+
+ public void Write(BinaryWriter bw)
+ {
+ byte flags = (byte)Method.Length;
+ if ((NumInStreams != 1) || (NumOutStreams != 1))
+ {
+ flags = (byte)(flags | 0x10);
+ }
+
+ if ((Properties != null) && (Properties.Length > 0))
+ {
+ flags = (byte)(flags | 0x20);
+ }
+
+ bw.Write(flags);
+
+ bw.Write(Method);
+
+ if ((NumInStreams != 1) || (NumOutStreams != 1))
+ {
+ bw.WriteEncodedUInt64(NumInStreams);
+ bw.WriteEncodedUInt64(NumOutStreams);
+ }
+
+ if ((Properties != null) && (Properties.Length > 0))
+ {
+ bw.WriteEncodedUInt64((ulong)Properties.Length);
+ bw.Write(Properties);
+ }
+ }
+
+
+
+ public void Report(ref StringBuilder sb)
+ {
+ sb.AppendLine($" Method[] = {Method.ToArrayString()} : {DecoderType}");
+ sb.AppendLine($" NumInStreams = {NumInStreams}");
+ sb.AppendLine($" NumOutStreams = {NumOutStreams}");
+ sb.AppendLine($" Properties[] = {Properties.ToArrayString()}");
+ }
+
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Structure/FileInfo.cs b/SabreTools.Library/External/Compress/SevenZip/Structure/FileInfo.cs
new file mode 100644
index 00000000..2a6200b5
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Structure/FileInfo.cs
@@ -0,0 +1,140 @@
+using System;
+using System.IO;
+using System.Text;
+
+namespace Compress.SevenZip.Structure
+{
+ public class FileInfo
+ {
+ public string[] Names;
+ public bool[] EmptyStreamFlags;
+ public bool[] EmptyFileFlags;
+ public uint[] Attributes;
+
+ public void Read(BinaryReader br)
+ {
+ ulong size = br.ReadEncodedUInt64();
+ Names = new string[size];
+
+ ulong numEmptyFiles = 0;
+
+ for (;;)
+ {
+ HeaderProperty hp = (HeaderProperty) br.ReadByte();
+ if (hp == HeaderProperty.kEnd)
+ {
+ return;
+ }
+
+ ulong bytessize = br.ReadEncodedUInt64();
+ switch (hp)
+ {
+ case HeaderProperty.kName:
+ if (br.ReadByte() != 0)
+ {
+ throw new Exception("Cannot be external");
+ }
+
+ for (ulong i = 0; i < size; i++)
+ {
+ Names[i] = br.ReadName();
+ }
+
+ continue;
+
+ case HeaderProperty.kEmptyStream:
+ EmptyStreamFlags = Util.ReadBoolFlags(br, (ulong) Names.Length);
+ for (ulong i = 0; i < size; i++)
+ {
+ if (EmptyStreamFlags[i])
+ {
+ numEmptyFiles++;
+ }
+ }
+ continue;
+
+ case HeaderProperty.kEmptyFile:
+ EmptyFileFlags = Util.ReadBoolFlags(br, numEmptyFiles);
+ continue;
+
+ case HeaderProperty.kWinAttributes:
+ Attributes = Util.ReadUInt32Def(br, size);
+ continue;
+
+ // don't know what this is.
+ case HeaderProperty.kAnti:
+ br.ReadBytes((int)bytessize);
+ continue;
+
+ case HeaderProperty.kCreationTime:
+ case HeaderProperty.kLastAccessTime:
+ case HeaderProperty.kLastWriteTime:
+ br.ReadBytes((int) bytessize);
+ continue;
+
+ case HeaderProperty.kDummy:
+ br.ReadBytes((int) bytessize);
+ continue;
+
+ default:
+ throw new Exception(hp.ToString());
+ }
+ }
+ }
+
+ public void Write(BinaryWriter bw)
+ {
+ bw.Write((byte) HeaderProperty.kFilesInfo);
+ bw.WriteEncodedUInt64((ulong) Names.Length);
+
+
+ byte[] namebyte;
+ using (MemoryStream nameMem = new MemoryStream())
+ {
+ using (BinaryWriter nameBw = new BinaryWriter(nameMem,Encoding.UTF8,true))
+ {
+ nameBw.Write((byte) 0); //not external
+ foreach (string name in Names)
+ {
+ nameBw.WriteName(name);
+ }
+
+ namebyte = new byte[nameMem.Length];
+ nameMem.Position = 0;
+ nameMem.Read(namebyte, 0, namebyte.Length);
+ }
+ }
+
+ bw.Write((byte) HeaderProperty.kName);
+ bw.WriteEncodedUInt64((ulong) namebyte.Length);
+ bw.Write(namebyte);
+
+ if (EmptyStreamFlags != null)
+ {
+ bw.Write((byte) HeaderProperty.kEmptyStream);
+ Util.WriteBoolFlags(bw, EmptyStreamFlags);
+ }
+
+ if (EmptyFileFlags != null)
+ {
+ bw.Write((byte) HeaderProperty.kEmptyFile);
+ Util.WriteBoolFlags(bw, EmptyFileFlags);
+ }
+
+ if (Attributes != null)
+ {
+ bw.Write((byte) HeaderProperty.kWinAttributes);
+ Util.WriteUint32Def(bw, Attributes);
+ }
+
+ bw.Write((byte) HeaderProperty.kEnd);
+ }
+
+ public void Report(ref StringBuilder sb)
+ {
+ sb.AppendLine(" FileInfo");
+ sb.AppendLine(" ------");
+ }
+
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Structure/Folder.cs b/SabreTools.Library/External/Compress/SevenZip/Structure/Folder.cs
new file mode 100644
index 00000000..0df22996
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Structure/Folder.cs
@@ -0,0 +1,434 @@
+using System;
+using System.IO;
+using System.Text;
+using Compress.Utils;
+
+namespace Compress.SevenZip.Structure
+{
+ public class Folder
+ {
+ public Coder[] Coders;
+ public BindPair[] BindPairs;
+ public ulong PackedStreamIndexBase;
+ public ulong[] PackedStreamIndices;
+ public ulong[] UnpackedStreamSizes;
+ public uint? UnpackCRC;
+ public UnpackedStreamInfo[] UnpackedStreamInfo;
+
+
+ private void ReadFolder(BinaryReader br)
+ {
+ ulong numCoders = br.ReadEncodedUInt64();
+
+ Coders = new Coder[numCoders];
+
+ int numInStreams = 0;
+ int numOutStreams = 0;
+
+ for (ulong i = 0; i < numCoders; i++)
+ {
+ Coders[i] = new Coder();
+ Coders[i].Read(br);
+
+ numInStreams += (int) Coders[i].NumInStreams;
+ numOutStreams += (int) Coders[i].NumOutStreams;
+ }
+
+ int numBindPairs = numOutStreams - 1;
+ BindPairs = new BindPair[numBindPairs];
+ for (int i = 0; i < numBindPairs; i++)
+ {
+ BindPairs[i] = new BindPair();
+ BindPairs[i].Read(br);
+ }
+
+ if (numInStreams < numBindPairs)
+ {
+ throw new NotSupportedException("Error");
+ }
+
+ int numPackedStreams = numInStreams - numBindPairs;
+
+ PackedStreamIndices = new ulong[numPackedStreams];
+
+ if (numPackedStreams == 1)
+ {
+ uint pi = 0;
+ for (uint j = 0; j < numInStreams; j++)
+ {
+ for (uint k = 0; k < BindPairs.Length; k++)
+ {
+ if (BindPairs[k].InIndex == j)
+ {
+ continue;
+ }
+
+ PackedStreamIndices[pi++] = j;
+ break;
+ }
+ }
+ }
+ else
+ {
+ for (uint i = 0; i < numPackedStreams; i++)
+ {
+ PackedStreamIndices[i] = br.ReadEncodedUInt64();
+ }
+ }
+ }
+
+ private void ReadUnpackedStreamSize(BinaryReader br)
+ {
+ ulong outStreams = 0;
+ foreach (Coder c in Coders)
+ {
+ outStreams += c.NumOutStreams;
+ }
+
+ UnpackedStreamSizes = new ulong[outStreams];
+ for (uint j = 0; j < outStreams; j++)
+ {
+ UnpackedStreamSizes[j] = br.ReadEncodedUInt64();
+ }
+ }
+
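+ // the folder's final unpacked size is that of the output stream not consumed by any bind pair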
+ private ulong GetUnpackSize()
+ {
+ ulong outStreams = 0;
+ foreach (Coder coder in Coders)
+ {
+ outStreams += coder.NumInStreams;
+ }
+
+ for (ulong j = 0; j < outStreams; j++)
+ {
+ bool found = false;
+ foreach (BindPair bindPair in BindPairs)
+ {
+ if (bindPair.OutIndex != j)
+ {
+ continue;
+ }
+
+ found = true;
+ break;
+ }
+
+ if (!found)
+ {
+ return UnpackedStreamSizes[j];
+ }
+ }
+
+ return 0;
+ }
+
+
+ public static void ReadUnPackInfo(BinaryReader br, out Folder[] Folders)
+ {
+ Folders = null;
+ for (;;)
+ {
+ HeaderProperty hp = (HeaderProperty) br.ReadByte();
+ switch (hp)
+ {
+ case HeaderProperty.kFolder:
+ {
+ ulong numFolders = br.ReadEncodedUInt64();
+
+ Folders = new Folder[numFolders];
+
+ byte external = br.ReadByte();
+ switch (external)
+ {
+ case 0:
+ {
+ ulong folderIndex = 0;
+ for (uint i = 0; i < numFolders; i++)
+ {
+ Folders[i] = new Folder();
+ Folders[i].ReadFolder(br);
+ Folders[i].PackedStreamIndexBase = folderIndex;
+ folderIndex += (ulong) Folders[i].PackedStreamIndices.Length;
+ }
+
+ break;
+ }
+
+ case 1:
+ throw new NotSupportedException("External flag");
+ }
+
+ continue;
+ }
+
+
+ case HeaderProperty.kCodersUnPackSize:
+ {
+ for (uint i = 0; i < Folders.Length; i++)
+ {
+ Folders[i].ReadUnpackedStreamSize(br);
+ }
+
+ continue;
+ }
+
+ case HeaderProperty.kCRC:
+ {
+ uint?[] crcs;
+ Util.UnPackCRCs(br, (ulong) Folders.Length, out crcs);
+ for (int i = 0; i < Folders.Length; i++)
+ {
+ Folders[i].UnpackCRC = crcs[i];
+ }
+
+ continue;
+ }
+
+ case HeaderProperty.kEnd:
+ return;
+
+ default:
+ throw new Exception(hp.ToString());
+ }
+ }
+ }
+
+ public static void ReadSubStreamsInfo(BinaryReader br, ref Folder[] Folders)
+ {
+ for (;;)
+ {
+ HeaderProperty hp = (HeaderProperty) br.ReadByte();
+ switch (hp)
+ {
+ case HeaderProperty.kNumUnPackStream:
+ {
+ for (int f = 0; f < Folders.Length; f++)
+ {
+ int numStreams = (int) br.ReadEncodedUInt64();
+ Folders[f].UnpackedStreamInfo = new UnpackedStreamInfo[numStreams];
+ for (int i = 0; i < numStreams; i++)
+ {
+ Folders[f].UnpackedStreamInfo[i] = new UnpackedStreamInfo();
+ }
+ }
+
+ continue;
+ }
+
+ case HeaderProperty.kSize:
+ {
+ for (int f = 0; f < Folders.Length; f++)
+ {
+ Folder folder = Folders[f];
+
+ if (folder.UnpackedStreamInfo.Length == 0)
+ {
+ continue;
+ }
+
+ ulong sum = 0;
+ for (int i = 0; i < folder.UnpackedStreamInfo.Length - 1; i++)
+ {
+ ulong size = br.ReadEncodedUInt64();
+ folder.UnpackedStreamInfo[i].UnpackedSize = size;
+ sum += size;
+ }
+
+ folder.UnpackedStreamInfo[folder.UnpackedStreamInfo.Length - 1].UnpackedSize =
+ folder.GetUnpackSize() - sum;
+ }
+
+ continue;
+ }
+
+ case HeaderProperty.kCRC:
+ {
+ ulong numCRC = 0;
+ foreach (Folder folder in Folders)
+ {
+ if (folder.UnpackedStreamInfo == null)
+ {
+ folder.UnpackedStreamInfo = new UnpackedStreamInfo[1];
+ folder.UnpackedStreamInfo[0] = new UnpackedStreamInfo();
+ folder.UnpackedStreamInfo[0].UnpackedSize = folder.GetUnpackSize();
+ }
+
+ if ((folder.UnpackedStreamInfo.Length != 1) || !folder.UnpackCRC.HasValue)
+ {
+ numCRC += (ulong) folder.UnpackedStreamInfo.Length;
+ }
+ }
+
+ int crcIndex = 0;
+ uint?[] crc;
+ Util.UnPackCRCs(br, numCRC, out crc);
+ for (uint i = 0; i < Folders.Length; i++)
+ {
+ Folder folder = Folders[i];
+ if ((folder.UnpackedStreamInfo.Length == 1) && folder.UnpackCRC.HasValue)
+ {
+ folder.UnpackedStreamInfo[0].Crc = folder.UnpackCRC;
+ }
+ else
+ {
+ for (uint j = 0; j < folder.UnpackedStreamInfo.Length; j++, crcIndex++)
+ {
+ folder.UnpackedStreamInfo[j].Crc = crc[crcIndex];
+ }
+ }
+ }
+
+ continue;
+ }
+
+ case HeaderProperty.kEnd:
+ return;
+
+ default:
+ throw new Exception(hp.ToString());
+ }
+ }
+ }
+
+ private void WriteFolder(BinaryWriter bw)
+ {
+ ulong numCoders = (ulong) Coders.Length;
+ bw.WriteEncodedUInt64(numCoders);
+ for (ulong i = 0; i < numCoders; i++)
+ {
+ Coders[i].Write(bw);
+ }
+
+ ulong numBindingPairs = BindPairs == null ? 0 : (ulong) BindPairs.Length;
+ for (ulong i = 0; i < numBindingPairs; i++)
+ {
+ BindPairs[i].Write(bw);
+ }
+
+ // PackedStreamIndices still need looking at, but they are not required for the basic writing being done here
+ }
+
+ private void WriteUnpackedStreamSize(BinaryWriter bw)
+ {
+ ulong numUnpackedStreamSizes = (ulong) UnpackedStreamSizes.Length;
+ for (ulong i = 0; i < numUnpackedStreamSizes; i++)
+ {
+ bw.WriteEncodedUInt64(UnpackedStreamSizes[i]);
+ }
+ }
+
+ public static void WriteUnPackInfo(BinaryWriter bw, Folder[] Folders)
+ {
+ bw.Write((byte) HeaderProperty.kUnPackInfo);
+
+ bw.Write((byte) HeaderProperty.kFolder);
+ ulong numFolders = (ulong) Folders.Length;
+ bw.WriteEncodedUInt64(numFolders);
+ bw.Write((byte) 0); //External Flag
+ for (ulong i = 0; i < numFolders; i++)
+ {
+ Folders[i].WriteFolder(bw);
+ }
+
+
+ bw.Write((byte) HeaderProperty.kCodersUnPackSize);
+ for (ulong i = 0; i < numFolders; i++)
+ {
+ Folders[i].WriteUnpackedStreamSize(bw);
+ }
+
+ if (Folders[0].UnpackCRC != null)
+ {
+ bw.Write((byte) HeaderProperty.kCRC);
+ throw new NotImplementedException();
+ }
+
+ bw.Write((byte) HeaderProperty.kEnd);
+ }
+
+ public static void WriteSubStreamsInfo(BinaryWriter bw, Folder[] Folders)
+ {
+ bw.Write((byte) HeaderProperty.kSubStreamsInfo);
+
+ bw.Write((byte) HeaderProperty.kNumUnPackStream);
+ for (int f = 0; f < Folders.Length; f++)
+ {
+ ulong numStreams = (ulong) Folders[f].UnpackedStreamInfo.Length;
+ bw.WriteEncodedUInt64(numStreams);
+ }
+
+ bw.Write((byte) HeaderProperty.kSize);
+
+ for (int f = 0; f < Folders.Length; f++)
+ {
+ Folder folder = Folders[f];
+ for (int i = 0; i < folder.UnpackedStreamInfo.Length - 1; i++)
+ {
+ bw.WriteEncodedUInt64(folder.UnpackedStreamInfo[i].UnpackedSize);
+ }
+ }
+
+ bw.Write((byte) HeaderProperty.kCRC);
+ bw.Write((byte) 1); // crc flags default to true
+ for (int f = 0; f < Folders.Length; f++)
+ {
+ Folder folder = Folders[f];
+ for (int i = 0; i < folder.UnpackedStreamInfo.Length; i++)
+ {
+ bw.Write(Util.uinttobytes(folder.UnpackedStreamInfo[i].Crc));
+ }
+ }
+
+ bw.Write((byte) HeaderProperty.kEnd);
+ }
+
+
+ public void Report(ref StringBuilder sb)
+ {
+ if (Coders == null)
+ {
+ sb.AppendLine(" Coders[] = null");
+ }
+ else
+ {
+ sb.AppendLine($" Coders[] = ({Coders.Length})");
+ foreach (Coder c in Coders)
+ {
+ c.Report(ref sb);
+ }
+ }
+ if (BindPairs == null)
+ {
+ sb.AppendLine(" BindPairs[] = null");
+ }
+ else
+ {
+ sb.AppendLine($" BindPairs[] = ({BindPairs.Length})");
+ foreach (BindPair bp in BindPairs)
+ {
+ bp.Report(ref sb);
+ }
+ }
+
+ sb.AppendLine($" PackedStreamIndexBase = {PackedStreamIndexBase}");
+ sb.AppendLine($" PackedStreamIndices[] = {PackedStreamIndices.ToArrayString()}");
+ sb.AppendLine($" UnpackedStreamSizes[] = {UnpackedStreamSizes.ToArrayString()}");
+ sb.AppendLine($" UnpackCRC = {UnpackCRC.ToHex()}");
+
+ if (UnpackedStreamInfo == null)
+ {
+ sb.AppendLine(" UnpackedStreamInfo[] = null");
+ }
+ else
+ {
+ sb.AppendLine($" UnpackedStreamInfo[{UnpackedStreamInfo.Length}]");
+ foreach (UnpackedStreamInfo usi in UnpackedStreamInfo)
+ {
+ usi.Report(ref sb);
+ }
+ }
+
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Structure/Header.cs b/SabreTools.Library/External/Compress/SevenZip/Structure/Header.cs
new file mode 100644
index 00000000..bd303109
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Structure/Header.cs
@@ -0,0 +1,133 @@
+using System;
+using System.IO;
+using System.Text;
+using Compress.SevenZip.Compress.LZMA;
+using Compress.Utils;
+
+namespace Compress.SevenZip.Structure
+{
+ public class Header
+ {
+ public StreamsInfo StreamsInfo;
+ public FileInfo FileInfo;
+
+ public void Read(BinaryReader br)
+ {
+ for (; ; )
+ {
+ HeaderProperty hp = (HeaderProperty)br.ReadByte();
+ switch (hp)
+ {
+ case HeaderProperty.kMainStreamsInfo:
+ StreamsInfo = new StreamsInfo();
+ StreamsInfo.Read(br);
+ break;
+
+ case HeaderProperty.kFilesInfo:
+ FileInfo = new FileInfo();
+ FileInfo.Read(br);
+ break;
+
+ case HeaderProperty.kEnd:
+ return;
+
+ default:
+ throw new Exception(hp.ToString());
+ }
+ }
+ }
+
+
+ private void Write(BinaryWriter bw)
+ {
+ bw.Write((byte)HeaderProperty.kHeader);
+ StreamsInfo.Write(bw);
+ FileInfo.Write(bw);
+ bw.Write((byte)HeaderProperty.kEnd);
+ }
+
+ public void WriteHeader(BinaryWriter bw)
+ {
+ Write(bw);
+ }
+
+ public static ZipReturn ReadHeaderOrPackedHeader(Stream stream, long baseOffset, out Header header)
+ {
+ header = null;
+
+ using (BinaryReader br = new BinaryReader(stream, Encoding.UTF8, true))
+ {
+ HeaderProperty hp = (HeaderProperty)br.ReadByte();
+ switch (hp)
+ {
+ case HeaderProperty.kEncodedHeader:
+ {
+ StreamsInfo streamsInfo = new StreamsInfo();
+ streamsInfo.Read(br);
+
+ if (streamsInfo.Folders.Length > 1)
+ {
+ return ZipReturn.ZipUnsupportedCompression;
+ }
+
+ Folder firstFolder = streamsInfo.Folders[0];
+ if (firstFolder.Coders.Length > 1)
+ {
+ return ZipReturn.ZipUnsupportedCompression;
+ }
+
+ byte[] method = firstFolder.Coders[0].Method;
+ if (!((method.Length == 3) && (method[0] == 3) && (method[1] == 1) && (method[2] == 1))) // LZMA
+ {
+ return ZipReturn.ZipUnsupportedCompression;
+ }
+
+ stream.Seek(baseOffset + (long)streamsInfo.PackPosition, SeekOrigin.Begin);
+ using (LzmaStream decoder = new LzmaStream(firstFolder.Coders[0].Properties, stream))
+ {
+ ZipReturn zr = ReadHeaderOrPackedHeader(decoder, baseOffset, out header);
+ if (zr != ZipReturn.ZipGood)
+ {
+ return zr;
+ }
+ }
+
+ return ZipReturn.ZipGood;
+ }
+
+ case HeaderProperty.kHeader:
+ {
+ header = new Header();
+ header.Read(br);
+ return ZipReturn.ZipGood;
+ }
+ }
+
+ return ZipReturn.ZipCentralDirError;
+ }
+ }
+
+ public void Report(ref StringBuilder sb)
+ {
+ sb.AppendLine("Header");
+ sb.AppendLine("------");
+ if (StreamsInfo == null)
+ {
+ sb.AppendLine("StreamsInfo == null");
+ }
+ else
+ {
+ StreamsInfo.Report(ref sb);
+ }
+
+ if (FileInfo == null)
+ {
+ sb.AppendLine("FileInfo == null");
+ }
+ else
+ {
+ FileInfo.Report(ref sb);
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Structure/PackedStreamInfo.cs b/SabreTools.Library/External/Compress/SevenZip/Structure/PackedStreamInfo.cs
new file mode 100644
index 00000000..66d6bb03
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Structure/PackedStreamInfo.cs
@@ -0,0 +1,98 @@
+using System;
+using System.IO;
+using System.Text;
+using Compress.Utils;
+
+namespace Compress.SevenZip.Structure
+{
+ public class PackedStreamInfo
+ {
+ public ulong PackedSize;
+ public ulong? Crc;
+ public ulong StreamPosition;
+ public Stream PackedStream;
+
+ public static void Read(BinaryReader br, out ulong packPosition, out PackedStreamInfo[] packedStreams)
+ {
+ packPosition = br.ReadEncodedUInt64();
+
+ ulong numPackStreams = br.ReadEncodedUInt64();
+
+ packedStreams = new PackedStreamInfo[numPackStreams];
+ for (ulong i = 0; i < numPackStreams; i++)
+ {
+ packedStreams[i] = new PackedStreamInfo();
+ }
+
+ ulong streamPosition = 0;
+
+ for (;;)
+ {
+ HeaderProperty hp = (HeaderProperty) br.ReadByte();
+ switch (hp)
+ {
+ case HeaderProperty.kSize:
+ for (ulong i = 0; i < numPackStreams; i++)
+ {
+ packedStreams[i].StreamPosition = streamPosition;
+ packedStreams[i].PackedSize = br.ReadEncodedUInt64();
+ streamPosition += packedStreams[i].PackedSize;
+ }
+
+ continue;
+
+ case HeaderProperty.kCRC:
+ for (ulong i = 0; i < numPackStreams; i++)
+ {
+ packedStreams[i].Crc = br.ReadEncodedUInt64();
+ }
+
+ continue;
+
+ case HeaderProperty.kEnd:
+ return;
+
+ default:
+ throw new Exception(hp.ToString());
+ }
+ }
+ }
+
+ public static void Write(BinaryWriter bw, ulong packPosition, PackedStreamInfo[] packedStreams)
+ {
+ ulong numPackStreams = (ulong) packedStreams.Length;
+ bw.Write((byte) HeaderProperty.kPackInfo);
+ bw.WriteEncodedUInt64(packPosition);
+ bw.WriteEncodedUInt64(numPackStreams);
+
+ bw.Write((byte) HeaderProperty.kSize);
+ ulong streamPosition = 0;
+ for (ulong i = 0; i < numPackStreams; i++)
+ {
+ packedStreams[i].StreamPosition = streamPosition;
+ bw.WriteEncodedUInt64(packedStreams[i].PackedSize);
+ streamPosition += packedStreams[i].PackedSize;
+ }
+
+ // Only checking the first CRC, assuming all the rest will be the same
+ if (packedStreams[0].Crc != null)
+ {
+ bw.Write((byte) HeaderProperty.kCRC);
+ for (ulong i = 0; i < numPackStreams; i++)
+ {
+ bw.WriteEncodedUInt64(packedStreams[i].Crc ?? 0);
+ }
+ }
+
+ bw.Write((byte) HeaderProperty.kEnd);
+ }
+
+
+ public void Report(ref StringBuilder sb)
+ {
+ sb.AppendLine($" PackedSize = {PackedSize}");
+ sb.AppendLine($" Crc = {Crc.ToHex()}");
+ sb.AppendLine($" StreamPosition = {StreamPosition}");
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Structure/SignatureHeader.cs b/SabreTools.Library/External/Compress/SevenZip/Structure/SignatureHeader.cs
new file mode 100644
index 00000000..1ca52a86
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Structure/SignatureHeader.cs
@@ -0,0 +1,110 @@
+using System.IO;
+using System.Text;
+
+namespace Compress.SevenZip.Structure
+{
+ internal class SignatureHeader
+ {
+ private static readonly byte[] Signature = {(byte) '7', (byte) 'z', 0xBC, 0xAF, 0x27, 0x1C};
+
+ private byte _major;
+ private byte _minor;
+
+ private uint _startHeaderCRC;
+
+ public ulong NextHeaderOffset;
+ public ulong NextHeaderSize;
+ public uint NextHeaderCRC;
+
+
+ private long _crcOffset;
+ public long BaseOffset { get; private set; }
+
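+ // signature header layout: 6 byte "7z" signature, 2 version bytes, CRC32 of the 20 byte start header, then NextHeaderOffset, NextHeaderSize and NextHeaderCRC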
+ public bool Read(Stream stream)
+ {
+ using (BinaryReader br = new BinaryReader(stream, Encoding.UTF8, true))
+ {
+ byte[] signatureBytes = br.ReadBytes(6);
+ if (!signatureBytes.Compare(Signature))
+ {
+ return false;
+ }
+
+ _major = br.ReadByte();
+ _minor = br.ReadByte();
+
+ _startHeaderCRC = br.ReadUInt32();
+
+ long pos = br.BaseStream.Position;
+ byte[] mainHeader = new byte[8 + 8 + 4];
+ br.BaseStream.Read(mainHeader, 0, mainHeader.Length);
+ if (!Utils.CRC.VerifyDigest(_startHeaderCRC, mainHeader, 0, (uint) mainHeader.Length))
+ {
+ return false;
+ }
+
+ br.BaseStream.Seek(pos, SeekOrigin.Begin);
+
+ NextHeaderOffset = br.ReadUInt64();
+ NextHeaderSize = br.ReadUInt64();
+ NextHeaderCRC = br.ReadUInt32();
+ return true;
+ }
+ }
+
+ public void Write(BinaryWriter bw)
+ {
+ //SignatureHeader
+ //~~~~~~~~~~~~~~~
+
+ bw.Write(Signature);
+
+ //ArchiveVersion
+ //{
+ bw.Write((byte) 0); // BYTE Major
+ bw.Write((byte) 3); // BYTE Minor
+ //};
+
+ _crcOffset = bw.BaseStream.Position;
+ bw.Write((uint) 0); //HeaderCRC
+
+ //StartHeader
+ //{
+ bw.Write((ulong) 0); //NextHeaderOffset
+ bw.Write((ulong) 0); //NextHeaderSize
+ bw.Write((uint) 0); //NextHeaderCRC
+ //}
+
+ BaseOffset = bw.BaseStream.Position;
+ }
+
+ public void WriteFinal(BinaryWriter bw, ulong headerpos, ulong headerLength, uint headerCRC)
+ {
+ long fileEnd = bw.BaseStream.Position;
+
+
+ byte[] sigHeaderBytes;
+ using (MemoryStream sigHeaderMem = new MemoryStream())
+ {
+ using (BinaryWriter sigHeaderBw = new BinaryWriter(sigHeaderMem,Encoding.UTF8,true))
+ {
+ sigHeaderBw.Write((ulong) ((long) headerpos - BaseOffset)); //NextHeaderOffset
+ sigHeaderBw.Write(headerLength); //NextHeaderSize
+ sigHeaderBw.Write(headerCRC); //NextHeaderCRC
+
+ sigHeaderBytes = new byte[sigHeaderMem.Length];
+ sigHeaderMem.Position = 0;
+ sigHeaderMem.Read(sigHeaderBytes, 0, sigHeaderBytes.Length);
+ }
+ }
+
+ uint sigHeaderCRC = Utils.CRC.CalculateDigest(sigHeaderBytes, 0, (uint) sigHeaderBytes.Length);
+
+ bw.BaseStream.Position = _crcOffset;
+ bw.Write(sigHeaderCRC); //Header CRC
+ bw.Write(sigHeaderBytes);
+
+ bw.BaseStream.Seek(fileEnd, SeekOrigin.Begin);
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Structure/StreamsInfo.cs b/SabreTools.Library/External/Compress/SevenZip/Structure/StreamsInfo.cs
new file mode 100644
index 00000000..c78fff39
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Structure/StreamsInfo.cs
@@ -0,0 +1,84 @@
+using System;
+using System.IO;
+using System.Text;
+using Compress.Utils;
+
+namespace Compress.SevenZip.Structure
+{
+ public class StreamsInfo
+ {
+ public ulong PackPosition;
+ public PackedStreamInfo[] PackedStreams;
+ public Folder[] Folders;
+
+ public void Read(BinaryReader br)
+ {
+ for (;;)
+ {
+ HeaderProperty hp = (HeaderProperty) br.ReadByte();
+ switch (hp)
+ {
+ case HeaderProperty.kPackInfo:
+ PackedStreamInfo.Read(br, out PackPosition, out PackedStreams);
+ continue;
+
+ case HeaderProperty.kUnPackInfo:
+ Folder.ReadUnPackInfo(br, out Folders);
+ continue;
+
+ case HeaderProperty.kSubStreamsInfo:
+ Folder.ReadSubStreamsInfo(br, ref Folders);
+ continue;
+
+ case HeaderProperty.kEnd:
+ return;
+
+ default:
+ throw new Exception(hp.ToString());
+ }
+ }
+ }
+
+ public void Write(BinaryWriter bw)
+ {
+ bw.Write((byte) HeaderProperty.kMainStreamsInfo);
+ PackedStreamInfo.Write(bw, PackPosition, PackedStreams);
+ Folder.WriteUnPackInfo(bw, Folders);
+ Folder.WriteSubStreamsInfo(bw, Folders);
+ bw.Write((byte) HeaderProperty.kEnd);
+ }
+
+
+ public void Report(ref StringBuilder sb)
+ {
+ sb.AppendLine(" StreamsInfo");
+ sb.AppendLine(" -----------");
+ sb.AppendLine($" PackPosition = {PackPosition}");
+ if (PackedStreams == null)
+ {
+ sb.AppendLine($" PackedStreams[] = null");
+ }
+ else
+ {
+ sb.AppendLine($" PackedStreams[] = ({PackedStreams.Length})");
+ foreach (PackedStreamInfo psi in PackedStreams)
+ {
+ psi.Report(ref sb);
+ }
+ }
+ if (Folders == null)
+ {
+ sb.AppendLine($" Folders[] = null");
+ }
+ else
+ {
+ sb.AppendLine($" Folders[] = ({Folders.Length})");
+ foreach (Folder f in Folders)
+ {
+ f.Report(ref sb);
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Structure/UnpackedStreamInfo.cs b/SabreTools.Library/External/Compress/SevenZip/Structure/UnpackedStreamInfo.cs
new file mode 100644
index 00000000..9eceb3c7
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Structure/UnpackedStreamInfo.cs
@@ -0,0 +1,16 @@
+using System.Text;
+using Compress.Utils;
+
+namespace Compress.SevenZip.Structure
+{
+ public class UnpackedStreamInfo
+ {
+ public ulong UnpackedSize;
+ public uint? Crc;
+
+ public void Report(ref StringBuilder sb)
+ {
+ sb.AppendLine($" Crc = {Crc.ToHex()} , Size = {UnpackedSize}");
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/SevenZip/Util.cs b/SabreTools.Library/External/Compress/SevenZip/Util.cs
new file mode 100644
index 00000000..fb14b00b
--- /dev/null
+++ b/SabreTools.Library/External/Compress/SevenZip/Util.cs
@@ -0,0 +1,351 @@
+using System.IO;
+using System.Text;
+
+namespace Compress.SevenZip
+{
+ public enum HeaderProperty
+ {
+ kEnd,
+ kHeader,
+
+ kArchiveProperties,
+
+ kAdditionalStreamsInfo,
+ kMainStreamsInfo,
+ kFilesInfo,
+
+ kPackInfo,
+ kUnPackInfo,
+ kSubStreamsInfo,
+
+ kSize,
+ kCRC,
+
+ kFolder,
+
+ kCodersUnPackSize,
+ kNumUnPackStream,
+
+ kEmptyStream,
+ kEmptyFile,
+ kAnti,
+
+ kName,
+ kCreationTime,
+ kLastAccessTime,
+ kLastWriteTime,
+ kWinAttributes,
+ kComment,
+
+ kEncodedHeader,
+
+ kStartPos,
+ kDummy
+ }
+
+ public static class Util
+ {
+ public static readonly Encoding Enc = Encoding.GetEncoding(28591);
+
+ public static void memset(byte[] buffer, int start, byte val, int len)
+ {
+ for (int i = 0; i < len; i++)
+ {
+ buffer[start + i] = val;
+ }
+ }
+
+ public static void memcpyr(byte[] destBuffer, int destPoint, byte[] sourceBuffer, int sourcePoint, int len)
+ {
+ for (int i = len - 1; i >= 0; i--)
+ {
+ destBuffer[destPoint + i] = sourceBuffer[sourcePoint + i];
+ }
+ }
+
+
+ public static bool memcmp(byte[] buffer1, int offset, byte[] buffer2, int len)
+ {
+ for (int i = 0; i < len; i++)
+ {
+ if (buffer1[offset + i] != buffer2[i])
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+
+
+ public static bool Compare(this byte[] b1, byte[] b2)
+ {
+ if ((b1 == null) || (b2 == null))
+ {
+ return false;
+ }
+
+ if (b1.Length != b2.Length)
+ {
+ return false;
+ }
+
+ for (int i = 0; i < b1.Length; i++)
+ {
+ if (b1[i] != b2[i])
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+
+ public static ulong ReadEncodedUInt64(this BinaryReader br)
+ {
+ byte mask = 0x80;
+ int i;
+ byte firstByte = br.ReadByte();
+ ulong value = 0;
+ for (i = 0; i < 8; i++)
+ {
+ if ((firstByte & mask) == 0)
+ {
+ ulong highPart = (ulong) (firstByte & (mask - 1));
+ value += highPart << (8*i);
+ return value;
+ }
+ byte b = br.ReadByte();
+ value |= (ulong) b << (8*i);
+ mask >>= 1;
+ }
+ return value;
+ }
+
+ public static void WriteEncodedUInt64(this BinaryWriter bw, ulong value)
+ {
+ byte firstByte = 0;
+ byte mask = 0x80;
+ int i;
+ for (i = 0; i < 8; i++)
+ {
+ if (value < (ulong) 1 << (7*(i + 1)))
+ {
+ firstByte |= (byte) (value >> (8*i));
+ break;
+ }
+ firstByte |= mask;
+ mask >>= 1;
+ }
+ bw.Write(firstByte);
+ for (; i > 0; i--)
+ {
+ bw.Write((byte) value);
+ value >>= 8;
+ }
+ }
+
+ public static string ReadName(this BinaryReader br)
+ {
+ StringBuilder stringBuilder = new StringBuilder();
+ for (;;)
+ {
+ char c = (char) br.ReadUInt16();
+ if (c == 0)
+ {
+ return stringBuilder.ToString();
+ }
+ stringBuilder.Append(c);
+ }
+ }
+
+ public static void WriteName(this BinaryWriter bw, string name)
+ {
+ char[] chars = name.ToCharArray();
+ for (int i = 0; i < chars.Length; i++)
+ {
+ bw.Write((ushort) chars[i]);
+ }
+ bw.Write((ushort) 0);
+ }
+
+
+ public static void UnPackCRCs(BinaryReader br, ulong numItems, out uint?[] digests)
+ {
+ bool[] digestsDefined = ReadBoolFlagsDefaultTrue(br, numItems);
+ digests = new uint?[numItems];
+ for (ulong i = 0; i < numItems; i++)
+ {
+ if (digestsDefined[i])
+ {
+ digests[i] = br.ReadUInt32();
+ }
+ }
+ }
+
+ private static bool[] ReadBoolFlagsDefaultTrue(BinaryReader br, ulong numItems)
+ {
+ byte allAreDefined = br.ReadByte();
+ if (allAreDefined == 0)
+ {
+ return ReadBoolFlags(br, numItems);
+ }
+ bool[] flags = new bool[numItems];
+ for (ulong i = 0; i < numItems; i++)
+ {
+ flags[i] = true;
+ }
+ return flags;
+ }
+
+ public static bool[] ReadBoolFlags(BinaryReader br, ulong numItems)
+ {
+ byte b = 0;
+ byte mask = 0;
+
+ bool[] flags = new bool[numItems];
+ for (ulong i = 0; i < numItems; i++)
+ {
+ if (mask == 0)
+ {
+ b = br.ReadByte();
+ mask = 0x80;
+ }
+
+ flags[i] = (b & mask) != 0;
+
+ mask >>= 1;
+ }
+ return flags;
+ }
+
+ public static bool[] ReadBoolFlags2(BinaryReader br, ulong numItems)
+ {
+ byte allAreDefined = br.ReadByte();
+ if (allAreDefined == 0)
+ {
+ return ReadBoolFlags(br, numItems);
+ }
+
+
+ bool[] flags = new bool[numItems];
+ for (ulong i = 0; i < numItems; i++)
+ {
+ flags[i] = true;
+ }
+ return flags;
+ }
+
+ public static void WriteUint32Def(BinaryWriter bw, uint[] values)
+ {
+ bw.WriteEncodedUInt64((ulong) (values.Length*4 + 2));
+ bw.Write((byte) 1);
+ bw.Write((byte) 0);
+ for (int i = 0; i < values.Length; i++)
+ {
+ bw.Write(values[i]);
+ }
+ }
+
+ public static uint[] ReadUInt32Def(BinaryReader br, ulong numItems)
+ {
+ uint[] v = new uint[numItems];
+ bool[] defs = ReadBoolFlags2(br, numItems);
+ byte tmp = br.ReadByte();
+ for (ulong i = 0; i < numItems; i++)
+ {
+ v[i] = defs[i] ? br.ReadUInt32() : 0;
+ }
+
+ return v;
+ }
+
+ public static ulong[] ReadUInt64Def(BinaryReader br, ulong numItems)
+ {
+ ulong[] v = new ulong[numItems];
+ bool[] defs = ReadBoolFlags2(br, numItems);
+ byte tmp = br.ReadByte();
+ for (ulong i = 0; i < numItems; i++)
+ {
+ v[i] = defs[i] ? br.ReadUInt64() : 0;
+ }
+
+ return v;
+ }
+
+ public static void WriteBoolFlags(BinaryWriter bw, bool[] bArray)
+ {
+ bw.WriteEncodedUInt64((ulong) ((bArray.Length + 7)/8));
+ byte mask = 0x80;
+ byte tmpOut = 0;
+ for (int i = 0; i < bArray.Length; i++)
+ {
+ if (bArray[i])
+ {
+ tmpOut |= mask;
+ }
+
+ mask >>= 1;
+ if (mask != 0)
+ {
+ continue;
+ }
+
+ bw.Write(tmpOut);
+ mask = 0x80;
+ tmpOut = 0;
+ }
+ if (mask != 0x80)
+ {
+ bw.Write(tmpOut);
+ }
+ }
+
+ public static byte[] uinttobytes(uint? crc)
+ {
+ if (crc == null)
+ {
+ return null;
+ }
+ uint c = (uint) crc;
+
+ byte[] b = new byte[4];
+ b[0] = (byte) ((c >> 24) & 0xff);
+ b[1] = (byte) ((c >> 16) & 0xff);
+ b[2] = (byte) ((c >> 8) & 0xff);
+ b[3] = (byte) ((c >> 0) & 0xff);
+ return b;
+ }
+
+ public static uint? bytestouint(byte[] crc)
+ {
+ if (crc == null)
+ {
+ return null;
+ }
+
+ return (uint?) ((crc[0] << 24) | (crc[1] << 16) | (crc[2] << 8) | (crc[3] << 0));
+ }
+
+ public static bool ByteArrCompare(byte[] b0, byte[] b1)
+ {
+ if ((b0 == null) || (b1 == null))
+ {
+ return false;
+ }
+ if (b0.Length != b1.Length)
+ {
+ return false;
+ }
+
+ for (int i = 0; i < b0.Length; i++)
+ {
+ if (b0[i] != b1[i])
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+ }
+}
\ No newline at end of file
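
ReadEncodedUInt64()/WriteEncodedUInt64() above implement 7-zip's variable-length number format: the count of leading set bits in the first byte says how many extra little-endian bytes follow, and the remaining low bits of that first byte carry the most significant part of the value, so small numbers cost a single byte and the full 64-bit range costs at most nine. A small round-trip sketch, assuming the Compress.SevenZip.Util extensions compile as shown; the sample values and class name are illustrative.

using System;
using System.IO;
using System.Text;
using Compress.SevenZip;

static class EncodedNumberSketch
{
    // Writes each sample value with WriteEncodedUInt64, reads it back with
    // ReadEncodedUInt64, and prints how many bytes the encoding used.
    public static void RoundTrip()
    {
        ulong[] samples = { 0, 127, 128, 0x1234, 0xFFFFFFFF, ulong.MaxValue };
        foreach (ulong value in samples)
        {
            using (MemoryStream ms = new MemoryStream())
            {
                using (BinaryWriter bw = new BinaryWriter(ms, Encoding.UTF8, true))
                {
                    bw.WriteEncodedUInt64(value);
                }

                ms.Position = 0;
                using (BinaryReader br = new BinaryReader(ms))
                {
                    ulong back = br.ReadEncodedUInt64();
                    Console.WriteLine($"{value} -> {ms.Length} byte(s) -> {back}");
                }
            }
        }
    }
}
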
diff --git a/SabreTools.Library/External/Compress/ThreadReaders/ThreadCRC.cs b/SabreTools.Library/External/Compress/ThreadReaders/ThreadCRC.cs
new file mode 100644
index 00000000..5be20fe8
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ThreadReaders/ThreadCRC.cs
@@ -0,0 +1,72 @@
+using System;
+using System.Threading;
+
+namespace Compress.ThreadReaders
+{
+ public class ThreadCRC : IDisposable
+ {
+ private Utils.CRC crc;
+ private readonly AutoResetEvent _waitEvent;
+ private readonly AutoResetEvent _outEvent;
+ private readonly Thread _tWorker;
+
+ private byte[] _buffer;
+ private int _size;
+ private bool _finished;
+
+
+ public ThreadCRC()
+ {
+ crc = new Utils.CRC();
+ _waitEvent = new AutoResetEvent(false);
+ _outEvent = new AutoResetEvent(false);
+ _finished = false;
+
+ _tWorker = new Thread(MainLoop);
+ _tWorker.Start();
+ }
+
+ public byte[] Hash => crc.Crc32ResultB;
+
+ public void Dispose()
+ {
+ _waitEvent.Dispose();
+ _outEvent.Dispose();
+ }
+
+ private void MainLoop()
+ {
+ while (true)
+ {
+ _waitEvent.WaitOne();
+ if (_finished)
+ {
+ break;
+ }
+
+ crc.SlurpBlock(_buffer, 0, _size);
+
+ _outEvent.Set();
+ }
+ }
+
+ public void Trigger(byte[] buffer, int size)
+ {
+ _buffer = buffer;
+ _size = size;
+ _waitEvent.Set();
+ }
+
+ public void Wait()
+ {
+ _outEvent.WaitOne();
+ }
+
+ public void Finish()
+ {
+ _finished = true;
+ _waitEvent.Set();
+ _tWorker.Join();
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/ThreadReaders/ThreadLoadBuffer.cs b/SabreTools.Library/External/Compress/ThreadReaders/ThreadLoadBuffer.cs
new file mode 100644
index 00000000..10633554
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ThreadReaders/ThreadLoadBuffer.cs
@@ -0,0 +1,79 @@
+using System;
+using System.IO;
+using System.Threading;
+
+namespace Compress.ThreadReaders
+{
+ public class ThreadLoadBuffer : IDisposable
+ {
+ private readonly AutoResetEvent _waitEvent;
+ private readonly AutoResetEvent _outEvent;
+ private readonly Thread _tWorker;
+
+ private byte[] _buffer;
+ private int _size;
+ private readonly Stream _ds;
+ private bool _finished;
+ public bool errorState;
+
+ public int SizeRead;
+
+ public ThreadLoadBuffer(Stream ds)
+ {
+ _waitEvent = new AutoResetEvent(false);
+ _outEvent = new AutoResetEvent(false);
+ _finished = false;
+ _ds = ds;
+ errorState = false;
+
+ _tWorker = new Thread(MainLoop);
+ _tWorker.Start();
+ }
+
+ public void Dispose()
+ {
+ _waitEvent.Close();
+ _outEvent.Close();
+ }
+
+ private void MainLoop()
+ {
+ while (true)
+ {
+ _waitEvent.WaitOne();
+ if (_finished)
+ {
+ break;
+ }
+ try
+ {
+ SizeRead = _ds.Read(_buffer, 0, _size);
+ }
+ catch (Exception)
+ {
+ errorState = true;
+ }
+ _outEvent.Set();
+ }
+ }
+
+ public void Trigger(byte[] buffer, int size)
+ {
+ _buffer = buffer;
+ _size = size;
+ _waitEvent.Set();
+ }
+
+ public void Wait()
+ {
+ _outEvent.WaitOne();
+ }
+
+ public void Finish()
+ {
+ _finished = true;
+ _waitEvent.Set();
+ _tWorker.Join();
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/ThreadReaders/ThreadMD5.cs b/SabreTools.Library/External/Compress/ThreadReaders/ThreadMD5.cs
new file mode 100644
index 00000000..70b7679f
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ThreadReaders/ThreadMD5.cs
@@ -0,0 +1,76 @@
+using System;
+using System.Security.Cryptography;
+using System.Threading;
+
+namespace Compress.ThreadReaders
+{
+ public class ThreadMD5 : IDisposable
+ {
+ private readonly AutoResetEvent _waitEvent;
+ private readonly AutoResetEvent _outEvent;
+ private readonly Thread _tWorker;
+
+ private readonly MD5 _md5;
+
+ private byte[] _buffer;
+ private int _size;
+ private bool _finished;
+
+ public ThreadMD5()
+ {
+ _waitEvent = new AutoResetEvent(false);
+ _outEvent = new AutoResetEvent(false);
+ _finished = false;
+ _md5 = MD5.Create();
+
+ _tWorker = new Thread(MainLoop);
+ _tWorker.Start();
+ }
+
+ public byte[] Hash => _md5.Hash;
+
+ public void Dispose()
+ {
+ _waitEvent.Close();
+ _outEvent.Close();
+ // _md5.Dispose();
+ }
+
+ private void MainLoop()
+ {
+ while (true)
+ {
+ _waitEvent.WaitOne();
+ if (_finished)
+ {
+ break;
+ }
+ _md5.TransformBlock(_buffer, 0, _size, null, 0);
+ _outEvent.Set();
+ }
+
+ byte[] tmp = new byte[0];
+ _md5.TransformFinalBlock(tmp, 0, 0);
+ }
+
+ public void Trigger(byte[] buffer, int size)
+ {
+ _buffer = buffer;
+ _size = size;
+ _waitEvent.Set();
+ }
+
+ public void Wait()
+ {
+ _outEvent.WaitOne();
+ }
+
+
+ public void Finish()
+ {
+ _finished = true;
+ _waitEvent.Set();
+ _tWorker.Join();
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/ThreadReaders/ThreadSHA1.cs b/SabreTools.Library/External/Compress/ThreadReaders/ThreadSHA1.cs
new file mode 100644
index 00000000..b00bece8
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ThreadReaders/ThreadSHA1.cs
@@ -0,0 +1,76 @@
+using System;
+using System.Security.Cryptography;
+using System.Threading;
+
+namespace Compress.ThreadReaders
+{
+ public class ThreadSHA1 : IDisposable
+ {
+ private readonly AutoResetEvent _waitEvent;
+ private readonly AutoResetEvent _outEvent;
+ private readonly Thread _tWorker;
+
+ private readonly SHA1 _sha1;
+
+ private byte[] _buffer;
+ private int _size;
+ private bool _finished;
+
+ public ThreadSHA1()
+ {
+ _waitEvent = new AutoResetEvent(false);
+ _outEvent = new AutoResetEvent(false);
+ _finished = false;
+ _sha1 = SHA1.Create();
+
+ _tWorker = new Thread(MainLoop);
+ _tWorker.Start();
+ }
+
+ public byte[] Hash => _sha1.Hash;
+
+ public void Dispose()
+ {
+ _waitEvent.Close();
+ _outEvent.Close();
+ // _sha1.Dispose();
+ }
+
+ private void MainLoop()
+ {
+ while (true)
+ {
+ _waitEvent.WaitOne();
+ if (_finished)
+ {
+ break;
+ }
+ _sha1.TransformBlock(_buffer, 0, _size, null, 0);
+ _outEvent.Set();
+ }
+
+ byte[] tmp = new byte[0];
+ _sha1.TransformFinalBlock(tmp, 0, 0);
+ }
+
+ public void Trigger(byte[] buffer, int size)
+ {
+ _buffer = buffer;
+ _size = size;
+ _waitEvent.Set();
+ }
+
+ public void Wait()
+ {
+ _outEvent.WaitOne();
+ }
+
+
+ public void Finish()
+ {
+ _finished = true;
+ _waitEvent.Set();
+ _tWorker.Join();
+ }
+ }
+}
\ No newline at end of file
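
ThreadCRC, ThreadMD5 and ThreadSHA1 all share the same worker protocol: Trigger() hands the worker a buffer and wakes it, Wait() blocks until that buffer has been consumed, and Finish() shuts the worker thread down (which is also where MD5/SHA1 finalize their digests). ThreadLoadBuffer is the read-side counterpart; with two alternating buffers it lets the next disk read overlap the hashing. A minimal single-buffer sketch of driving the three hashers over a stream, assuming the classes above; the block size and method name are arbitrary.

using System;
using System.IO;
using Compress.ThreadReaders;

static class ParallelHashSketch
{
    // Feeds every block of 'input' to CRC32, MD5 and SHA1 workers at the same time.
    public static void HashStream(Stream input)
    {
        const int blockSize = 1024 * 1024;
        byte[] buffer = new byte[blockSize];

        ThreadCRC crc = new ThreadCRC();
        ThreadMD5 md5 = new ThreadMD5();
        ThreadSHA1 sha1 = new ThreadSHA1();

        int read;
        while ((read = input.Read(buffer, 0, blockSize)) > 0)
        {
            // Hand the same buffer to all three workers...
            crc.Trigger(buffer, read);
            md5.Trigger(buffer, read);
            sha1.Trigger(buffer, read);

            // ...and only reuse it once every worker reports it is done.
            crc.Wait();
            md5.Wait();
            sha1.Wait();
        }

        // Finish() joins the worker threads; MD5/SHA1 finalize their digests there,
        // so Hash must only be read after this point.
        crc.Finish();
        md5.Finish();
        sha1.Finish();

        Console.WriteLine($"CRC32 = {BitConverter.ToString(crc.Hash)}");
        Console.WriteLine($"MD5   = {BitConverter.ToString(md5.Hash)}");
        Console.WriteLine($"SHA1  = {BitConverter.ToString(sha1.Hash)}");

        crc.Dispose();
        md5.Dispose();
        sha1.Dispose();
    }
}
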
diff --git a/SabreTools.Library/External/Compress/Utils/CRC.cs b/SabreTools.Library/External/Compress/Utils/CRC.cs
new file mode 100644
index 00000000..a39d2b14
--- /dev/null
+++ b/SabreTools.Library/External/Compress/Utils/CRC.cs
@@ -0,0 +1,125 @@
+using System;
+
+namespace Compress.Utils
+{
+ public class CRC
+ {
+ public static readonly uint[] CRC32Lookup;
+ private uint _crc;
+ private long _totalBytesRead;
+
+ static CRC()
+ {
+ const uint polynomial = 0xEDB88320;
+ const int crcNumTables = 8;
+
+ unchecked
+ {
+ CRC32Lookup = new uint[256 * crcNumTables];
+ int i;
+ for (i = 0; i < 256; i++)
+ {
+ uint r = (uint)i;
+ for (int j = 0; j < 8; j++)
+ {
+ r = (r >> 1) ^ (polynomial & ~((r & 1) - 1));
+ }
+
+ CRC32Lookup[i] = r;
+ }
+
+ for (; i < 256 * crcNumTables; i++)
+ {
+ uint r = CRC32Lookup[i - 256];
+ CRC32Lookup[i] = CRC32Lookup[r & 0xFF] ^ (r >> 8);
+ }
+ }
+ }
+
+
+ public CRC()
+ {
+ Reset();
+ }
+
+ public void Reset()
+ {
+ _totalBytesRead = 0;
+ _crc = 0xffffffffu;
+ }
+
+
+ internal void UpdateCRC(int inCh)
+ {
+ _crc = (_crc >> 8) ^ CRC32Lookup[(byte)_crc ^ ((byte)inCh)];
+ }
+
+ public void SlurpBlock(byte[] block, int offset, int count)
+ {
+ _totalBytesRead += count;
+ uint crc = _crc;
+
+ for (; (offset & 7) != 0 && count != 0; count--)
+ crc = (crc >> 8) ^ CRC32Lookup[(byte)crc ^ block[offset++]];
+
+ if (count >= 8)
+ {
+ int end = (count - 8) & ~7;
+ count -= end;
+ end += offset;
+
+ while (offset != end)
+ {
+ crc ^= (uint)(block[offset] + (block[offset + 1] << 8) + (block[offset + 2] << 16) + (block[offset + 3] << 24));
+ uint high = (uint)(block[offset + 4] + (block[offset + 5] << 8) + (block[offset + 6] << 16) + (block[offset + 7] << 24));
+ offset += 8;
+
+ crc = CRC32Lookup[(byte)crc + 0x700]
+ ^ CRC32Lookup[(byte)(crc >>= 8) + 0x600]
+ ^ CRC32Lookup[(byte)(crc >>= 8) + 0x500]
+ ^ CRC32Lookup[ /*(byte)*/(crc >> 8) + 0x400]
+ ^ CRC32Lookup[(byte)high + 0x300]
+ ^ CRC32Lookup[(byte)(high >>= 8) + 0x200]
+ ^ CRC32Lookup[(byte)(high >>= 8) + 0x100]
+ ^ CRC32Lookup[ /*(byte)*/(high >> 8) + 0x000];
+ }
+ }
+
+ while (count-- != 0)
+ {
+ crc = (crc >> 8) ^ CRC32Lookup[(byte)crc ^ block[offset++]];
+ }
+
+ _crc = crc;
+
+ }
+
+ public byte[] Crc32ResultB
+ {
+ get
+ {
+ byte[] result = BitConverter.GetBytes(~_crc);
+ Array.Reverse(result);
+ return result;
+ }
+ }
+ public Int32 Crc32Result => unchecked((Int32)(~_crc));
+
+ public uint Crc32ResultU => ~_crc;
+
+ public Int64 TotalBytesRead => _totalBytesRead;
+
+ public static uint CalculateDigest(byte[] data, uint offset, uint size)
+ {
+ CRC crc = new CRC();
+ // crc.Init();
+ crc.SlurpBlock(data, (int)offset, (int)size);
+ return crc.Crc32ResultU;
+ }
+
+ public static bool VerifyDigest(uint digest, byte[] data, uint offset, uint size)
+ {
+ return (CalculateDigest(data, offset, size) == digest);
+ }
+ }
+}
\ No newline at end of file
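
A quick sanity check for the table-driven (slicing-by-8) CRC above: with the reflected 0xEDB88320 polynomial, an all-ones initial value and a final complement, the ASCII string "123456789" produces the standard CRC-32 check value 0xCBF43926, whether it is fed in one SlurpBlock call or several. A small sketch, assuming the Compress.Utils.CRC class compiles as shown.

using System;
using System.Text;
using Compress.Utils;

static class CrcCheckSketch
{
    public static void Check()
    {
        // One-shot digest of the classic check string.
        byte[] data = Encoding.ASCII.GetBytes("123456789");
        uint digest = CRC.CalculateDigest(data, 0, (uint)data.Length);
        Console.WriteLine($"0x{digest:X8} (expected 0xCBF43926)");

        // Incremental use: SlurpBlock accepts arbitrary block boundaries.
        CRC crc = new CRC();
        crc.SlurpBlock(data, 0, 4);
        crc.SlurpBlock(data, 4, data.Length - 4);
        Console.WriteLine($"0x{crc.Crc32ResultU:X8} (same value, fed in two blocks)");
    }
}
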
diff --git a/SabreTools.Library/External/Compress/Utils/CRCStream.cs b/SabreTools.Library/External/Compress/Utils/CRCStream.cs
new file mode 100644
index 00000000..5c416e82
--- /dev/null
+++ b/SabreTools.Library/External/Compress/Utils/CRCStream.cs
@@ -0,0 +1,343 @@
+using System;
+
+namespace Compress.Utils
+{
+ class CRCStream
+ {
+ }
+
+ ///
+ /// A Stream that calculates a CRC32 (a checksum) on all bytes read,
+ /// or on all bytes written.
+ ///
+ ///
+ ///
+ ///
+ /// This class can be used to verify the CRC of a ZipEntry when
+ /// reading from a stream, or to calculate a CRC when writing to a
+ /// stream. The stream should be used to either read, or write, but
+ /// not both. If you intermix reads and writes, the results are not
+ /// defined.
+ ///
+ ///
+ ///
+ /// This class is intended primarily for use internally by the
+ /// DotNetZip library.
+ ///
+ ///
+ public class CrcCalculatorStream : System.IO.Stream, System.IDisposable
+ {
+ private static readonly Int64 UnsetLengthLimit = -99;
+
+ internal System.IO.Stream _innerStream;
+ private CRC _Crc32;
+ private Int64 _lengthLimit = -99;
+ private bool _leaveOpen;
+
+ ///
+ /// The default constructor.
+ ///
+ ///
+ ///
+ /// Instances returned from this constructor will leave the underlying
+ /// stream open upon Close(). The stream uses the default CRC32
+ /// algorithm, which implies a polynomial of 0xEDB88320.
+ ///
+ ///
+ /// The underlying stream
+ public CrcCalculatorStream(System.IO.Stream stream)
+ : this(true, UnsetLengthLimit, stream, null)
+ {
+ }
+
+ ///
+ /// The constructor allows the caller to specify how to handle the
+ /// underlying stream at close.
+ ///
+ ///
+ ///
+ /// The stream uses the default CRC32 algorithm, which implies a
+ /// polynomial of 0xEDB88320.
+ ///
+ ///
+ /// The underlying stream
+ /// true to leave the underlying stream
+ /// open upon close of the CrcCalculatorStream; false otherwise.
+ public CrcCalculatorStream(System.IO.Stream stream, bool leaveOpen)
+ : this(leaveOpen, UnsetLengthLimit, stream, null)
+ {
+ }
+
+ ///
+ /// A constructor allowing the specification of the length of the stream
+ /// to read.
+ ///
+ ///
+ ///
+ /// The stream uses the default CRC32 algorithm, which implies a
+ /// polynomial of 0xEDB88320.
+ ///
+ ///
+ /// Instances returned from this constructor will leave the underlying
+ /// stream open upon Close().
+ ///
+ ///
+ /// The underlying stream
+ /// The length of the stream to slurp
+ public CrcCalculatorStream(System.IO.Stream stream, Int64 length)
+ : this(true, length, stream, null)
+ {
+ if (length < 0)
+ throw new ArgumentException("length");
+ }
+
+ ///
+ /// A constructor allowing the specification of the length of the stream
+ /// to read, as well as whether to keep the underlying stream open upon
+ /// Close().
+ ///
+ ///
+ ///
+ /// The stream uses the default CRC32 algorithm, which implies a
+ /// polynomial of 0xEDB88320.
+ ///
+ ///
+ /// The underlying stream
+ /// The length of the stream to slurp
+ /// true to leave the underlying stream
+ /// open upon close of the CrcCalculatorStream; false otherwise.
+ public CrcCalculatorStream(System.IO.Stream stream, Int64 length, bool leaveOpen)
+ : this(leaveOpen, length, stream, null)
+ {
+ if (length < 0)
+ throw new ArgumentException("length");
+ }
+
+ ///
+ /// A constructor allowing the specification of the length of the stream
+ /// to read, as well as whether to keep the underlying stream open upon
+ /// Close(), and the CRC32 instance to use.
+ ///
+ ///
+ ///
+ /// The stream uses the specified CRC32 instance, which allows the
+ /// application to specify how the CRC gets calculated.
+ ///
+ ///
+ /// The underlying stream
+ /// The length of the stream to slurp
+ /// true to leave the underlying stream
+ /// open upon close of the CrcCalculatorStream; false otherwise.
+ /// the CRC32 instance to use to calculate the CRC32
+ public CrcCalculatorStream(System.IO.Stream stream, Int64 length, bool leaveOpen,
+ CRC crc32)
+ : this(leaveOpen, length, stream, crc32)
+ {
+ if (length < 0)
+ throw new ArgumentException("length");
+ }
+
+
+ // This ctor is private - no validation is done here. This is to allow the use
+ // of a (specific) negative value for the _lengthLimit, to indicate that there
+ // is no length set. So we validate the length limit in those ctors that use an
+ // explicit param, otherwise we don't validate, because it could be our special
+ // value.
+ private CrcCalculatorStream
+ (bool leaveOpen, Int64 length, System.IO.Stream stream, CRC crc32)
+ : base()
+ {
+ _innerStream = stream;
+ _Crc32 = crc32 ?? new CRC();
+ _lengthLimit = length;
+ _leaveOpen = leaveOpen;
+ }
+
+
+ ///
+ /// Gets the total number of bytes run through the CRC32 calculator.
+ ///
+ ///
+ ///
+ /// This is either the total number of bytes read, or the total number of
+ /// bytes written, depending on the direction of this stream.
+ ///
+ public Int64 TotalBytesSlurped
+ {
+ get { return _Crc32.TotalBytesRead; }
+ }
+
+ ///
+ /// Provides the current CRC for all blocks slurped in.
+ ///
+ ///
+ ///
+ /// The running total of the CRC is kept as data is written or read
+ /// through the stream. read this property after all reads or writes to
+ /// get an accurate CRC for the entire stream.
+ ///
+ ///
+ public Int32 Crc
+ {
+ get { return _Crc32.Crc32Result; }
+ }
+
+ ///
+ /// Indicates whether the underlying stream will be left open when the
+ /// CrcCalculatorStream is Closed.
+ ///
+ ///
+ ///
+ /// Set this at any point before calling Close().
+ ///
+ ///
+ public bool LeaveOpen
+ {
+ get { return _leaveOpen; }
+ set { _leaveOpen = value; }
+ }
+
+ ///
+ /// Read from the stream
+ ///
+ /// the buffer to read
+ /// the offset at which to start
+ /// the number of bytes to read
+ /// the number of bytes actually read
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ int bytesToRead = count;
+
+ // Need to limit the # of bytes returned, if the stream is intended to have
+ // a definite length. This is especially useful when returning a stream for
+ // the uncompressed data directly to the application. The app won't
+ // necessarily read only the UncompressedSize number of bytes. For example
+ // wrapping the stream returned from OpenReader() into a StreamReader() and
+ // calling ReadToEnd() on it can "over-read" the zip data and produce a
+ // corrupt string. The length limit prevents that problem.
+
+ if (_lengthLimit != UnsetLengthLimit)
+ {
+ if (_Crc32.TotalBytesRead >= _lengthLimit) return 0; // EOF
+ Int64 bytesRemaining = _lengthLimit - _Crc32.TotalBytesRead;
+ if (bytesRemaining < count) bytesToRead = (int)bytesRemaining;
+ }
+ int n = _innerStream.Read(buffer, offset, bytesToRead);
+ if (n > 0) _Crc32.SlurpBlock(buffer, offset, n);
+ return n;
+ }
+
+ ///
+ /// Write to the stream.
+ ///
+ /// the buffer from which to write
+ /// the offset at which to start writing
+ /// the number of bytes to write
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ if (count > 0) _Crc32.SlurpBlock(buffer, offset, count);
+ _innerStream.Write(buffer, offset, count);
+ }
+
+ ///
+ /// Indicates whether the stream supports reading.
+ ///
+ public override bool CanRead
+ {
+ get { return _innerStream.CanRead; }
+ }
+
+ ///
+ /// Indicates whether the stream supports seeking.
+ ///
+ ///
+ ///
+ /// Always returns false.
+ ///
+ ///
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+ ///
+ /// Indicates whether the stream supports writing.
+ ///
+ public override bool CanWrite
+ {
+ get { return _innerStream.CanWrite; }
+ }
+
+ ///
+ /// Flush the stream.
+ ///
+ public override void Flush()
+ {
+ _innerStream.Flush();
+ }
+
+ ///
+ /// Returns the length of the underlying stream.
+ ///
+ public override long Length
+ {
+ get
+ {
+ if (_lengthLimit == CrcCalculatorStream.UnsetLengthLimit)
+ return _innerStream.Length;
+ else return _lengthLimit;
+ }
+ }
+
+ ///
+ /// The getter for this property returns the total bytes read.
+ /// If you use the setter, it will throw a
+ /// NotSupportedException.
+ ///
+ public override long Position
+ {
+ get { return _Crc32.TotalBytesRead; }
+ set { throw new NotSupportedException(); }
+ }
+
+ ///
+ /// Seeking is not supported on this stream. This method always throws a NotSupportedException.
+ ///
+ ///
+ /// N/A
+ /// N/A
+ /// N/A
+ public override long Seek(long offset, System.IO.SeekOrigin origin)
+ {
+ throw new NotSupportedException();
+ }
+
+ ///
+ /// This method always throws a NotSupportedException.
+ ///
+ ///
+ /// N/A
+ public override void SetLength(long value)
+ {
+ throw new NotSupportedException();
+ }
+
+
+ void IDisposable.Dispose()
+ {
+ Close();
+ }
+
+ ///
+ /// Closes the stream.
+ ///
+ public override void Close()
+ {
+ base.Close();
+ if (!_leaveOpen)
+ _innerStream.Close();
+ }
+
+ }
+
+}
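
A minimal usage sketch for CrcCalculatorStream: wrap an inner stream, read through the wrapper, then read the Crc property; for the same "123456789" payload the result is again 0xCBF43926. leaveOpen is passed as true so closing the wrapper leaves the inner MemoryStream alone. The class and payload here are illustrative.

using System;
using System.IO;
using System.Text;
using Compress.Utils;

static class CrcStreamSketch
{
    public static void Demo()
    {
        byte[] payload = Encoding.ASCII.GetBytes("123456789");

        using (MemoryStream inner = new MemoryStream(payload))
        using (CrcCalculatorStream crcStream = new CrcCalculatorStream(inner, true))
        {
            // Reading through the wrapper accumulates the CRC as a side effect.
            byte[] sink = new byte[payload.Length];
            int n = crcStream.Read(sink, 0, sink.Length);
            Console.WriteLine($"read {n} bytes, CRC = 0x{crcStream.Crc:X8}"); // 0xCBF43926 here
        }
    }
}
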
diff --git a/SabreTools.Library/External/Compress/Utils/DirUtil.cs b/SabreTools.Library/External/Compress/Utils/DirUtil.cs
new file mode 100644
index 00000000..ad67dfb0
--- /dev/null
+++ b/SabreTools.Library/External/Compress/Utils/DirUtil.cs
@@ -0,0 +1,39 @@
+using RVIO;
+
+namespace Compress.Utils
+{
+ public static class DirUtil
+ {
+ public static void CreateDirForFile(string sFilename)
+ {
+ string strTemp = Path.GetDirectoryName(sFilename);
+
+ if (string.IsNullOrEmpty(strTemp))
+ {
+ return;
+ }
+
+ if (Directory.Exists(strTemp))
+ {
+ return;
+ }
+
+
+ while ((strTemp.Length > 0) && !Directory.Exists(strTemp))
+ {
+ int pos = strTemp.LastIndexOf(Path.DirectorySeparatorChar);
+ if (pos < 0)
+ {
+ pos = 0;
+ }
+ strTemp = strTemp.Substring(0, pos);
+ }
+
+ while (sFilename.IndexOf(Path.DirectorySeparatorChar, strTemp.Length + 1) > 0)
+ {
+ strTemp = sFilename.Substring(0, sFilename.IndexOf(Path.DirectorySeparatorChar, strTemp.Length + 1));
+ Directory.CreateDirectory(strTemp);
+ }
+ }
+ }
+}
\ No newline at end of file
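
DirUtil.CreateDirForFile() above walks back to the deepest existing ancestor of the file's directory and then creates each missing level in turn through the RVIO wrappers, never creating the file-name component itself. A one-line usage sketch; the path is illustrative.

using Compress.Utils;

static class DirUtilSketch
{
    public static void EnsureDirs()
    {
        // Creates "roms", "roms\sorted" and "roms\sorted\nes" if missing;
        // the file name "game.7z" itself is never created.
        DirUtil.CreateDirForFile(@"roms\sorted\nes\game.7z");
    }
}
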
diff --git a/SabreTools.Library/External/Compress/Utils/Reporter.cs b/SabreTools.Library/External/Compress/Utils/Reporter.cs
new file mode 100644
index 00000000..a6a60588
--- /dev/null
+++ b/SabreTools.Library/External/Compress/Utils/Reporter.cs
@@ -0,0 +1,43 @@
+
+namespace Compress.Utils
+{
+ public static class Reporter
+ {
+ public static string ToArrayString(this ulong[] arr)
+ {
+ if (arr == null)
+ return "NULL";
+
+ string ret = $"({arr.Length}) " + arr[0].ToString();
+ for (int i = 1; i < arr.Length; i++)
+ {
+ ret += "," + arr[i].ToString();
+ }
+
+ return ret;
+ }
+ public static string ToArrayString(this byte[] arr)
+ {
+ if (arr == null)
+ return "NULL";
+
+ string ret = $"({arr.Length}) " + arr[0].ToString("X2");
+ for (int i = 1; i < arr.Length; i++)
+ {
+ ret += "," + arr[i].ToString("X2");
+ }
+
+ return ret;
+ }
+
+
+ public static string ToHex(this uint? v)
+ {
+ return v == null ? "NULL" : ((uint)v).ToString("X8");
+ }
+ public static string ToHex(this ulong? v)
+ {
+ return v == null ? "NULL" : ((ulong)v).ToString("X8");
+ }
+ }
+}
diff --git a/SabreTools.Library/External/Compress/ZipEnums.cs b/SabreTools.Library/External/Compress/ZipEnums.cs
new file mode 100644
index 00000000..f51773af
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ZipEnums.cs
@@ -0,0 +1,52 @@
+using System;
+
+namespace Compress
+{
+ public enum ZipReturn
+ {
+ ZipGood,
+ ZipFileLocked,
+ ZipFileCountError,
+ ZipSignatureError,
+ ZipExtraDataOnEndOfZip,
+ ZipUnsupportedCompression,
+ ZipLocalFileHeaderError,
+ ZipCentralDirError,
+ ZipEndOfCentralDirectoryError,
+ Zip64EndOfCentralDirError,
+ Zip64EndOfCentralDirectoryLocatorError,
+ ZipReadingFromOutputFile,
+ ZipWritingToInputFile,
+ ZipErrorGettingDataStream,
+ ZipCRCDecodeError,
+ ZipDecodeError,
+ ZipFileNameToLong,
+ ZipFileAlreadyOpen,
+ ZipCannotFastOpen,
+ ZipErrorOpeningFile,
+ ZipErrorFileNotFound,
+ ZipErrorReadingFile,
+ ZipErrorTimeStamp,
+ ZipErrorRollBackFile,
+ ZipTryingToAccessADirectory,
+ ZipUntested
+
+ }
+
+ public enum ZipOpenType
+ {
+ Closed,
+ OpenRead,
+ OpenWrite,
+ OpenFakeWrite
+ }
+
+ [Flags]
+ public enum ZipStatus
+ {
+ None = 0x0,
+ TrrntZip = 0x1,
+ ExtraData = 0x2,
+ Trrnt7Zip = 0x4
+ }
+}
diff --git a/SabreTools.Library/External/Compress/ZipFile/ZLib/Deflate.cs b/SabreTools.Library/External/Compress/ZipFile/ZLib/Deflate.cs
new file mode 100644
index 00000000..83830228
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ZipFile/ZLib/Deflate.cs
@@ -0,0 +1,1888 @@
+// Deflate.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2011-August-03 19:52:15>
+//
+// ------------------------------------------------------------------
+//
+// This module defines logic for handling the Deflate or compression.
+//
+// This code is based on multiple sources:
+// - the original zlib v1.2.3 source, which is Copyright (C) 1995-2005 Jean-loup Gailly.
+// - the original jzlib, which is Copyright (c) 2000-2003 ymnk, JCraft,Inc.
+//
+// However, this code is significantly different from both.
+// The object model is not the same, and many of the behaviors are different.
+//
+// In keeping with the license for these other works, the copyrights for
+// jzlib and zlib are here.
+//
+// -----------------------------------------------------------------------
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+using System;
+
+namespace Compress.ZipFile.ZLib
+{
+
+ internal enum BlockState
+ {
+ NeedMore = 0, // block not completed, need more input or more output
+ BlockDone, // block flush performed
+ FinishStarted, // finish started, need only more output at next deflate
+ FinishDone // finish done, accept no more input or output
+ }
+
+ internal enum DeflateFlavor
+ {
+ Store,
+ Fast,
+ Slow
+ }
+
+ internal sealed class DeflateManager
+ {
+ private static readonly int MEM_LEVEL_MAX = 9;
+ private static readonly int MEM_LEVEL_DEFAULT = 8;
+
+ internal delegate BlockState CompressFunc(FlushType flush);
+
+ internal class Config
+ {
+ // Use a faster search when the previous match is longer than this
+ internal int GoodLength; // reduce lazy search above this match length
+
+ // Attempt to find a better match only when the current match is
+ // strictly smaller than this value. This mechanism is used only for
+ // compression levels >= 4. For levels 1,2,3: MaxLazy is actually
+ // MaxInsertLength. (See DeflateFast)
+
+ internal int MaxLazy; // do not perform lazy search above this match length
+
+ internal int NiceLength; // quit search above this match length
+
+ // To speed up deflation, hash chains are never searched beyond this
+ // length. A higher limit improves compression ratio but degrades the speed.
+
+ internal int MaxChainLength;
+
+ internal DeflateFlavor Flavor;
+
+ private Config(int goodLength, int maxLazy, int niceLength, int maxChainLength, DeflateFlavor flavor)
+ {
+ this.GoodLength = goodLength;
+ this.MaxLazy = maxLazy;
+ this.NiceLength = niceLength;
+ this.MaxChainLength = maxChainLength;
+ this.Flavor = flavor;
+ }
+
+ public static Config Lookup(CompressionLevel level)
+ {
+ return Table[(int)level];
+ }
+
+
+ static Config()
+ {
+ Table = new Config[] {
+ new Config(0, 0, 0, 0, DeflateFlavor.Store),
+ new Config(4, 4, 8, 4, DeflateFlavor.Fast),
+ new Config(4, 5, 16, 8, DeflateFlavor.Fast),
+ new Config(4, 6, 32, 32, DeflateFlavor.Fast),
+
+ new Config(4, 4, 16, 16, DeflateFlavor.Slow),
+ new Config(8, 16, 32, 32, DeflateFlavor.Slow),
+ new Config(8, 16, 128, 128, DeflateFlavor.Slow),
+ new Config(8, 32, 128, 256, DeflateFlavor.Slow),
+ new Config(32, 128, 258, 1024, DeflateFlavor.Slow),
+ new Config(32, 258, 258, 4096, DeflateFlavor.Slow),
+ };
+ }
+
+ private static readonly Config[] Table;
+ }
+
+
+ private CompressFunc DeflateFunction;
+
+ private static readonly System.String[] _ErrorMessage = new System.String[]
+ {
+ "need dictionary",
+ "stream end",
+ "",
+ "file error",
+ "stream error",
+ "data error",
+ "insufficient memory",
+ "buffer error",
+ "incompatible version",
+ ""
+ };
+
+ // preset dictionary flag in zlib header
+ private static readonly int PRESET_DICT = 0x20;
+
+ private static readonly int INIT_STATE = 42;
+ private static readonly int BUSY_STATE = 113;
+ private static readonly int FINISH_STATE = 666;
+
+ // The deflate compression method
+ private static readonly int Z_DEFLATED = 8;
+
+ private static readonly int STORED_BLOCK = 0;
+ private static readonly int STATIC_TREES = 1;
+ private static readonly int DYN_TREES = 2;
+
+ // The three kinds of block type
+ private static readonly int Z_BINARY = 0;
+ private static readonly int Z_ASCII = 1;
+ private static readonly int Z_UNKNOWN = 2;
+
+ private static readonly int Buf_size = 8 * 2;
+
+ private static readonly int MIN_MATCH = 3;
+ private static readonly int MAX_MATCH = 258;
+
+ private static readonly int MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
+
+ private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1);
+
+ private static readonly int END_BLOCK = 256;
+
+ internal ZlibCodec _codec; // the zlib encoder/decoder
+ internal int status; // as the name implies
+ internal byte[] pending; // output still pending - waiting to be compressed
+ internal int nextPending; // index of next pending byte to output to the stream
+ internal int pendingCount; // number of bytes in the pending buffer
+
+ internal sbyte data_type; // UNKNOWN, BINARY or ASCII
+ internal int last_flush; // value of flush param for previous deflate call
+
+ internal int w_size; // LZ77 window size (32K by default)
+ internal int w_bits; // log2(w_size) (8..16)
+ internal int w_mask; // w_size - 1
+
+ //internal byte[] dictionary;
+ internal byte[] window;
+
+ // Sliding window. Input bytes are read into the second half of the window,
+ // and move to the first half later to keep a dictionary of at least wSize
+ // bytes. With this organization, matches are limited to a distance of
+ // wSize-MAX_MATCH bytes, but this ensures that IO is always
+ // performed with a length multiple of the block size.
+ //
+ // To do: use the user input buffer as sliding window.
+
+ internal int window_size;
+ // Actual size of window: 2*wSize, except when the user input buffer
+ // is directly used as sliding window.
+
+ internal short[] prev;
+ // Link to older string with same hash index. To limit the size of this
+ // array to 64K, this link is maintained only for the last 32K strings.
+ // An index in this array is thus a window InStreamIndex modulo 32K.
+
+ internal short[] head; // Heads of the hash chains or NIL.
+
+ internal int ins_h; // hash index of string to be inserted
+ internal int hash_size; // number of elements in hash table
+ internal int hash_bits; // log2(hash_size)
+ internal int hash_mask; // hash_size-1
+
+ // Number of bits by which ins_h must be shifted at each input
+ // step. It must be such that after MIN_MATCH steps, the oldest
+ // byte no longer takes part in the hash key, that is:
+ // hash_shift * MIN_MATCH >= hash_bits
+ internal int hash_shift;
+
+ // Window position at the beginning of the current output block. Gets
+ // negative when the window is moved backwards.
+
+ internal int block_start;
+
+ Config config;
+ internal int match_length; // length of best match
+ internal int prev_match; // previous match
+ internal int match_available; // set if previous match exists
+ internal int strstart; // start of string to insert (index into window)
+ internal int match_start; // start of matching string
+ internal int lookahead; // number of valid bytes ahead in window
+
+ // Length of the best match at previous step. Matches not greater than this
+ // are discarded. This is used in the lazy match evaluation.
+ internal int prev_length;
+
+ // Insert new strings in the hash table only if the match length is not
+ // greater than this length. This saves time but degrades compression.
+ // max_insert_length is used only for compression levels <= 3.
+
+ internal CompressionLevel compressionLevel; // compression level (1..9)
+ internal CompressionStrategy compressionStrategy; // favor or force Huffman coding
+
+
+ internal short[] dyn_ltree; // literal and length tree
+ internal short[] dyn_dtree; // distance tree
+ internal short[] bl_tree; // Huffman tree for bit lengths
+
+ internal Tree treeLiterals = new Tree(); // desc for literal tree
+ internal Tree treeDistances = new Tree(); // desc for distance tree
+ internal Tree treeBitLengths = new Tree(); // desc for bit length tree
+
+ // number of codes at each bit length for an optimal tree
+ internal short[] bl_count = new short[InternalConstants.MAX_BITS + 1];
+
+ // heap used to build the Huffman trees
+ internal int[] heap = new int[2 * InternalConstants.L_CODES + 1];
+
+ internal int heap_len; // number of elements in the heap
+ internal int heap_max; // element of largest frequency
+
+ // The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ // The same heap array is used to build all trees.
+
+ // Depth of each subtree used as tie breaker for trees of equal frequency
+ internal sbyte[] depth = new sbyte[2 * InternalConstants.L_CODES + 1];
+
+ internal int _lengthOffset; // index for literals or lengths
+
+
+ // Size of match buffer for literals/lengths. There are 4 reasons for
+ // limiting lit_bufsize to 64K:
+ // - frequencies can be kept in 16 bit counters
+ // - if compression is not successful for the first block, all input
+ // data is still in the window so we can still emit a stored block even
+ // when input comes from standard input. (This can also be done for
+ // all blocks if lit_bufsize is not greater than 32K.)
+ // - if compression is not successful for a file smaller than 64K, we can
+ // even emit a stored file instead of a stored block (saving 5 bytes).
+ // This is applicable only for zip (not gzip or zlib).
+ // - creating new Huffman trees less frequently may not provide fast
+ // adaptation to changes in the input data statistics. (Take for
+ // example a binary file with poorly compressible code followed by
+ // a highly compressible string table.) Smaller buffer sizes give
+ // fast adaptation but have of course the overhead of transmitting
+ // trees more frequently.
+
+ internal int lit_bufsize;
+
+ internal int last_lit; // running index in l_buf
+
+ // Buffer for distances. To simplify the code, d_buf and l_buf have
+ // the same number of elements. To use different lengths, an extra flag
+ // array would be necessary.
+
+ internal int _distanceOffset; // index into pending where the distance (d_buf) data starts
+
+ internal int opt_len; // bit length of current block with optimal trees
+ internal int static_len; // bit length of current block with static trees
+ internal int matches; // number of string matches in current block
+ internal int last_eob_len; // bit length of EOB code for last block
+
+ // Output buffer. bits are inserted starting at the bottom (least
+ // significant bits).
+ internal short bi_buf;
+
+ // Number of valid bits in bi_buf. All bits above the last valid bit
+ // are always zero.
+ internal int bi_valid;
+
+
+ internal DeflateManager()
+ {
+ dyn_ltree = new short[HEAP_SIZE * 2];
+ dyn_dtree = new short[(2 * InternalConstants.D_CODES + 1) * 2]; // distance tree
+ bl_tree = new short[(2 * InternalConstants.BL_CODES + 1) * 2]; // Huffman tree for bit lengths
+ }
+
+
+ // lm_init
+ private void _InitializeLazyMatch()
+ {
+ window_size = 2 * w_size;
+
+ // clear the hash - workitem 9063
+ Array.Clear(head, 0, hash_size);
+ //for (int i = 0; i < hash_size; i++) head[i] = 0;
+
+ config = Config.Lookup(compressionLevel);
+ SetDeflater();
+
+ strstart = 0;
+ block_start = 0;
+ lookahead = 0;
+ match_length = prev_length = MIN_MATCH - 1;
+ match_available = 0;
+ ins_h = 0;
+ }
+
+ // Initialize the tree data structures for a new zlib stream.
+ private void _InitializeTreeData()
+ {
+ treeLiterals.dyn_tree = dyn_ltree;
+ treeLiterals.staticTree = StaticTree.Literals;
+
+ treeDistances.dyn_tree = dyn_dtree;
+ treeDistances.staticTree = StaticTree.Distances;
+
+ treeBitLengths.dyn_tree = bl_tree;
+ treeBitLengths.staticTree = StaticTree.BitLengths;
+
+ bi_buf = 0;
+ bi_valid = 0;
+ last_eob_len = 8; // enough lookahead for inflate
+
+ // Initialize the first block of the first file:
+ _InitializeBlocks();
+ }
+
+ internal void _InitializeBlocks()
+ {
+ // Initialize the trees.
+ for (int i = 0; i < InternalConstants.L_CODES; i++)
+ dyn_ltree[i * 2] = 0;
+ for (int i = 0; i < InternalConstants.D_CODES; i++)
+ dyn_dtree[i * 2] = 0;
+ for (int i = 0; i < InternalConstants.BL_CODES; i++)
+ bl_tree[i * 2] = 0;
+
+ dyn_ltree[END_BLOCK * 2] = 1;
+ opt_len = static_len = 0;
+ last_lit = matches = 0;
+ }
+
+ // Restore the heap property by moving down the tree starting at node k,
+ // exchanging a node with the smallest of its two sons if necessary, stopping
+ // when the heap property is re-established (each father smaller than its
+ // two sons).
+ internal void pqdownheap(short[] tree, int k)
+ {
+ int v = heap[k];
+ int j = k << 1; // left son of k
+ while (j <= heap_len)
+ {
+ // Set j to the smallest of the two sons:
+ if (j < heap_len && _IsSmaller(tree, heap[j + 1], heap[j], depth))
+ {
+ j++;
+ }
+ // Exit if v is smaller than both sons
+ if (_IsSmaller(tree, v, heap[j], depth))
+ break;
+
+ // Exchange v with the smallest son
+ heap[k] = heap[j]; k = j;
+ // And continue down the tree, setting j to the left son of k
+ j <<= 1;
+ }
+ heap[k] = v;
+ }
+
+ internal static bool _IsSmaller(short[] tree, int n, int m, sbyte[] depth)
+ {
+ short tn2 = tree[n * 2];
+ short tm2 = tree[m * 2];
+ return (tn2 < tm2 || (tn2 == tm2 && depth[n] <= depth[m]));
+ }
+
+
+ // Scan a literal or distance tree to determine the frequencies of the codes
+ // in the bit length tree.
+ internal void scan_tree(short[] tree, int max_code)
+ {
+ int n; // iterates over all tree elements
+ int prevlen = -1; // last emitted length
+ int curlen; // length of current code
+ int nextlen = (int)tree[0 * 2 + 1]; // length of next code
+ int count = 0; // repeat count of the current code
+ int max_count = 7; // max repeat count
+ int min_count = 4; // min repeat count
+
+ if (nextlen == 0)
+ {
+ max_count = 138; min_count = 3;
+ }
+ tree[(max_code + 1) * 2 + 1] = (short)0x7fff; // guard //??
+
+ for (n = 0; n <= max_code; n++)
+ {
+ curlen = nextlen; nextlen = (int)tree[(n + 1) * 2 + 1];
+ if (++count < max_count && curlen == nextlen)
+ {
+ continue;
+ }
+ else if (count < min_count)
+ {
+ bl_tree[curlen * 2] = (short)(bl_tree[curlen * 2] + count);
+ }
+ else if (curlen != 0)
+ {
+ if (curlen != prevlen)
+ bl_tree[curlen * 2]++;
+ bl_tree[InternalConstants.REP_3_6 * 2]++;
+ }
+ else if (count <= 10)
+ {
+ bl_tree[InternalConstants.REPZ_3_10 * 2]++;
+ }
+ else
+ {
+ bl_tree[InternalConstants.REPZ_11_138 * 2]++;
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0)
+ {
+ max_count = 138; min_count = 3;
+ }
+ else if (curlen == nextlen)
+ {
+ max_count = 6; min_count = 3;
+ }
+ else
+ {
+ max_count = 7; min_count = 4;
+ }
+ }
+ }
+
+ // Construct the Huffman tree for the bit lengths and return the index in
+ // bl_order of the last bit length code to send.
+ internal int build_bl_tree()
+ {
+ int max_blindex; // index of last bit length code of non zero freq
+
+ // Determine the bit length frequencies for literal and distance trees
+ scan_tree(dyn_ltree, treeLiterals.max_code);
+ scan_tree(dyn_dtree, treeDistances.max_code);
+
+ // Build the bit length tree:
+ treeBitLengths.build_tree(this);
+ // opt_len now includes the length of the tree representations, except
+ // the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+
+ // Determine the number of bit length codes to send. The pkzip format
+ // requires that at least 4 bit length codes be sent. (appnote.txt says
+ // 3 but the actual value used is 4.)
+ for (max_blindex = InternalConstants.BL_CODES - 1; max_blindex >= 3; max_blindex--)
+ {
+ if (bl_tree[Tree.bl_order[max_blindex] * 2 + 1] != 0)
+ break;
+ }
+ // Update opt_len to include the bit length tree and counts
+ opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
+
+ return max_blindex;
+ }
+
+
+ // Send the header for a block using dynamic Huffman trees: the counts, the
+ // lengths of the bit length codes, the literal tree and the distance tree.
+ // IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ internal void send_all_trees(int lcodes, int dcodes, int blcodes)
+ {
+ int rank; // index in bl_order
+
+ send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt
+ send_bits(dcodes - 1, 5);
+ send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt
+ for (rank = 0; rank < blcodes; rank++)
+ {
+ send_bits(bl_tree[Tree.bl_order[rank] * 2 + 1], 3);
+ }
+ send_tree(dyn_ltree, lcodes - 1); // literal tree
+ send_tree(dyn_dtree, dcodes - 1); // distance tree
+ }
+
+ // Send a literal or distance tree in compressed form, using the codes in
+ // bl_tree.
+ internal void send_tree(short[] tree, int max_code)
+ {
+ int n; // iterates over all tree elements
+ int prevlen = -1; // last emitted length
+ int curlen; // length of current code
+ int nextlen = tree[0 * 2 + 1]; // length of next code
+ int count = 0; // repeat count of the current code
+ int max_count = 7; // max repeat count
+ int min_count = 4; // min repeat count
+
+ if (nextlen == 0)
+ {
+ max_count = 138; min_count = 3;
+ }
+
+ for (n = 0; n <= max_code; n++)
+ {
+ curlen = nextlen; nextlen = tree[(n + 1) * 2 + 1];
+ if (++count < max_count && curlen == nextlen)
+ {
+ continue;
+ }
+ else if (count < min_count)
+ {
+ do
+ {
+ send_code(curlen, bl_tree);
+ }
+ while (--count != 0);
+ }
+ else if (curlen != 0)
+ {
+ if (curlen != prevlen)
+ {
+ send_code(curlen, bl_tree); count--;
+ }
+ send_code(InternalConstants.REP_3_6, bl_tree);
+ send_bits(count - 3, 2);
+ }
+ else if (count <= 10)
+ {
+ send_code(InternalConstants.REPZ_3_10, bl_tree);
+ send_bits(count - 3, 3);
+ }
+ else
+ {
+ send_code(InternalConstants.REPZ_11_138, bl_tree);
+ send_bits(count - 11, 7);
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0)
+ {
+ max_count = 138; min_count = 3;
+ }
+ else if (curlen == nextlen)
+ {
+ max_count = 6; min_count = 3;
+ }
+ else
+ {
+ max_count = 7; min_count = 4;
+ }
+ }
+ }
+
+ // Output a block of bytes on the stream.
+ // IN assertion: there is enough room in pending_buf.
+ private void put_bytes(byte[] p, int start, int len)
+ {
+ Array.Copy(p, start, pending, pendingCount, len);
+ pendingCount += len;
+ }
+
+#if NOTNEEDED
+ private void put_byte(byte c)
+ {
+ pending[pendingCount++] = c;
+ }
+ internal void put_short(int b)
+ {
+ unchecked
+ {
+ pending[pendingCount++] = (byte)b;
+ pending[pendingCount++] = (byte)(b >> 8);
+ }
+ }
+ internal void putShortMSB(int b)
+ {
+ unchecked
+ {
+ pending[pendingCount++] = (byte)(b >> 8);
+ pending[pendingCount++] = (byte)b;
+ }
+ }
+#endif
+
+ internal void send_code(int c, short[] tree)
+ {
+ int c2 = c * 2;
+ send_bits((tree[c2] & 0xffff), (tree[c2 + 1] & 0xffff));
+ }
+
+ internal void send_bits(int value, int length)
+ {
+ int len = length;
+ unchecked
+ {
+ if (bi_valid > (int)Buf_size - len)
+ {
+ //int val = value;
+ // bi_buf |= (val << bi_valid);
+
+ bi_buf |= (short)((value << bi_valid) & 0xffff);
+ //put_short(bi_buf);
+ pending[pendingCount++] = (byte)bi_buf;
+ pending[pendingCount++] = (byte)(bi_buf >> 8);
+
+
+ bi_buf = (short)((uint)value >> (Buf_size - bi_valid));
+ bi_valid += len - Buf_size;
+ }
+ else
+ {
+ // bi_buf |= (value) << bi_valid;
+ bi_buf |= (short)((value << bi_valid) & 0xffff);
+ bi_valid += len;
+ }
+ }
+ }
+
+ // Send one empty static block to give enough lookahead for inflate.
+ // This takes 10 bits, of which 7 may remain in the bit buffer.
+ // The current inflate code requires 9 bits of lookahead. If the
+ // last two codes for the previous block (real code plus EOB) were coded
+ // on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
+ // the last real code. In this case we send two empty static blocks instead
+ // of one. (There are no problems if the previous block is stored or fixed.)
+ // To simplify the code, we assume the worst case of last real code encoded
+ // on one bit only.
+ internal void _tr_align()
+ {
+ send_bits(STATIC_TREES << 1, 3);
+ send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes);
+
+ bi_flush();
+
+ // Of the 10 bits for the empty block, we have already sent
+ // (10 - bi_valid) bits. The lookahead for the last real code (before
+ // the EOB of the previous block) was thus at least one plus the length
+ // of the EOB plus what we have just sent of the empty static block.
+ if (1 + last_eob_len + 10 - bi_valid < 9)
+ {
+ send_bits(STATIC_TREES << 1, 3);
+ send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes);
+ bi_flush();
+ }
+ last_eob_len = 7;
+ }
+
+
+ // Save the match info and tally the frequency counts. Return true if
+ // the current block must be flushed.
+ internal bool _tr_tally(int dist, int lc)
+ {
+ pending[_distanceOffset + last_lit * 2] = unchecked((byte)((uint)dist >> 8));
+ pending[_distanceOffset + last_lit * 2 + 1] = unchecked((byte)dist);
+ pending[_lengthOffset + last_lit] = unchecked((byte)lc);
+ last_lit++;
+
+ if (dist == 0)
+ {
+ // lc is the unmatched char
+ dyn_ltree[lc * 2]++;
+ }
+ else
+ {
+ matches++;
+ // Here, lc is the match length - MIN_MATCH
+ dist--; // dist = match distance - 1
+ dyn_ltree[(Tree.LengthCode[lc] + InternalConstants.LITERALS + 1) * 2]++;
+ dyn_dtree[Tree.DistanceCode(dist) * 2]++;
+ }
+
+ /* ************************************************************
+ * *
+ * this code is not turned on by default in ZLIB Trrntzip code *
+ * *
+ * *************************************************************
+ */
+ if (false) //CompSettings
+ {
+ if ((last_lit & 0x1fff) == 0 && (int)compressionLevel > 2)
+ {
+ // Compute an upper bound for the compressed length
+ int out_length = last_lit << 3;
+ int in_length = strstart - block_start;
+ int dcode;
+ for (dcode = 0; dcode < InternalConstants.D_CODES; dcode++)
+ {
+ out_length = (int)(out_length + (int)dyn_dtree[dcode * 2] * (5L + Tree.ExtraDistanceBits[dcode]));
+ }
+ out_length >>= 3;
+ if ((matches < (last_lit / 2)) && out_length < in_length / 2)
+ return true;
+ }
+ }
+
+ return (last_lit == lit_bufsize - 1) || (last_lit == lit_bufsize);
+ // dinoch - wraparound?
+ // We avoid equality with lit_bufsize because of wraparound at 64K
+ // on 16 bit machines and because stored blocks are restricted to
+ // 64K-1 bytes.
+ }
+
+
+
+ // Send the block data compressed using the given Huffman trees
+ internal void send_compressed_block(short[] ltree, short[] dtree)
+ {
+ int distance; // distance of matched string
+ int lc; // match length or unmatched char (if dist == 0)
+ int lx = 0; // running index in l_buf
+ int code; // the code to send
+ int extra; // number of extra bits to send
+
+ if (last_lit != 0)
+ {
+ do
+ {
+ int ix = _distanceOffset + lx * 2;
+ distance = ((pending[ix] << 8) & 0xff00) |
+ (pending[ix + 1] & 0xff);
+ lc = (pending[_lengthOffset + lx]) & 0xff;
+ lx++;
+
+ if (distance == 0)
+ {
+ send_code(lc, ltree); // send a literal byte
+ }
+ else
+ {
+ // literal or match pair
+ // Here, lc is the match length - MIN_MATCH
+ code = Tree.LengthCode[lc];
+
+ // send the length code
+ send_code(code + InternalConstants.LITERALS + 1, ltree);
+ extra = Tree.ExtraLengthBits[code];
+ if (extra != 0)
+ {
+ // send the extra length bits
+ lc -= Tree.LengthBase[code];
+ send_bits(lc, extra);
+ }
+ distance--; // dist is now the match distance - 1
+ code = Tree.DistanceCode(distance);
+
+ // send the distance code
+ send_code(code, dtree);
+
+ extra = Tree.ExtraDistanceBits[code];
+ if (extra != 0)
+ {
+ // send the extra distance bits
+ distance -= Tree.DistanceBase[code];
+ send_bits(distance, extra);
+ }
+ }
+
+ // Check that the overlay between pending and d_buf+l_buf is ok:
+ }
+ while (lx < last_lit);
+ }
+
+ send_code(END_BLOCK, ltree);
+ last_eob_len = ltree[END_BLOCK * 2 + 1];
+ }
+
+
+
+ // Set the data type to ASCII or BINARY, using a crude approximation:
+ // binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
+ // IN assertion: the fields freq of dyn_ltree are set and the total of all
+ // frequencies does not exceed 64K (to fit in an int on 16 bit machines).
+ internal void set_data_type()
+ {
+ int n = 0;
+ int ascii_freq = 0;
+ int bin_freq = 0;
+ while (n < 7)
+ {
+ bin_freq += dyn_ltree[n * 2]; n++;
+ }
+ while (n < 128)
+ {
+ ascii_freq += dyn_ltree[n * 2]; n++;
+ }
+ while (n < InternalConstants.LITERALS)
+ {
+ bin_freq += dyn_ltree[n * 2]; n++;
+ }
+ data_type = (sbyte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
+ }
+
+
+
+ // Flush the bit buffer, keeping at most 7 bits in it.
+ internal void bi_flush()
+ {
+ if (bi_valid == 16)
+ {
+ pending[pendingCount++] = (byte)bi_buf;
+ pending[pendingCount++] = (byte)(bi_buf >> 8);
+ bi_buf = 0;
+ bi_valid = 0;
+ }
+ else if (bi_valid >= 8)
+ {
+ //put_byte((byte)bi_buf);
+ pending[pendingCount++] = (byte)bi_buf;
+ bi_buf >>= 8;
+ bi_valid -= 8;
+ }
+ }
+
+ // Flush the bit buffer and align the output on a byte boundary
+ internal void bi_windup()
+ {
+ if (bi_valid > 8)
+ {
+ pending[pendingCount++] = (byte)bi_buf;
+ pending[pendingCount++] = (byte)(bi_buf >> 8);
+ }
+ else if (bi_valid > 0)
+ {
+ //put_byte((byte)bi_buf);
+ pending[pendingCount++] = (byte)bi_buf;
+ }
+ bi_buf = 0;
+ bi_valid = 0;
+ }
+
+ // Copy a stored block, storing first the length and its
+ // one's complement if requested.
+ internal void copy_block(int buf, int len, bool header)
+ {
+ bi_windup(); // align on byte boundary
+ last_eob_len = 8; // enough lookahead for inflate
+
+ if (header)
+ unchecked
+ {
+ //put_short((short)len);
+ pending[pendingCount++] = (byte)len;
+ pending[pendingCount++] = (byte)(len >> 8);
+ //put_short((short)~len);
+ pending[pendingCount++] = (byte)~len;
+ pending[pendingCount++] = (byte)(~len >> 8);
+ }
+
+ put_bytes(window, buf, len);
+ }
+
+ internal void flush_block_only(bool eof)
+ {
+ _tr_flush_block(block_start >= 0 ? block_start : -1, strstart - block_start, eof);
+ block_start = strstart;
+ _codec.flush_pending();
+ }
+
+ // Copy without compression as much as possible from the input stream, return
+ // the current block state.
+ // This function does not insert new strings in the dictionary since
+ // uncompressible data is probably not useful. This function is used
+ // only for the level=0 compression option.
+ // NOTE: this function should be optimized to avoid extra copying from
+ // window to pending_buf.
+ internal BlockState DeflateNone(FlushType flush)
+ {
+ // Stored blocks are limited to 0xffff bytes, pending is limited
+ // to pending_buf_size, and each stored block has a 5 byte header:
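+ // the header is one block-type byte (the 3 type bits padded to a byte
+ // boundary) plus the 2-byte LEN and 2-byte NLEN fields.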
+
+ int max_block_size = 0xffff;
+ int max_start;
+
+ if (max_block_size > pending.Length - 5)
+ {
+ max_block_size = pending.Length - 5;
+ }
+
+ // Copy as much as possible from input to output:
+ while (true)
+ {
+ // Fill the window as much as possible:
+ if (lookahead <= 1)
+ {
+ _fillWindow();
+ if (lookahead == 0 && flush == FlushType.None)
+ return BlockState.NeedMore;
+ if (lookahead == 0)
+ break; // flush the current block
+ }
+
+ strstart += lookahead;
+ lookahead = 0;
+
+ // Emit a stored block if pending will be full:
+ max_start = block_start + max_block_size;
+ if (strstart == 0 || strstart >= max_start)
+ {
+ // strstart == 0 is possible when wraparound on 16-bit machine
+ lookahead = (int)(strstart - max_start);
+ strstart = (int)max_start;
+
+ flush_block_only(false);
+ if (_codec.AvailableBytesOut == 0)
+ return BlockState.NeedMore;
+ }
+
+ // Flush if we may have to slide, otherwise block_start may become
+ // negative and the data will be gone:
+ if (strstart - block_start >= w_size - MIN_LOOKAHEAD)
+ {
+ flush_block_only(false);
+ if (_codec.AvailableBytesOut == 0)
+ return BlockState.NeedMore;
+ }
+ }
+
+ flush_block_only(flush == FlushType.Finish);
+ if (_codec.AvailableBytesOut == 0)
+ return (flush == FlushType.Finish) ? BlockState.FinishStarted : BlockState.NeedMore;
+
+ return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
+ }
+
+
+ // Send a stored block
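+ // A DEFLATE block starts with a 3-bit header: BFINAL (the eof flag here)
+ // and a 2-bit BTYPE, where STORED_BLOCK (0) selects an uncompressed block
+ // (RFC 1951 3.2.3).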
+ internal void _tr_stored_block(int buf, int stored_len, bool eof)
+ {
+ send_bits((STORED_BLOCK << 1) + (eof ? 1 : 0), 3); // send block type
+ copy_block(buf, stored_len, true); // with header
+ }
+
+ // Determine the best encoding for the current block: dynamic trees, static
+ // trees or store, and output the encoded block to the zip file.
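+ // opt_len and static_len are bit counts; adding the 3 block-header bits plus 7
+ // and shifting right by 3 rounds each up to whole bytes, so the three
+ // candidates (stored, static, dynamic) can be compared by output size.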
+ internal void _tr_flush_block(int buf, int stored_len, bool eof)
+ {
+ int opt_lenb, static_lenb; // opt_len and static_len in bytes
+ int max_blindex = 0; // index of last bit length code of non zero freq
+
+ // Build the Huffman trees unless a stored block is forced
+ if (compressionLevel > 0)
+ {
+ // Check if the file is ascii or binary
+ if (data_type == Z_UNKNOWN)
+ set_data_type();
+
+ // Construct the literal and distance trees
+ treeLiterals.build_tree(this);
+
+ treeDistances.build_tree(this);
+
+ // At this point, opt_len and static_len are the total bit lengths of
+ // the compressed block data, excluding the tree representations.
+
+ // Build the bit length tree for the above two trees, and get the index
+ // in bl_order of the last bit length code to send.
+ max_blindex = build_bl_tree();
+
+ // Determine the best encoding. Compute first the block length in bytes
+ opt_lenb = (opt_len + 3 + 7) >> 3;
+ static_lenb = (static_len + 3 + 7) >> 3;
+
+ if (static_lenb <= opt_lenb)
+ opt_lenb = static_lenb;
+ }
+ else
+ {
+ opt_lenb = static_lenb = stored_len + 5; // force a stored block
+ }
+
+ if (stored_len + 4 <= opt_lenb && buf != -1)
+ {
+ // 4: two words for the lengths
+ // The test buf != -1 (NULL in the C original) is only necessary if LIT_BUFSIZE > WSIZE.
+ // Otherwise we can't have processed more than WSIZE input bytes since
+ // the last block flush, because compression would have been
+ // successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ // transform a block into a stored block.
+ _tr_stored_block(buf, stored_len, eof);
+ }
+ else if (static_lenb == opt_lenb)
+ {
+ send_bits((STATIC_TREES << 1) + (eof ? 1 : 0), 3);
+ send_compressed_block(StaticTree.lengthAndLiteralsTreeCodes, StaticTree.distTreeCodes);
+ }
+ else
+ {
+ send_bits((DYN_TREES << 1) + (eof ? 1 : 0), 3);
+ send_all_trees(treeLiterals.max_code + 1, treeDistances.max_code + 1, max_blindex + 1);
+ send_compressed_block(dyn_ltree, dyn_dtree);
+ }
+
+ // The above check is made mod 2^32, for files larger than 512 MB
+ // and uLong implemented on 32 bits.
+
+ _InitializeBlocks();
+
+ if (eof)
+ {
+ bi_windup();
+ }
+ }
+
+ // Fill the window when the lookahead becomes insufficient.
+ // Updates strstart and lookahead.
+ //
+ // IN assertion: lookahead < MIN_LOOKAHEAD
+ // OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ // At least one byte has been read, or avail_in == 0; reads are
+ // performed for at least two bytes (required for the zip translate_eol
+ // option -- not supported here).
+ private void _fillWindow()
+ {
+ int n, m;
+ int p;
+ int more; // Amount of free space at the end of the window.
+
+ do
+ {
+ more = (window_size - lookahead - strstart);
+
+ // Deal with !@#$% 64K limit:
+ if (more == 0 && strstart == 0 && lookahead == 0)
+ {
+ more = w_size;
+ }
+ else if (more == -1)
+ {
+ // Very unlikely, but possible on 16 bit machine if strstart == 0
+ // and lookahead == 1 (input done one byte at a time)
+ more--;
+
+ // If the window is almost full and there is insufficient lookahead,
+ // move the upper half to the lower one to make room in the upper half.
+ }
+ else if (strstart >= w_size + w_size - MIN_LOOKAHEAD)
+ {
+ Array.Copy(window, w_size, window, 0, w_size);
+ match_start -= w_size;
+ strstart -= w_size; // we now have strstart >= MAX_DIST
+ block_start -= w_size;
+
+ // Slide the hash table (could be avoided with 32 bit values
+ // at the expense of memory usage). We slide even when level == 0
+ // to keep the hash table consistent if we switch back to level > 0
+ // later. (Using level 0 permanently is not an optimal usage of
+ // zlib, so we don't care about this pathological case.)
+
+ n = hash_size;
+ p = n;
+ do
+ {
+ m = (head[--p] & 0xffff);
+ head[p] = (short)((m >= w_size) ? (m - w_size) : 0);
+ }
+ while (--n != 0);
+
+ n = w_size;
+ p = n;
+ do
+ {
+ m = (prev[--p] & 0xffff);
+ prev[p] = (short)((m >= w_size) ? (m - w_size) : 0);
+ // If n is not on any hash chain, prev[n] is garbage but
+ // its value will never be used.
+ }
+ while (--n != 0);
+ more += w_size;
+ }
+
+ if (_codec.AvailableBytesIn == 0)
+ return;
+
+ // If there was no sliding:
+ // strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ // more == window_size - lookahead - strstart
+ // => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ // => more >= window_size - 2*WSIZE + 2
+ // In the BIG_MEM or MMAP case (not yet supported),
+ // window_size == input_size + MIN_LOOKAHEAD &&
+ // strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ // Otherwise, window_size == 2*WSIZE so more >= 2.
+ // If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+
+ n = _codec.read_buf(window, strstart + lookahead, more);
+ lookahead += n;
+
+ // Initialize the hash value now that we have some input:
+ if (lookahead >= MIN_MATCH)
+ {
+ ins_h = window[strstart] & 0xff;
+ ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
+ }
+ // If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ // but this is not important since only literal bytes will be emitted.
+ }
+ while (lookahead < MIN_LOOKAHEAD && _codec.AvailableBytesIn != 0);
+ }
+
+ // Compress as much as possible from the input stream, return the current
+ // block state.
+ // This function does not perform lazy evaluation of matches and inserts
+ // new strings in the dictionary only for unmatched strings or for short
+ // matches. It is used only for the fast compression options.
+ internal BlockState DeflateFast(FlushType flush)
+ {
+ // short hash_head = 0; // head of the hash chain
+ int hash_head = 0; // head of the hash chain
+ bool bflush; // set if current block must be flushed
+
+ while (true)
+ {
+ // Make sure that we always have enough lookahead, except
+ // at the end of the input file. We need MAX_MATCH bytes
+ // for the next match, plus MIN_MATCH bytes to insert the
+ // string following the next match.
+ if (lookahead < MIN_LOOKAHEAD)
+ {
+ _fillWindow();
+ if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None)
+ {
+ return BlockState.NeedMore;
+ }
+ if (lookahead == 0)
+ break; // flush the current block
+ }
+
+ // Insert the string window[strstart .. strstart+2] in the
+ // dictionary, and set hash_head to the head of the hash chain:
+ if (lookahead >= MIN_MATCH)
+ {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+
+ // prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = unchecked((short)strstart);
+ }
+
+ // Find the longest match, discarding those <= prev_length.
+ // At this point we have always match_length < MIN_MATCH
+
+ if (hash_head != 0L && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD)
+ {
+ // To simplify the code, we prevent matches with the string
+ // of window index 0 (in particular we have to avoid a match
+ // of the string with itself at the start of the input file).
+ if (compressionStrategy != CompressionStrategy.HuffmanOnly)
+ {
+ match_length = longest_match(hash_head);
+ }
+ // longest_match() sets match_start
+ }
+ if (match_length >= MIN_MATCH)
+ {
+ // check_match(strstart, match_start, match_length);
+
+ bflush = _tr_tally(strstart - match_start, match_length - MIN_MATCH);
+
+ lookahead -= match_length;
+
+ // Insert new strings in the hash table only if the match length
+ // is not too large. This saves time but degrades compression.
+ if (match_length <= config.MaxLazy && lookahead >= MIN_MATCH)
+ {
+ match_length--; // string at strstart already in hash table
+ do
+ {
+ strstart++;
+
+ ins_h = ((ins_h << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ // prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = unchecked((short)strstart);
+
+ // strstart never exceeds WSIZE-MAX_MATCH, so there are
+ // always MIN_MATCH bytes ahead.
+ }
+ while (--match_length != 0);
+ strstart++;
+ }
+ else
+ {
+ strstart += match_length;
+ match_length = 0;
+ ins_h = window[strstart] & 0xff;
+
+ ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
+ // If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+ // matter since it will be recomputed at next deflate call.
+ }
+ }
+ else
+ {
+ // No match, output a literal byte
+
+ bflush = _tr_tally(0, window[strstart] & 0xff);
+ lookahead--;
+ strstart++;
+ }
+ if (bflush)
+ {
+ flush_block_only(false);
+ if (_codec.AvailableBytesOut == 0)
+ return BlockState.NeedMore;
+ }
+ }
+
+ flush_block_only(flush == FlushType.Finish);
+ if (_codec.AvailableBytesOut == 0)
+ {
+ if (flush == FlushType.Finish)
+ return BlockState.FinishStarted;
+ else
+ return BlockState.NeedMore;
+ }
+ return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
+ }
+
+ // Same as above, but achieves better compression. We use a lazy
+ // evaluation for matches: a match is finally adopted only if there is
+ // no better match at the next window position.
+ internal BlockState DeflateSlow(FlushType flush)
+ {
+ // short hash_head = 0; // head of hash chain
+ int hash_head = 0; // head of hash chain
+ bool bflush; // set if current block must be flushed
+
+ // Process the input block.
+ while (true)
+ {
+ // Make sure that we always have enough lookahead, except
+ // at the end of the input file. We need MAX_MATCH bytes
+ // for the next match, plus MIN_MATCH bytes to insert the
+ // string following the next match.
+
+ if (lookahead < MIN_LOOKAHEAD)
+ {
+ _fillWindow();
+ if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None)
+ return BlockState.NeedMore;
+
+ if (lookahead == 0)
+ break; // flush the current block
+ }
+
+ // Insert the string window[strstart .. strstart+2] in the
+ // dictionary, and set hash_head to the head of the hash chain:
+
+ if (lookahead >= MIN_MATCH)
+ {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ // prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = unchecked((short)strstart);
+ }
+
+ // Find the longest match, discarding those <= prev_length.
+ prev_length = match_length;
+ prev_match = match_start;
+ match_length = MIN_MATCH - 1;
+
+ if (hash_head != 0 && prev_length < config.MaxLazy &&
+ ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD)
+ {
+ // To simplify the code, we prevent matches with the string
+ // of window index 0 (in particular we have to avoid a match
+ // of the string with itself at the start of the input file).
+
+ if (compressionStrategy != CompressionStrategy.HuffmanOnly)
+ {
+ match_length = longest_match(hash_head);
+ }
+ // longest_match() sets match_start
+
+ if (match_length <= 5 && (compressionStrategy == CompressionStrategy.Filtered ||
+ (match_length == MIN_MATCH && strstart - match_start > 4096)))
+ {
+
+ // If prev_match is also MIN_MATCH, match_start is garbage
+ // but we will ignore the current match anyway.
+ match_length = MIN_MATCH - 1;
+ }
+ }
+
+ // If there was a match at the previous step and the current
+ // match is not better, output the previous match:
+ if (prev_length >= MIN_MATCH && match_length <= prev_length)
+ {
+ int max_insert = strstart + lookahead - MIN_MATCH;
+ // Do not insert strings in hash table beyond this.
+
+ // check_match(strstart-1, prev_match, prev_length);
+
+ bflush = _tr_tally(strstart - 1 - prev_match, prev_length - MIN_MATCH);
+
+ // Insert in hash table all strings up to the end of the match.
+ // strstart-1 and strstart are already inserted. If there is not
+ // enough lookahead, the last two strings are not inserted in
+ // the hash table.
+ lookahead -= (prev_length - 1);
+ prev_length -= 2;
+ do
+ {
+ if (++strstart <= max_insert)
+ {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ //prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = unchecked((short)strstart);
+ }
+ }
+ while (--prev_length != 0);
+ match_available = 0;
+ match_length = MIN_MATCH - 1;
+ strstart++;
+
+ if (bflush)
+ {
+ flush_block_only(false);
+ if (_codec.AvailableBytesOut == 0)
+ return BlockState.NeedMore;
+ }
+ }
+ else if (match_available != 0)
+ {
+
+ // If there was no match at the previous position, output a
+ // single literal. If there was a match but the current match
+ // is longer, truncate the previous match to a single literal.
+
+ bflush = _tr_tally(0, window[strstart - 1] & 0xff);
+
+ if (bflush)
+ {
+ flush_block_only(false);
+ }
+ strstart++;
+ lookahead--;
+ if (_codec.AvailableBytesOut == 0)
+ return BlockState.NeedMore;
+ }
+ else
+ {
+ // There is no previous match to compare with, wait for
+ // the next step to decide.
+
+ match_available = 1;
+ strstart++;
+ lookahead--;
+ }
+ }
+
+ if (match_available != 0)
+ {
+ bflush = _tr_tally(0, window[strstart - 1] & 0xff);
+ match_available = 0;
+ }
+ flush_block_only(flush == FlushType.Finish);
+
+ if (_codec.AvailableBytesOut == 0)
+ {
+ if (flush == FlushType.Finish)
+ return BlockState.FinishStarted;
+ else
+ return BlockState.NeedMore;
+ }
+
+ return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
+ }
+
+
+ internal int longest_match(int cur_match)
+ {
+ int chain_length = config.MaxChainLength; // max hash chain length
+ int scan = strstart; // current string
+ int match; // matched string
+ int len; // length of current match
+ int best_len = prev_length; // best match length so far
+ int limit = strstart > (w_size - MIN_LOOKAHEAD) ? strstart - (w_size - MIN_LOOKAHEAD) : 0;
+
+ int niceLength = config.NiceLength;
+
+ // Stop when cur_match becomes <= limit. To simplify the code,
+ // we prevent matches with the string of window index 0.
+
+ int wmask = w_mask;
+
+ int strend = strstart + MAX_MATCH;
+ byte scan_end1 = window[scan + best_len - 1];
+ byte scan_end = window[scan + best_len];
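+ // scan_end1/scan_end cache the two bytes a candidate must match at offsets
+ // best_len-1 and best_len; if it cannot beat the current best match there is
+ // no point in comparing the whole string.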
+
+ // The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+ // It is easy to get rid of this optimization if necessary.
+
+ // Do not waste too much time if we already have a good match:
+ if (prev_length >= config.GoodLength)
+ {
+ chain_length >>= 2;
+ }
+
+ // Do not look for matches beyond the end of the input. This is necessary
+ // to make deflate deterministic.
+ if (niceLength > lookahead)
+ niceLength = lookahead;
+
+ do
+ {
+ match = cur_match;
+
+ // Skip to next match if the match length cannot increase
+ // or if the match length is less than 2:
+ if (window[match + best_len] != scan_end ||
+ window[match + best_len - 1] != scan_end1 ||
+ window[match] != window[scan] ||
+ window[++match] != window[scan + 1])
+ continue;
+
+ // The check at best_len-1 can be removed because it will be made
+ // again later. (This heuristic is not always a win.)
+ // It is not necessary to compare scan[2] and match[2] since they
+ // are always equal when the other bytes match, given that
+ // the hash keys are equal and that HASH_BITS >= 8.
+ scan += 2; match++;
+
+ // We check for insufficient lookahead only every 8th comparison;
+ // the 256th check will be made at strstart+258.
+ do
+ {
+ }
+ while (window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] && scan < strend);
+
+ len = MAX_MATCH - (int)(strend - scan);
+ scan = strend - MAX_MATCH;
+
+ if (len > best_len)
+ {
+ match_start = cur_match;
+ best_len = len;
+ if (len >= niceLength)
+ break;
+ scan_end1 = window[scan + best_len - 1];
+ scan_end = window[scan + best_len];
+ }
+ }
+ while ((cur_match = (prev[cur_match & wmask] & 0xffff)) > limit && --chain_length != 0);
+
+ if (best_len <= lookahead)
+ return best_len;
+ return lookahead;
+ }
+
+
+ private bool Rfc1950BytesEmitted = false;
+ private bool _WantRfc1950HeaderBytes = true;
+ internal bool WantRfc1950HeaderBytes
+ {
+ get { return _WantRfc1950HeaderBytes; }
+ set { _WantRfc1950HeaderBytes = value; }
+ }
+
+
+ internal int Initialize(ZlibCodec codec, CompressionLevel level)
+ {
+ return Initialize(codec, level, ZlibConstants.WindowBitsMax);
+ }
+
+ internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits)
+ {
+ return Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, CompressionStrategy.Default);
+ }
+
+ internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits, CompressionStrategy compressionStrategy)
+ {
+ return Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, compressionStrategy);
+ }
+
+ internal int Initialize(ZlibCodec codec, CompressionLevel level, int windowBits, int memLevel, CompressionStrategy strategy)
+ {
+ _codec = codec;
+ _codec.Message = null;
+
+ // validation
+ if (windowBits < 9 || windowBits > 15)
+ throw new ZlibException("windowBits must be in the range 9..15.");
+
+ if (memLevel < 1 || memLevel > MEM_LEVEL_MAX)
+ throw new ZlibException(String.Format("memLevel must be in the range 1..{0}", MEM_LEVEL_MAX));
+
+ _codec.dstate = this;
+
+ w_bits = windowBits;
+ w_size = 1 << w_bits;
+ w_mask = w_size - 1;
+
+ hash_bits = memLevel + 7;
+ hash_size = 1 << hash_bits;
+ hash_mask = hash_size - 1;
+ hash_shift = ((hash_bits + MIN_MATCH - 1) / MIN_MATCH);
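+ // hash_shift is ceil(hash_bits / MIN_MATCH): after MIN_MATCH updates of
+ // ins_h = ((ins_h << hash_shift) ^ next_byte) & hash_mask, the oldest byte has
+ // been shifted out, so the hash depends on exactly the last MIN_MATCH bytes.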
+
+ window = new byte[w_size * 2];
+ prev = new short[w_size];
+ head = new short[hash_size];
+
+ // for memLevel==8, this will be 16384, 16k
+ lit_bufsize = 1 << (memLevel + 6);
+
+ // Use a single array as the buffer for data pending compression,
+ // the output distance codes, and the output length codes (aka tree).
+ // orig comment: This works just fine since the average
+ // output size for (length,distance) codes is <= 24 bits.
+ pending = new byte[lit_bufsize * 4];
+ _distanceOffset = lit_bufsize;
+ _lengthOffset = (1 + 2) * lit_bufsize;
+
+ // So, for memLevel 8, the length of the pending buffer is 65536. 64k.
+ // The first 16k are pending bytes.
+ // The middle slice, of 32k, is used for distance codes.
+ // The final 16k are length codes.
+
+ this.compressionLevel = level;
+ this.compressionStrategy = strategy;
+
+ Reset();
+ return ZlibConstants.Z_OK;
+ }
+
+
+ internal void Reset()
+ {
+ _codec.TotalBytesIn = _codec.TotalBytesOut = 0;
+ _codec.Message = null;
+ //strm.data_type = Z_UNKNOWN;
+
+ pendingCount = 0;
+ nextPending = 0;
+
+ Rfc1950BytesEmitted = false;
+
+ status = (WantRfc1950HeaderBytes) ? INIT_STATE : BUSY_STATE;
+ _codec._Adler32 = Adler.Adler32(0, null, 0, 0);
+
+ last_flush = (int)FlushType.None;
+
+ _InitializeTreeData();
+ _InitializeLazyMatch();
+ }
+
+
+ internal int End()
+ {
+ if (status != INIT_STATE && status != BUSY_STATE && status != FINISH_STATE)
+ {
+ return ZlibConstants.Z_STREAM_ERROR;
+ }
+ // Deallocate in reverse order of allocations:
+ pending = null;
+ head = null;
+ prev = null;
+ window = null;
+ // free
+ // dstate=null;
+ return status == BUSY_STATE ? ZlibConstants.Z_DATA_ERROR : ZlibConstants.Z_OK;
+ }
+
+
+ private void SetDeflater()
+ {
+ switch (config.Flavor)
+ {
+ case DeflateFlavor.Store:
+ DeflateFunction = DeflateNone;
+ break;
+ case DeflateFlavor.Fast:
+ DeflateFunction = DeflateFast;
+ break;
+ case DeflateFlavor.Slow:
+ DeflateFunction = DeflateSlow;
+ break;
+ }
+ }
+
+
+ internal int SetParams(CompressionLevel level, CompressionStrategy strategy)
+ {
+ int result = ZlibConstants.Z_OK;
+
+ if (compressionLevel != level)
+ {
+ Config newConfig = Config.Lookup(level);
+
+ // change in the deflate flavor (Fast vs slow vs none)?
+ if (newConfig.Flavor != config.Flavor && _codec.TotalBytesIn != 0)
+ {
+ // Flush the last buffer:
+ result = _codec.Deflate(FlushType.Partial);
+ }
+
+ compressionLevel = level;
+ config = newConfig;
+ SetDeflater();
+ }
+
+ // no need to flush with change in strategy? Really?
+ compressionStrategy = strategy;
+
+ return result;
+ }
+
+
+ internal int SetDictionary(byte[] dictionary)
+ {
+ // Validate the argument before dereferencing it.
+ if (dictionary == null || status != INIT_STATE)
+ throw new ZlibException("Stream error.");
+
+ int length = dictionary.Length;
+ int index = 0;
+
+ _codec._Adler32 = Adler.Adler32(_codec._Adler32, dictionary, 0, dictionary.Length);
+
+ if (length < MIN_MATCH)
+ return ZlibConstants.Z_OK;
+ if (length > w_size - MIN_LOOKAHEAD)
+ {
+ length = w_size - MIN_LOOKAHEAD;
+ index = dictionary.Length - length; // use the tail of the dictionary
+ }
+ Array.Copy(dictionary, index, window, 0, length);
+ strstart = length;
+ block_start = length;
+
+ // Insert all strings in the hash table (except for the last two bytes).
+ // s->lookahead stays null, so s->ins_h will be recomputed at the next
+ // call of fill_window.
+
+ ins_h = window[0] & 0xff;
+ ins_h = (((ins_h) << hash_shift) ^ (window[1] & 0xff)) & hash_mask;
+
+ for (int n = 0; n <= length - MIN_MATCH; n++)
+ {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(n) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ prev[n & w_mask] = head[ins_h];
+ head[ins_h] = (short)n;
+ }
+ return ZlibConstants.Z_OK;
+ }
+
+
+
+ internal int Deflate(FlushType flush)
+ {
+ int old_flush;
+
+ if (_codec.OutputBuffer == null ||
+ (_codec.InputBuffer == null && _codec.AvailableBytesIn != 0) ||
+ (status == FINISH_STATE && flush != FlushType.Finish))
+ {
+ _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_STREAM_ERROR)];
+ throw new ZlibException(String.Format("Something is fishy. [{0}]", _codec.Message));
+ }
+ if (_codec.AvailableBytesOut == 0)
+ {
+ _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
+ throw new ZlibException("OutputBuffer is full (AvailableBytesOut == 0)");
+ }
+
+ old_flush = last_flush;
+ last_flush = (int)flush;
+
+ // Write the zlib (rfc1950) header bytes
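+ // Per RFC 1950 the two header bytes are CMF (CM = 8 for deflate in the low
+ // nibble, CINFO = windowBits - 8 in the high nibble) and FLG (FLEVEL in the
+ // top two bits, the FDICT flag when a preset dictionary is in use, and an
+ // FCHECK value chosen so that the 16-bit value CMF*256 + FLG is divisible by 31).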
+ if (status == INIT_STATE)
+ {
+ int header = (Z_DEFLATED + ((w_bits - 8) << 4)) << 8;
+ int level_flags = (((int)compressionLevel - 1) & 0xff) >> 1;
+
+ if (level_flags > 3)
+ level_flags = 3;
+ header |= (level_flags << 6);
+ if (strstart != 0)
+ header |= PRESET_DICT;
+ header += 31 - (header % 31);
+
+ status = BUSY_STATE;
+ //putShortMSB(header);
+ unchecked
+ {
+ pending[pendingCount++] = (byte)(header >> 8);
+ pending[pendingCount++] = (byte)header;
+ }
+ // Save the adler32 of the preset dictionary:
+ if (strstart != 0)
+ {
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24);
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16);
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8);
+ pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF);
+ }
+ _codec._Adler32 = Adler.Adler32(0, null, 0, 0);
+ }
+
+ // Flush as much pending output as possible
+ if (pendingCount != 0)
+ {
+ _codec.flush_pending();
+ if (_codec.AvailableBytesOut == 0)
+ {
+ //System.out.println(" avail_out==0");
+ // Since avail_out is 0, deflate will be called again with
+ // more output space, but possibly with both pending and
+ // avail_in equal to zero. There won't be anything to do,
+ // but this is not an error situation so make sure we
+ // return OK instead of BUF_ERROR at next call of deflate:
+ last_flush = -1;
+ return ZlibConstants.Z_OK;
+ }
+
+ // Make sure there is something to do and avoid duplicate consecutive
+ // flushes. For repeated and useless calls with Z_FINISH, we keep
+ // returning Z_STREAM_END instead of Z_BUFF_ERROR.
+ }
+ else if (_codec.AvailableBytesIn == 0 &&
+ (int)flush <= old_flush &&
+ flush != FlushType.Finish)
+ {
+ // workitem 8557
+ //
+ // Not sure why this needs to be an error. pendingCount == 0, which
+ // means there's nothing to deflate. And the caller has not asked
+ // for a FlushType.Finish, but... that seems very non-fatal. We
+ // can just say "OK" and do nothing.
+
+ // _codec.Message = z_errmsg[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
+ // throw new ZlibException("AvailableBytesIn == 0 && flush<=old_flush && flush != FlushType.Finish");
+
+ return ZlibConstants.Z_OK;
+ }
+
+ // User must not provide more input after the first FINISH:
+ if (status == FINISH_STATE && _codec.AvailableBytesIn != 0)
+ {
+ _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
+ throw new ZlibException("status == FINISH_STATE && _codec.AvailableBytesIn != 0");
+ }
+
+ // Start a new block or continue the current one.
+ if (_codec.AvailableBytesIn != 0 || lookahead != 0 || (flush != FlushType.None && status != FINISH_STATE))
+ {
+ BlockState bstate = DeflateFunction(flush);
+
+ if (bstate == BlockState.FinishStarted || bstate == BlockState.FinishDone)
+ {
+ status = FINISH_STATE;
+ }
+ if (bstate == BlockState.NeedMore || bstate == BlockState.FinishStarted)
+ {
+ if (_codec.AvailableBytesOut == 0)
+ {
+ last_flush = -1; // avoid BUF_ERROR next call, see above
+ }
+ return ZlibConstants.Z_OK;
+ // If flush != Z_NO_FLUSH && avail_out == 0, the next call
+ // of deflate should use the same flush parameter to make sure
+ // that the flush is complete. So we don't have to output an
+ // empty block here, this will be done at next call. This also
+ // ensures that for a very small output buffer, we emit at most
+ // one empty block.
+ }
+
+ if (bstate == BlockState.BlockDone)
+ {
+ if (flush == FlushType.Partial)
+ {
+ _tr_align();
+ }
+ else
+ {
+ // FlushType.Full or FlushType.Sync
+ _tr_stored_block(0, 0, false);
+ // For a full flush, this empty block will be recognized
+ // as a special marker by inflate_sync().
+ if (flush == FlushType.Full)
+ {
+ // clear hash (forget the history)
+ for (int i = 0; i < hash_size; i++)
+ head[i] = 0;
+ }
+ }
+ _codec.flush_pending();
+ if (_codec.AvailableBytesOut == 0)
+ {
+ last_flush = -1; // avoid BUF_ERROR at next call, see above
+ return ZlibConstants.Z_OK;
+ }
+ }
+ }
+
+ if (flush != FlushType.Finish)
+ return ZlibConstants.Z_OK;
+
+ if (!WantRfc1950HeaderBytes || Rfc1950BytesEmitted)
+ return ZlibConstants.Z_STREAM_END;
+
+ // Write the zlib trailer (adler32)
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24);
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16);
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8);
+ pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF);
+ //putShortMSB((int)(SharedUtils.URShift(_codec._Adler32, 16)));
+ //putShortMSB((int)(_codec._Adler32 & 0xffff));
+
+ _codec.flush_pending();
+
+ // If avail_out is zero, the application will call deflate again
+ // to flush the rest.
+
+ Rfc1950BytesEmitted = true; // write the trailer only once!
+
+ return pendingCount != 0 ? ZlibConstants.Z_OK : ZlibConstants.Z_STREAM_END;
+ }
+
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/ZipFile/ZLib/InfTree.cs b/SabreTools.Library/External/Compress/ZipFile/ZLib/InfTree.cs
new file mode 100644
index 00000000..587f9c10
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ZipFile/ZLib/InfTree.cs
@@ -0,0 +1,436 @@
+// Inftree.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2009-October-28 12:43:54>
+//
+// ------------------------------------------------------------------
+//
+// This module defines classes used in decompression. This code is derived
+// from the jzlib implementation of zlib. In keeping with the license for jzlib,
+// the copyright to that code is below.
+//
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+using System;
+
+namespace Compress.ZipFile.ZLib
+{
+
+ sealed class InfTree
+ {
+
+ private const int MANY = 1440;
+
+ private const int Z_OK = 0;
+ private const int Z_STREAM_END = 1;
+ private const int Z_NEED_DICT = 2;
+ private const int Z_ERRNO = - 1;
+ private const int Z_STREAM_ERROR = - 2;
+ private const int Z_DATA_ERROR = - 3;
+ private const int Z_MEM_ERROR = - 4;
+ private const int Z_BUF_ERROR = - 5;
+ private const int Z_VERSION_ERROR = - 6;
+
+ internal const int fixed_bl = 9;
+ internal const int fixed_bd = 5;
+
+ //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_tl'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] fixed_tl = new int[]{96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 192, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 160, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 224, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 144, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 208, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 176, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 240, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 200, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 168, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 232, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 152, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 216, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 184, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 248, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 196, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 164, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 228, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 148, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 212, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 180, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 244, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 204, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 172, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 236, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 156, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 220, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 188, 0, 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 252, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 194, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 162, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 226, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 146, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 210, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 178, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 242, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 202, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 170, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 234, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 154, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 218, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 186,
+ 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 250, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 198, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 166, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 230, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 150, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 214, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 182, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 246, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 206, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 174, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 238, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 158, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 222, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 190, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 254, 96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 193, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 161, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 225, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 145, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 209, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 177, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 241, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 201, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 169, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 233, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 153, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 217, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 185, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 249, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 197, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 165, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 229, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 149, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 213, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 181, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 245, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 205, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 173, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 237, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 157, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 221, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 189, 0, 8,
+ 14, 0, 8, 142, 0, 8, 78, 0, 9, 253, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 195, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 163, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 227, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 147, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 211, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 179, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 243, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 203, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 171, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 235, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 155, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 219, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 187, 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 251, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 199, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 167, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 231, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 151, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 215, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 183, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 247, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 207, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 175, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 239, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 159, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 223, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 191, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 255};
+ //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_td'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] fixed_td = new int[]{80, 5, 1, 87, 5, 257, 83, 5, 17, 91, 5, 4097, 81, 5, 5, 89, 5, 1025, 85, 5, 65, 93, 5, 16385, 80, 5, 3, 88, 5, 513, 84, 5, 33, 92, 5, 8193, 82, 5, 9, 90, 5, 2049, 86, 5, 129, 192, 5, 24577, 80, 5, 2, 87, 5, 385, 83, 5, 25, 91, 5, 6145, 81, 5, 7, 89, 5, 1537, 85, 5, 97, 93, 5, 24577, 80, 5, 4, 88, 5, 769, 84, 5, 49, 92, 5, 12289, 82, 5, 13, 90, 5, 3073, 86, 5, 193, 192, 5, 24577};
+
+ // Tables for deflate from PKZIP's appnote.txt.
+ //UPGRADE_NOTE: Final was removed from the declaration of 'cplens'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] cplens = new int[]{3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+
+ // see note #13 above about 258
+ //UPGRADE_NOTE: Final was removed from the declaration of 'cplext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] cplext = new int[]{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112};
+
+ //UPGRADE_NOTE: Final was removed from the declaration of 'cpdist'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] cpdist = new int[]{1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577};
+
+ //UPGRADE_NOTE: Final was removed from the declaration of 'cpdext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] cpdext = new int[]{0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
+
+ // If BMAX needs to be larger than 16, then h and x[] should be uLong.
+ internal const int BMAX = 15; // maximum bit length of any code
+
+ internal int[] hn = null; // hufts used in space
+ internal int[] v = null; // work area for huft_build
+ internal int[] c = null; // bit length count table
+ internal int[] r = null; // table entry for structure assignment
+ internal int[] u = null; // table stack
+ internal int[] x = null; // bit offsets, then code stack
+
+ private int huft_build(int[] b, int bindex, int n, int s, int[] d, int[] e, int[] t, int[] m, int[] hp, int[] hn, int[] v)
+ {
+ // Given a list of code lengths and a maximum table size, make a set of
+ // tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
+ // if the given code set is incomplete (the tables are still built in this
+ // case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
+ // lengths), or Z_MEM_ERROR if not enough memory.
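+ // This is the canonical-Huffman table builder inherited from zlib/jzlib:
+ // count codes per bit length, compute starting offsets, sort values by code
+ // length into v[], then walk the codes in order of increasing length,
+ // building the root table and linked sub-tables of at most l bits each.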
+
+ int a; // counter for codes of length k
+ int f; // i repeats in table every f entries
+ int g; // maximum code length
+ int h; // table level
+ int i; // counter, current code
+ int j; // counter
+ int k; // number of bits in current code
+ int l; // bits per table (returned in m)
+ int mask; // (1 << w) - 1, to avoid cc -O bug on HP
+ int p; // pointer into c[], b[], or v[]
+ int q; // points to current table
+ int w; // bits before this table == (l * h)
+ int xp; // pointer into x
+ int y; // number of dummy codes added
+ int z; // number of entries in current table
+
+ // Generate counts for each bit length
+
+ p = 0; i = n;
+ do
+ {
+ c[b[bindex + p]]++; p++; i--; // assume all entries <= BMAX
+ }
+ while (i != 0);
+
+ if (c[0] == n)
+ {
+ // null input--all zero length codes
+ t[0] = - 1;
+ m[0] = 0;
+ return Z_OK;
+ }
+
+ // Find minimum and maximum length, bound *m by those
+ l = m[0];
+ for (j = 1; j <= BMAX; j++)
+ if (c[j] != 0)
+ break;
+ k = j; // minimum code length
+ if (l < j)
+ {
+ l = j;
+ }
+ for (i = BMAX; i != 0; i--)
+ {
+ if (c[i] != 0)
+ break;
+ }
+ g = i; // maximum code length
+ if (l > i)
+ {
+ l = i;
+ }
+ m[0] = l;
+
+ // Adjust last length count to fill out codes, if needed
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ {
+ if ((y -= c[j]) < 0)
+ {
+ return Z_DATA_ERROR;
+ }
+ }
+ if ((y -= c[i]) < 0)
+ {
+ return Z_DATA_ERROR;
+ }
+ c[i] += y;
+
+ // Generate starting offsets into the value table for each length
+ x[1] = j = 0;
+ p = 1; xp = 2;
+ while (--i != 0)
+ {
+ // note that i == g from above
+ x[xp] = (j += c[p]);
+ xp++;
+ p++;
+ }
+
+ // Make a table of values in order of bit lengths
+ i = 0; p = 0;
+ do
+ {
+ if ((j = b[bindex + p]) != 0)
+ {
+ v[x[j]++] = i;
+ }
+ p++;
+ }
+ while (++i < n);
+ n = x[g]; // set n to length of v
+
+ // Generate the Huffman codes and for each, make the table entries
+ x[0] = i = 0; // first Huffman code is zero
+ p = 0; // grab values in bit order
+ h = - 1; // no tables yet--level -1
+ w = - l; // bits decoded == (l * h)
+ u[0] = 0; // just to keep compilers happy
+ q = 0; // ditto
+ z = 0; // ditto
+
+ // go through the bit lengths (k already is bits in shortest code)
+ for (; k <= g; k++)
+ {
+ a = c[k];
+ while (a-- != 0)
+ {
+ // here i is the Huffman code of length k bits for value *p
+ // make tables up to required level
+ while (k > w + l)
+ {
+ h++;
+ w += l; // previous table always l bits
+ // compute minimum size table less than or equal to l bits
+ z = g - w;
+ z = (z > l)?l:z; // table size upper limit
+ if ((f = 1 << (j = k - w)) > a + 1)
+ {
+ // try a k-w bit table
+ // too few codes for k-w bit table
+ f -= (a + 1); // deduct codes from patterns left
+ xp = k;
+ if (j < z)
+ {
+ while (++j < z)
+ {
+ // try smaller tables up to z bits
+ if ((f <<= 1) <= c[++xp])
+ break; // enough codes to use up j bits
+ f -= c[xp]; // else deduct codes from patterns
+ }
+ }
+ }
+ z = 1 << j; // table entries for j-bit table
+
+ // allocate new table
+ if (hn[0] + z > MANY)
+ {
+ // (note: doesn't matter for fixed)
+ return Z_DATA_ERROR; // overflow of MANY
+ }
+ u[h] = q = hn[0]; // DEBUG
+ hn[0] += z;
+
+ // connect to last table, if there is one
+ if (h != 0)
+ {
+ x[h] = i; // save pattern for backing up
+ r[0] = (sbyte) j; // bits in this table
+ r[1] = (sbyte) l; // bits to dump before this table
+ j = SharedUtils.URShift(i, (w - l));
+ r[2] = (int) (q - u[h - 1] - j); // offset to this table
+ Array.Copy(r, 0, hp, (u[h - 1] + j) * 3, 3); // connect to last table
+ }
+ else
+ {
+ t[0] = q; // first table is returned result
+ }
+ }
+
+ // set up table entry in r
+ r[1] = (sbyte) (k - w);
+ if (p >= n)
+ {
+ r[0] = 128 + 64; // out of values--invalid code
+ }
+ else if (v[p] < s)
+ {
+ r[0] = (sbyte) (v[p] < 256?0:32 + 64); // 256 is end-of-block
+ r[2] = v[p++]; // simple code is just the value
+ }
+ else
+ {
+ r[0] = (sbyte) (e[v[p] - s] + 16 + 64); // non-simple--look up in lists
+ r[2] = d[v[p++] - s];
+ }
+
+ // fill code-like entries with r
+ f = 1 << (k - w);
+ for (j = SharedUtils.URShift(i, w); j < z; j += f)
+ {
+ Array.Copy(r, 0, hp, (q + j) * 3, 3);
+ }
+
+ // backwards increment the k-bit code i
+ for (j = 1 << (k - 1); (i & j) != 0; j = SharedUtils.URShift(j, 1))
+ {
+ i ^= j;
+ }
+ i ^= j;
+
+ // backup over finished tables
+ mask = (1 << w) - 1; // needed on HP, cc -O bug
+ while ((i & mask) != x[h])
+ {
+ h--; // don't need to update q
+ w -= l;
+ mask = (1 << w) - 1;
+ }
+ }
+ }
+ // Return Z_BUF_ERROR if we were given an incomplete table
+ return y != 0 && g != 1?Z_BUF_ERROR:Z_OK;
+ }
+
+ internal int inflate_trees_bits(int[] c, int[] bb, int[] tb, int[] hp, ZlibCodec z)
+ {
+ int result;
+ initWorkArea(19);
+ hn[0] = 0;
+ result = huft_build(c, 0, 19, 19, null, null, tb, bb, hp, hn, v);
+
+ if (result == Z_DATA_ERROR)
+ {
+ z.Message = "oversubscribed dynamic bit lengths tree";
+ }
+ else if (result == Z_BUF_ERROR || bb[0] == 0)
+ {
+ z.Message = "incomplete dynamic bit lengths tree";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+ }
+
+ internal int inflate_trees_dynamic(int nl, int nd, int[] c, int[] bl, int[] bd, int[] tl, int[] td, int[] hp, ZlibCodec z)
+ {
+ int result;
+
+ // build literal/length tree
+ initWorkArea(288);
+ hn[0] = 0;
+ result = huft_build(c, 0, nl, 257, cplens, cplext, tl, bl, hp, hn, v);
+ if (result != Z_OK || bl[0] == 0)
+ {
+ if (result == Z_DATA_ERROR)
+ {
+ z.Message = "oversubscribed literal/length tree";
+ }
+ else if (result != Z_MEM_ERROR)
+ {
+ z.Message = "incomplete literal/length tree";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+ }
+
+ // build distance tree
+ initWorkArea(288);
+ result = huft_build(c, nl, nd, 0, cpdist, cpdext, td, bd, hp, hn, v);
+
+ if (result != Z_OK || (bd[0] == 0 && nl > 257))
+ {
+ if (result == Z_DATA_ERROR)
+ {
+ z.Message = "oversubscribed distance tree";
+ }
+ else if (result == Z_BUF_ERROR)
+ {
+ z.Message = "incomplete distance tree";
+ result = Z_DATA_ERROR;
+ }
+ else if (result != Z_MEM_ERROR)
+ {
+ z.Message = "empty distance tree with lengths";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+ }
+
+ return Z_OK;
+ }
+
+ internal static int inflate_trees_fixed(int[] bl, int[] bd, int[][] tl, int[][] td, ZlibCodec z)
+ {
+ bl[0] = fixed_bl;
+ bd[0] = fixed_bd;
+ tl[0] = fixed_tl;
+ td[0] = fixed_td;
+ return Z_OK;
+ }
+
+ private void initWorkArea(int vsize)
+ {
+ if (hn == null)
+ {
+ hn = new int[1];
+ v = new int[vsize];
+ c = new int[BMAX + 1];
+ r = new int[3];
+ u = new int[BMAX];
+ x = new int[BMAX + 1];
+ }
+ else
+ {
+ if (v.Length < vsize)
+ {
+ v = new int[vsize];
+ }
+ Array.Clear(v,0,vsize);
+ Array.Clear(c,0,BMAX+1);
+ r[0]=0; r[1]=0; r[2]=0;
+ // for(int i=0; i
+//
+// ------------------------------------------------------------------
+//
+// This module defines classes for decompression. This code is derived
+// from the jzlib implementation of zlib, but significantly modified.
+// The object model is not the same, and many of the behaviors are
+// different. Nonetheless, in keeping with the license for jzlib, I am
+// reproducing the copyright to that code here.
+//
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+using System;
+
+namespace Compress.ZipFile.ZLib
+{
+ sealed class InflateBlocks
+ {
+ private const int MANY = 1440;
+
+ // Table for deflate from PKZIP's appnote.txt.
+ internal static readonly int[] border = new int[]
+ { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
+
+ private enum InflateBlockMode
+ {
+ TYPE = 0, // get type bits (3, including end bit)
+ LENS = 1, // get lengths for stored
+ STORED = 2, // processing stored block
+ TABLE = 3, // get table lengths
+ BTREE = 4, // get bit lengths tree for a dynamic block
+ DTREE = 5, // get length, distance trees for a dynamic block
+ CODES = 6, // processing fixed or dynamic block
+ DRY = 7, // output remaining window bytes
+ DONE = 8, // finished last block, done
+ BAD = 9, // got a data error--stuck here
+ }
+
+ private InflateBlockMode mode; // current inflate_block mode
+
+ internal int left; // if STORED, bytes left to copy
+
+ internal int table; // table lengths (14 bits)
+ internal int index; // index into blens (or border)
+ internal int[] blens; // bit lengths of codes
+ internal int[] bb = new int[1]; // bit length tree depth
+ internal int[] tb = new int[1]; // bit length decoding tree
+
+ internal InflateCodes codes = new InflateCodes(); // if CODES, current state
+
+ internal int last; // true if this block is the last block
+
+ internal ZlibCodec _codec; // pointer back to this zlib stream
+
+ // mode independent information
+ internal int bitk; // bits in bit buffer
+ internal int bitb; // bit buffer
+ internal int[] hufts; // single malloc for tree space
+ internal byte[] window; // sliding window
+ internal int end; // one byte after sliding window
+ internal int readAt; // window read pointer
+ internal int writeAt; // window write pointer
+ internal System.Object checkfn; // check function
+ internal uint check; // check on output
+
+ internal InfTree inftree = new InfTree();
+
+ internal InflateBlocks(ZlibCodec codec, System.Object checkfn, int w)
+ {
+ _codec = codec;
+ hufts = new int[MANY * 3];
+ window = new byte[w];
+ end = w;
+ this.checkfn = checkfn;
+ mode = InflateBlockMode.TYPE;
+ Reset();
+ }
+
+ internal uint Reset()
+ {
+ uint oldCheck = check;
+ mode = InflateBlockMode.TYPE;
+ bitk = 0;
+ bitb = 0;
+ readAt = writeAt = 0;
+
+ if (checkfn != null)
+ _codec._Adler32 = check = Adler.Adler32(0, null, 0, 0);
+ return oldCheck;
+ }
+
+
+ internal int Process(int r)
+ {
+ int t; // temporary storage
+ int b; // bit buffer
+ int k; // bits in bit buffer
+ int p; // input data pointer
+ int n; // bytes available there
+ int q; // output window write pointer
+ int m; // bytes to end of window or read pointer
+
+ // copy input/output information to locals (UPDATE macro restores)
+
+ p = _codec.NextIn;
+ n = _codec.AvailableBytesIn;
+ b = bitb;
+ k = bitk;
+
+ q = writeAt;
+ m = (int)(q < readAt ? readAt - q - 1 : end - q);
+
+
+ // process input based on current state
+ while (true)
+ {
+ switch (mode)
+ {
+ case InflateBlockMode.TYPE:
+
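+ // Read the 3 block header bits: the low bit is BFINAL (last block) and the
+ // next two bits are BTYPE (0 = stored, 1 = fixed Huffman, 2 = dynamic, 3 = invalid).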
+ while (k < (3))
+ {
+ if (n != 0)
+ {
+ r = ZlibConstants.Z_OK;
+ }
+ else
+ {
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+
+ n--;
+ b |= (_codec.InputBuffer[p++] & 0xff) << k;
+ k += 8;
+ }
+ t = (int)(b & 7);
+ last = t & 1;
+
+ switch ((uint)t >> 1)
+ {
+ case 0: // stored
+ b >>= 3; k -= (3);
+ t = k & 7; // go to byte boundary
+ b >>= t; k -= t;
+ mode = InflateBlockMode.LENS; // get length of stored block
+ break;
+
+ case 1: // fixed
+ int[] bl = new int[1];
+ int[] bd = new int[1];
+ int[][] tl = new int[1][];
+ int[][] td = new int[1][];
+ InfTree.inflate_trees_fixed(bl, bd, tl, td, _codec);
+ codes.Init(bl[0], bd[0], tl[0], 0, td[0], 0);
+ b >>= 3; k -= 3;
+ mode = InflateBlockMode.CODES;
+ break;
+
+ case 2: // dynamic
+ b >>= 3; k -= 3;
+ mode = InflateBlockMode.TABLE;
+ break;
+
+ case 3: // illegal
+ b >>= 3; k -= 3;
+ mode = InflateBlockMode.BAD;
+ _codec.Message = "invalid block type";
+ r = ZlibConstants.Z_DATA_ERROR;
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+ break;
+
+ case InflateBlockMode.LENS:
+
+ while (k < (32))
+ {
+ if (n != 0)
+ {
+ r = ZlibConstants.Z_OK;
+ }
+ else
+ {
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+ n--;
+ b |= (_codec.InputBuffer[p++] & 0xff) << k;
+ k += 8;
+ }
+
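+ // A stored block's LEN is in the low 16 bits and NLEN (its one's complement)
+ // in the high 16 bits; the two must agree (RFC 1951 3.2.4).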
+ if ( ( ((~b)>>16) & 0xffff) != (b & 0xffff))
+ {
+ mode = InflateBlockMode.BAD;
+ _codec.Message = "invalid stored block lengths";
+ r = ZlibConstants.Z_DATA_ERROR;
+
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+ left = (b & 0xffff);
+ b = k = 0; // dump bits
+ mode = left != 0 ? InflateBlockMode.STORED : (last != 0 ? InflateBlockMode.DRY : InflateBlockMode.TYPE);
+ break;
+
+ case InflateBlockMode.STORED:
+ if (n == 0)
+ {
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+
+ if (m == 0)
+ {
+ if (q == end && readAt != 0)
+ {
+ q = 0; m = (int)(q < readAt ? readAt - q - 1 : end - q);
+ }
+ if (m == 0)
+ {
+ writeAt = q;
+ r = Flush(r);
+ q = writeAt; m = (int)(q < readAt ? readAt - q - 1 : end - q);
+ if (q == end && readAt != 0)
+ {
+ q = 0; m = (int)(q < readAt ? readAt - q - 1 : end - q);
+ }
+ if (m == 0)
+ {
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+ }
+ }
+ r = ZlibConstants.Z_OK;
+
+ t = left;
+ if (t > n)
+ t = n;
+ if (t > m)
+ t = m;
+ Array.Copy(_codec.InputBuffer, p, window, q, t);
+ p += t; n -= t;
+ q += t; m -= t;
+ if ((left -= t) != 0)
+ break;
+ mode = last != 0 ? InflateBlockMode.DRY : InflateBlockMode.TYPE;
+ break;
+
+ case InflateBlockMode.TABLE:
+
+ while (k < (14))
+ {
+ if (n != 0)
+ {
+ r = ZlibConstants.Z_OK;
+ }
+ else
+ {
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+
+ n--;
+ b |= (_codec.InputBuffer[p++] & 0xff) << k;
+ k += 8;
+ }
+
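+                        // low 14 bits of the dynamic block header: HLIT (5 bits), HDIST (5 bits)
+                        // and HCLEN (4 bits); HLIT and HDIST may not exceed 29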
+ table = t = (b & 0x3fff);
+ if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
+ {
+ mode = InflateBlockMode.BAD;
+ _codec.Message = "too many length or distance symbols";
+ r = ZlibConstants.Z_DATA_ERROR;
+
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+ t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
+ if (blens == null || blens.Length < t)
+ {
+ blens = new int[t];
+ }
+ else
+ {
+ Array.Clear(blens, 0, t);
+ // for (int i = 0; i < t; i++)
+ // {
+ // blens[i] = 0;
+ // }
+ }
+
+ b >>= 14;
+ k -= 14;
+
+
+ index = 0;
+ mode = InflateBlockMode.BTREE;
+ goto case InflateBlockMode.BTREE;
+
+ case InflateBlockMode.BTREE:
+ while (index < 4 + (table >> 10))
+ {
+ while (k < (3))
+ {
+ if (n != 0)
+ {
+ r = ZlibConstants.Z_OK;
+ }
+ else
+ {
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+
+ n--;
+ b |= (_codec.InputBuffer[p++] & 0xff) << k;
+ k += 8;
+ }
+
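+                            // code length code lengths arrive in the fixed order given by
+                            // 'border' (16, 17, 18, 0, 8, 7, ...), as defined by the DEFLATE spec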
+ blens[border[index++]] = b & 7;
+
+ b >>= 3; k -= 3;
+ }
+
+ while (index < 19)
+ {
+ blens[border[index++]] = 0;
+ }
+
+ bb[0] = 7;
+ t = inftree.inflate_trees_bits(blens, bb, tb, hufts, _codec);
+ if (t != ZlibConstants.Z_OK)
+ {
+ r = t;
+ if (r == ZlibConstants.Z_DATA_ERROR)
+ {
+ blens = null;
+ mode = InflateBlockMode.BAD;
+ }
+
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+
+ index = 0;
+ mode = InflateBlockMode.DTREE;
+ goto case InflateBlockMode.DTREE;
+
+ case InflateBlockMode.DTREE:
+ while (true)
+ {
+ t = table;
+ if (!(index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f)))
+ {
+ break;
+ }
+
+ int i, j, c;
+
+ t = bb[0];
+
+ while (k < t)
+ {
+ if (n != 0)
+ {
+ r = ZlibConstants.Z_OK;
+ }
+ else
+ {
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+
+ n--;
+ b |= (_codec.InputBuffer[p++] & 0xff) << k;
+ k += 8;
+ }
+
+ t = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 1];
+ c = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 2];
+
+ if (c < 16)
+ {
+ b >>= t; k -= t;
+ blens[index++] = c;
+ }
+ else
+ {
+ // c == 16..18
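+                                // 16: repeat the previous length 3-6 times,
+                                // 17: repeat a zero length 3-10 times,
+                                // 18: repeat a zero length 11-138 times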
+ i = c == 18 ? 7 : c - 14;
+ j = c == 18 ? 11 : 3;
+
+ while (k < (t + i))
+ {
+ if (n != 0)
+ {
+ r = ZlibConstants.Z_OK;
+ }
+ else
+ {
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+
+ n--;
+ b |= (_codec.InputBuffer[p++] & 0xff) << k;
+ k += 8;
+ }
+
+ b >>= t; k -= t;
+
+ j += (b & InternalInflateConstants.InflateMask[i]);
+
+ b >>= i; k -= i;
+
+ i = index;
+ t = table;
+ if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || (c == 16 && i < 1))
+ {
+ blens = null;
+ mode = InflateBlockMode.BAD;
+ _codec.Message = "invalid bit length repeat";
+ r = ZlibConstants.Z_DATA_ERROR;
+
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+
+ c = (c == 16) ? blens[i-1] : 0;
+ do
+ {
+ blens[i++] = c;
+ }
+ while (--j != 0);
+ index = i;
+ }
+ }
+
+ tb[0] = -1;
+ {
+ int[] bl = new int[] { 9 }; // must be <= 9 for lookahead assumptions
+ int[] bd = new int[] { 6 }; // must be <= 9 for lookahead assumptions
+ int[] tl = new int[1];
+ int[] td = new int[1];
+
+ t = table;
+ t = inftree.inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), blens, bl, bd, tl, td, hufts, _codec);
+
+ if (t != ZlibConstants.Z_OK)
+ {
+ if (t == ZlibConstants.Z_DATA_ERROR)
+ {
+ blens = null;
+ mode = InflateBlockMode.BAD;
+ }
+ r = t;
+
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+ codes.Init(bl[0], bd[0], hufts, tl[0], hufts, td[0]);
+ }
+ mode = InflateBlockMode.CODES;
+ goto case InflateBlockMode.CODES;
+
+ case InflateBlockMode.CODES:
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+
+ r = codes.Process(this, r);
+ if (r != ZlibConstants.Z_STREAM_END)
+ {
+ return Flush(r);
+ }
+
+ r = ZlibConstants.Z_OK;
+ p = _codec.NextIn;
+ n = _codec.AvailableBytesIn;
+ b = bitb;
+ k = bitk;
+ q = writeAt;
+ m = (int)(q < readAt ? readAt - q - 1 : end - q);
+
+ if (last == 0)
+ {
+ mode = InflateBlockMode.TYPE;
+ break;
+ }
+ mode = InflateBlockMode.DRY;
+ goto case InflateBlockMode.DRY;
+
+ case InflateBlockMode.DRY:
+ writeAt = q;
+ r = Flush(r);
+ q = writeAt; m = (int)(q < readAt ? readAt - q - 1 : end - q);
+ if (readAt != writeAt)
+ {
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+ mode = InflateBlockMode.DONE;
+ goto case InflateBlockMode.DONE;
+
+ case InflateBlockMode.DONE:
+ r = ZlibConstants.Z_STREAM_END;
+ bitb = b;
+ bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+
+ case InflateBlockMode.BAD:
+ r = ZlibConstants.Z_DATA_ERROR;
+
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+
+
+ default:
+ r = ZlibConstants.Z_STREAM_ERROR;
+
+ bitb = b; bitk = k;
+ _codec.AvailableBytesIn = n;
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ writeAt = q;
+ return Flush(r);
+ }
+ }
+ }
+
+
+ internal void Free()
+ {
+ Reset();
+ window = null;
+ hufts = null;
+ }
+
+ internal void SetDictionary(byte[] d, int start, int n)
+ {
+ Array.Copy(d, start, window, 0, n);
+ readAt = writeAt = n;
+ }
+
+        // Returns 1 if inflate is currently at the end of a block generated
+        // by Z_SYNC_FLUSH or Z_FULL_FLUSH, otherwise 0.
+ internal int SyncPoint()
+ {
+ return mode == InflateBlockMode.LENS ? 1 : 0;
+ }
+
+ // copy as much as possible from the sliding window to the output area
+ internal int Flush(int r)
+ {
+ int nBytes;
+
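+            // pass 0 copies from readAt up to the physical end of the window (or up
+            // to writeAt if the write pointer has not wrapped); pass 1 copies any
+            // wrapped remainder from the start of the window up to writeAt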
+ for (int pass=0; pass < 2; pass++)
+ {
+ if (pass==0)
+ {
+ // compute number of bytes to copy as far as end of window
+ nBytes = (int)((readAt <= writeAt ? writeAt : end) - readAt);
+ }
+ else
+ {
+ // compute bytes to copy
+ nBytes = writeAt - readAt;
+ }
+
+ // workitem 8870
+ if (nBytes == 0)
+ {
+ if (r == ZlibConstants.Z_BUF_ERROR)
+ r = ZlibConstants.Z_OK;
+ return r;
+ }
+
+ if (nBytes > _codec.AvailableBytesOut)
+ nBytes = _codec.AvailableBytesOut;
+
+ if (nBytes != 0 && r == ZlibConstants.Z_BUF_ERROR)
+ r = ZlibConstants.Z_OK;
+
+ // update counters
+ _codec.AvailableBytesOut -= nBytes;
+ _codec.TotalBytesOut += nBytes;
+
+ // update check information
+ if (checkfn != null)
+ _codec._Adler32 = check = Adler.Adler32(check, window, readAt, nBytes);
+
+ // copy as far as end of window
+ Array.Copy(window, readAt, _codec.OutputBuffer, _codec.NextOut, nBytes);
+ _codec.NextOut += nBytes;
+ readAt += nBytes;
+
+ // see if more to copy at beginning of window
+ if (readAt == end && pass == 0)
+ {
+ // wrap pointers
+ readAt = 0;
+ if (writeAt == end)
+ writeAt = 0;
+ }
+ else pass++;
+ }
+
+ // done
+ return r;
+ }
+ }
+
+
+ internal static class InternalInflateConstants
+ {
+ // And'ing with mask[n] masks the lower n bits
+ internal static readonly int[] InflateMask = new int[] {
+ 0x00000000, 0x00000001, 0x00000003, 0x00000007,
+ 0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f,
+ 0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff,
+ 0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff };
+ }
+
+
+ sealed class InflateCodes
+ {
+ // waiting for "i:"=input,
+ // "o:"=output,
+ // "x:"=nothing
+ private const int START = 0; // x: set up for LEN
+ private const int LEN = 1; // i: get length/literal/eob next
+ private const int LENEXT = 2; // i: getting length extra (have base)
+ private const int DIST = 3; // i: get distance next
+ private const int DISTEXT = 4; // i: getting distance extra
+ private const int COPY = 5; // o: copying bytes in window, waiting for space
+ private const int LIT = 6; // o: got literal, waiting for output space
+ private const int WASH = 7; // o: got eob, possibly still output waiting
+ private const int END = 8; // x: got eob and all data flushed
+ private const int BADCODE = 9; // x: got error
+
+ internal int mode; // current inflate_codes mode
+
+ // mode dependent information
+ internal int len;
+
+ internal int[] tree; // pointer into tree
+ internal int tree_index = 0;
+ internal int need; // bits needed
+
+ internal int lit;
+
+ // if EXT or COPY, where and how much
+ internal int bitsToGet; // bits to get for extra
+ internal int dist; // distance back to copy from
+
+ internal byte lbits; // ltree bits decoded per branch
+        internal byte dbits; // dtree bits decoded per branch
+ internal int[] ltree; // literal/length/eob tree
+ internal int ltree_index; // literal/length/eob tree
+ internal int[] dtree; // distance tree
+ internal int dtree_index; // distance tree
+
+ internal InflateCodes()
+ {
+ }
+
+ internal void Init(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index)
+ {
+ mode = START;
+ lbits = (byte)bl;
+ dbits = (byte)bd;
+ ltree = tl;
+ ltree_index = tl_index;
+ dtree = td;
+ dtree_index = td_index;
+ tree = null;
+ }
+
+ internal int Process(InflateBlocks blocks, int r)
+ {
+ int j; // temporary storage
+ int tindex; // temporary pointer
+ int e; // extra bits or operation
+ int b = 0; // bit buffer
+ int k = 0; // bits in bit buffer
+ int p = 0; // input data pointer
+ int n; // bytes available there
+ int q; // output window write pointer
+ int m; // bytes to end of window or read pointer
+ int f; // pointer to copy strings from
+
+ ZlibCodec z = blocks._codec;
+
+ // copy input/output information to locals (UPDATE macro restores)
+ p = z.NextIn;
+ n = z.AvailableBytesIn;
+ b = blocks.bitb;
+ k = blocks.bitk;
+ q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
+
+ // process input and output based on current state
+ while (true)
+ {
+ switch (mode)
+ {
+ // waiting for "i:"=input, "o:"=output, "x:"=nothing
+ case START: // x: set up for LEN
+ if (m >= 258 && n >= 10)
+ {
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n;
+ z.TotalBytesIn += p - z.NextIn;
+ z.NextIn = p;
+ blocks.writeAt = q;
+ r = InflateFast(lbits, dbits, ltree, ltree_index, dtree, dtree_index, blocks, z);
+
+ p = z.NextIn;
+ n = z.AvailableBytesIn;
+ b = blocks.bitb;
+ k = blocks.bitk;
+ q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
+
+ if (r != ZlibConstants.Z_OK)
+ {
+ mode = (r == ZlibConstants.Z_STREAM_END) ? WASH : BADCODE;
+ break;
+ }
+ }
+ need = lbits;
+ tree = ltree;
+ tree_index = ltree_index;
+
+ mode = LEN;
+ goto case LEN;
+
+ case LEN: // i: get length/literal/eob next
+ j = need;
+
+ while (k < j)
+ {
+ if (n != 0)
+ r = ZlibConstants.Z_OK;
+ else
+ {
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n;
+ z.TotalBytesIn += p - z.NextIn;
+ z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+ }
+ n--;
+ b |= (z.InputBuffer[p++] & 0xff) << k;
+ k += 8;
+ }
+
+ tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3;
+
+ b >>= (tree[tindex + 1]);
+ k -= (tree[tindex + 1]);
+
+ e = tree[tindex];
+
+ if (e == 0)
+ {
+ // literal
+ lit = tree[tindex + 2];
+ mode = LIT;
+ break;
+ }
+ if ((e & 16) != 0)
+ {
+ // length
+ bitsToGet = e & 15;
+ len = tree[tindex + 2];
+ mode = LENEXT;
+ break;
+ }
+ if ((e & 64) == 0)
+ {
+ // next table
+ need = e;
+ tree_index = tindex / 3 + tree[tindex + 2];
+ break;
+ }
+ if ((e & 32) != 0)
+ {
+ // end of block
+ mode = WASH;
+ break;
+ }
+ mode = BADCODE; // invalid code
+ z.Message = "invalid literal/length code";
+ r = ZlibConstants.Z_DATA_ERROR;
+
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n;
+ z.TotalBytesIn += p - z.NextIn;
+ z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+
+
+ case LENEXT: // i: getting length extra (have base)
+ j = bitsToGet;
+
+ while (k < j)
+ {
+ if (n != 0)
+ r = ZlibConstants.Z_OK;
+ else
+ {
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+ }
+ n--; b |= (z.InputBuffer[p++] & 0xff) << k;
+ k += 8;
+ }
+
+ len += (b & InternalInflateConstants.InflateMask[j]);
+
+ b >>= j;
+ k -= j;
+
+ need = dbits;
+ tree = dtree;
+ tree_index = dtree_index;
+ mode = DIST;
+ goto case DIST;
+
+ case DIST: // i: get distance next
+ j = need;
+
+ while (k < j)
+ {
+ if (n != 0)
+ r = ZlibConstants.Z_OK;
+ else
+ {
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+ }
+ n--; b |= (z.InputBuffer[p++] & 0xff) << k;
+ k += 8;
+ }
+
+ tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3;
+
+ b >>= tree[tindex + 1];
+ k -= tree[tindex + 1];
+
+ e = (tree[tindex]);
+ if ((e & 0x10) != 0)
+ {
+ // distance
+ bitsToGet = e & 15;
+ dist = tree[tindex + 2];
+ mode = DISTEXT;
+ break;
+ }
+ if ((e & 64) == 0)
+ {
+ // next table
+ need = e;
+ tree_index = tindex / 3 + tree[tindex + 2];
+ break;
+ }
+ mode = BADCODE; // invalid code
+ z.Message = "invalid distance code";
+ r = ZlibConstants.Z_DATA_ERROR;
+
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+
+
+ case DISTEXT: // i: getting distance extra
+ j = bitsToGet;
+
+ while (k < j)
+ {
+ if (n != 0)
+ r = ZlibConstants.Z_OK;
+ else
+ {
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+ }
+ n--; b |= (z.InputBuffer[p++] & 0xff) << k;
+ k += 8;
+ }
+
+ dist += (b & InternalInflateConstants.InflateMask[j]);
+
+ b >>= j;
+ k -= j;
+
+ mode = COPY;
+ goto case COPY;
+
+ case COPY: // o: copying bytes in window, waiting for space
+ f = q - dist;
+ while (f < 0)
+ {
+                            // modulo window size ("while" instead of "if" handles invalid distances)
+                            f += blocks.end;
+ }
+ while (len != 0)
+ {
+ if (m == 0)
+ {
+ if (q == blocks.end && blocks.readAt != 0)
+ {
+ q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
+ }
+ if (m == 0)
+ {
+ blocks.writeAt = q; r = blocks.Flush(r);
+ q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
+
+ if (q == blocks.end && blocks.readAt != 0)
+ {
+ q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
+ }
+
+ if (m == 0)
+ {
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n;
+ z.TotalBytesIn += p - z.NextIn;
+ z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+ }
+ }
+ }
+
+ blocks.window[q++] = blocks.window[f++]; m--;
+
+ if (f == blocks.end)
+ f = 0;
+ len--;
+ }
+ mode = START;
+ break;
+
+ case LIT: // o: got literal, waiting for output space
+ if (m == 0)
+ {
+ if (q == blocks.end && blocks.readAt != 0)
+ {
+ q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
+ }
+ if (m == 0)
+ {
+ blocks.writeAt = q; r = blocks.Flush(r);
+ q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
+
+ if (q == blocks.end && blocks.readAt != 0)
+ {
+ q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
+ }
+ if (m == 0)
+ {
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+ }
+ }
+ }
+ r = ZlibConstants.Z_OK;
+
+ blocks.window[q++] = (byte)lit; m--;
+
+ mode = START;
+ break;
+
+ case WASH: // o: got eob, possibly more output
+ if (k > 7)
+ {
+ // return unused byte, if any
+ k -= 8;
+ n++;
+ p--; // can always return one
+ }
+
+ blocks.writeAt = q; r = blocks.Flush(r);
+ q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
+
+ if (blocks.readAt != blocks.writeAt)
+ {
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+ }
+ mode = END;
+ goto case END;
+
+ case END:
+ r = ZlibConstants.Z_STREAM_END;
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+
+ case BADCODE: // x: got error
+
+ r = ZlibConstants.Z_DATA_ERROR;
+
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+
+ default:
+ r = ZlibConstants.Z_STREAM_ERROR;
+
+ blocks.bitb = b; blocks.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ blocks.writeAt = q;
+ return blocks.Flush(r);
+ }
+ }
+ }
+
+
+ // Called with number of bytes left to write in window at least 258
+ // (the maximum string length) and number of input bytes available
+ // at least ten. The ten bytes are six bytes for the longest length/
+ // distance pair plus four bytes for overloading the bit buffer.
+
+ internal int InflateFast(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index, InflateBlocks s, ZlibCodec z)
+ {
+ int t; // temporary pointer
+ int[] tp; // temporary pointer
+ int tp_index; // temporary pointer
+ int e; // extra bits or operation
+ int b; // bit buffer
+ int k; // bits in bit buffer
+ int p; // input data pointer
+ int n; // bytes available there
+ int q; // output window write pointer
+ int m; // bytes to end of window or read pointer
+ int ml; // mask for literal/length tree
+ int md; // mask for distance tree
+ int c; // bytes to copy
+ int d; // distance back to copy from
+ int r; // copy source pointer
+
+ int tp_index_t_3; // (tp_index+t)*3
+
+ // load input, output, bit values
+ p = z.NextIn; n = z.AvailableBytesIn; b = s.bitb; k = s.bitk;
+ q = s.writeAt; m = q < s.readAt ? s.readAt - q - 1 : s.end - q;
+
+ // initialize masks
+ ml = InternalInflateConstants.InflateMask[bl];
+ md = InternalInflateConstants.InflateMask[bd];
+
+ // do until not enough input or output space for fast loop
+ do
+ {
+ // assume called with m >= 258 && n >= 10
+ // get literal/length code
+ while (k < (20))
+ {
+ // max bits for literal/length code
+ n--;
+ b |= (z.InputBuffer[p++] & 0xff) << k; k += 8;
+ }
+
+ t = b & ml;
+ tp = tl;
+ tp_index = tl_index;
+ tp_index_t_3 = (tp_index + t) * 3;
+ if ((e = tp[tp_index_t_3]) == 0)
+ {
+ b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);
+
+ s.window[q++] = (byte)tp[tp_index_t_3 + 2];
+ m--;
+ continue;
+ }
+ do
+ {
+
+ b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);
+
+ if ((e & 16) != 0)
+ {
+ e &= 15;
+ c = tp[tp_index_t_3 + 2] + ((int)b & InternalInflateConstants.InflateMask[e]);
+
+ b >>= e; k -= e;
+
+ // decode distance base of block to copy
+ while (k < 15)
+ {
+ // max bits for distance code
+ n--;
+ b |= (z.InputBuffer[p++] & 0xff) << k; k += 8;
+ }
+
+ t = b & md;
+ tp = td;
+ tp_index = td_index;
+ tp_index_t_3 = (tp_index + t) * 3;
+ e = tp[tp_index_t_3];
+
+ do
+ {
+
+ b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);
+
+ if ((e & 16) != 0)
+ {
+ // get extra bits to add to distance base
+ e &= 15;
+ while (k < e)
+ {
+ // get extra bits (up to 13)
+ n--;
+ b |= (z.InputBuffer[p++] & 0xff) << k; k += 8;
+ }
+
+ d = tp[tp_index_t_3 + 2] + (b & InternalInflateConstants.InflateMask[e]);
+
+ b >>= e; k -= e;
+
+ // do the copy
+ m -= c;
+ if (q >= d)
+ {
+ // offset before dest
+ // just copy
+ r = q - d;
+ if (q - r > 0 && 2 > (q - r))
+ {
+ s.window[q++] = s.window[r++]; // minimum count is three,
+ s.window[q++] = s.window[r++]; // so unroll loop a little
+ c -= 2;
+ }
+ else
+ {
+ Array.Copy(s.window, r, s.window, q, 2);
+ q += 2; r += 2; c -= 2;
+ }
+ }
+ else
+ {
+ // else offset after destination
+ r = q - d;
+ do
+ {
+ r += s.end; // force pointer in window
+ }
+ while (r < 0); // covers invalid distances
+ e = s.end - r;
+ if (c > e)
+ {
+ // if source crosses,
+ c -= e; // wrapped copy
+ if (q - r > 0 && e > (q - r))
+ {
+ do
+ {
+ s.window[q++] = s.window[r++];
+ }
+ while (--e != 0);
+ }
+ else
+ {
+ Array.Copy(s.window, r, s.window, q, e);
+ q += e; r += e; e = 0;
+ }
+ r = 0; // copy rest from start of window
+ }
+ }
+
+ // copy all or what's left
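+                                // if the remaining count still exceeds the back-distance, the source
+                                // and destination overlap and the run must be replicated byte-by-byte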
+ if (q - r > 0 && c > (q - r))
+ {
+ do
+ {
+ s.window[q++] = s.window[r++];
+ }
+ while (--c != 0);
+ }
+ else
+ {
+ Array.Copy(s.window, r, s.window, q, c);
+ q += c; r += c; c = 0;
+ }
+ break;
+ }
+ else if ((e & 64) == 0)
+ {
+ t += tp[tp_index_t_3 + 2];
+ t += (b & InternalInflateConstants.InflateMask[e]);
+ tp_index_t_3 = (tp_index + t) * 3;
+ e = tp[tp_index_t_3];
+ }
+ else
+ {
+ z.Message = "invalid distance code";
+
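+                                // give back any whole bytes still sitting in the bit buffer
+                                // to the input before reporting the error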
+ c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);
+
+ s.bitb = b; s.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ s.writeAt = q;
+
+ return ZlibConstants.Z_DATA_ERROR;
+ }
+ }
+ while (true);
+ break;
+ }
+
+ if ((e & 64) == 0)
+ {
+ t += tp[tp_index_t_3 + 2];
+ t += (b & InternalInflateConstants.InflateMask[e]);
+ tp_index_t_3 = (tp_index + t) * 3;
+ if ((e = tp[tp_index_t_3]) == 0)
+ {
+ b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);
+ s.window[q++] = (byte)tp[tp_index_t_3 + 2];
+ m--;
+ break;
+ }
+ }
+ else if ((e & 32) != 0)
+ {
+ c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);
+
+ s.bitb = b; s.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ s.writeAt = q;
+
+ return ZlibConstants.Z_STREAM_END;
+ }
+ else
+ {
+ z.Message = "invalid literal/length code";
+
+ c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);
+
+ s.bitb = b; s.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ s.writeAt = q;
+
+ return ZlibConstants.Z_DATA_ERROR;
+ }
+ }
+ while (true);
+ }
+ while (m >= 258 && n >= 10);
+
+ // not enough input or output--restore pointers and return
+ c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);
+
+ s.bitb = b; s.bitk = k;
+ z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
+ s.writeAt = q;
+
+ return ZlibConstants.Z_OK;
+ }
+ }
+
+
+ internal sealed class InflateManager
+ {
+ // preset dictionary flag in zlib header
+ private const int PRESET_DICT = 0x20;
+
+ private const int Z_DEFLATED = 8;
+
+ private enum InflateManagerMode
+ {
+ METHOD = 0, // waiting for method byte
+ FLAG = 1, // waiting for flag byte
+ DICT4 = 2, // four dictionary check bytes to go
+ DICT3 = 3, // three dictionary check bytes to go
+ DICT2 = 4, // two dictionary check bytes to go
+ DICT1 = 5, // one dictionary check byte to go
+ DICT0 = 6, // waiting for inflateSetDictionary
+ BLOCKS = 7, // decompressing blocks
+ CHECK4 = 8, // four check bytes to go
+ CHECK3 = 9, // three check bytes to go
+ CHECK2 = 10, // two check bytes to go
+ CHECK1 = 11, // one check byte to go
+ DONE = 12, // finished check, done
+ BAD = 13, // got an error--stay here
+ }
+
+ private InflateManagerMode mode; // current inflate mode
+ internal ZlibCodec _codec; // pointer back to this zlib stream
+
+ // mode dependent information
+ internal int method; // if FLAGS, method byte
+
+ // if CHECK, check values to compare
+ internal uint computedCheck; // computed check value
+ internal uint expectedCheck; // stream check value
+
+ // if BAD, inflateSync's marker bytes count
+ internal int marker;
+
+ // mode independent information
+ //internal int nowrap; // flag for no wrapper
+ private bool _handleRfc1950HeaderBytes = true;
+ internal bool HandleRfc1950HeaderBytes
+ {
+ get { return _handleRfc1950HeaderBytes; }
+ set { _handleRfc1950HeaderBytes = value; }
+ }
+ internal int wbits; // log2(window size) (8..15, defaults to 15)
+
+ internal InflateBlocks blocks; // current inflate_blocks state
+
+ public InflateManager() { }
+
+ public InflateManager(bool expectRfc1950HeaderBytes)
+ {
+ _handleRfc1950HeaderBytes = expectRfc1950HeaderBytes;
+ }
+
+ internal int Reset()
+ {
+ _codec.TotalBytesIn = _codec.TotalBytesOut = 0;
+ _codec.Message = null;
+ mode = HandleRfc1950HeaderBytes ? InflateManagerMode.METHOD : InflateManagerMode.BLOCKS;
+ blocks.Reset();
+ return ZlibConstants.Z_OK;
+ }
+
+ internal int End()
+ {
+ if (blocks != null)
+ blocks.Free();
+ blocks = null;
+ return ZlibConstants.Z_OK;
+ }
+
+ internal int Initialize(ZlibCodec codec, int w)
+ {
+ _codec = codec;
+ _codec.Message = null;
+ blocks = null;
+
+ // handle undocumented nowrap option (no zlib header or check)
+ //nowrap = 0;
+ //if (w < 0)
+ //{
+ // w = - w;
+ // nowrap = 1;
+ //}
+
+ // set window size
+ if (w < 8 || w > 15)
+ {
+ End();
+ throw new ZlibException("Bad window size.");
+
+ //return ZlibConstants.Z_STREAM_ERROR;
+ }
+ wbits = w;
+
+ blocks = new InflateBlocks(codec,
+ HandleRfc1950HeaderBytes ? this : null,
+ 1 << w);
+
+ // reset state
+ Reset();
+ return ZlibConstants.Z_OK;
+ }
+
+
+ internal int Inflate(FlushType flush)
+ {
+ int b;
+
+ if (_codec.InputBuffer == null)
+ throw new ZlibException("InputBuffer is null. ");
+
+// int f = (flush == FlushType.Finish)
+// ? ZlibConstants.Z_BUF_ERROR
+// : ZlibConstants.Z_OK;
+
+ // workitem 8870
+ int f = ZlibConstants.Z_OK;
+ int r = ZlibConstants.Z_BUF_ERROR;
+
+ while (true)
+ {
+ switch (mode)
+ {
+ case InflateManagerMode.METHOD:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ if (((method = _codec.InputBuffer[_codec.NextIn++]) & 0xf) != Z_DEFLATED)
+ {
+ mode = InflateManagerMode.BAD;
+ _codec.Message = String.Format("unknown compression method (0x{0:X2})", method);
+ marker = 5; // can't try inflateSync
+ break;
+ }
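+                        // CINFO (the upper nibble of CMF) encodes log2(window size) - 8
+                        // and must not exceed the configured wbits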
+ if ((method >> 4) + 8 > wbits)
+ {
+ mode = InflateManagerMode.BAD;
+ _codec.Message = String.Format("invalid window size ({0})", (method >> 4) + 8);
+ marker = 5; // can't try inflateSync
+ break;
+ }
+ mode = InflateManagerMode.FLAG;
+ break;
+
+
+ case InflateManagerMode.FLAG:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ b = (_codec.InputBuffer[_codec.NextIn++]) & 0xff;
+
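+                        // RFC 1950: the CMF/FLG pair, read as a 16-bit big-endian value,
+                        // must be a multiple of 31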
+ if ((((method << 8) + b) % 31) != 0)
+ {
+ mode = InflateManagerMode.BAD;
+ _codec.Message = "incorrect header check";
+ marker = 5; // can't try inflateSync
+ break;
+ }
+
+ mode = ((b & PRESET_DICT) == 0)
+ ? InflateManagerMode.BLOCKS
+ : InflateManagerMode.DICT4;
+ break;
+
+ case InflateManagerMode.DICT4:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000);
+ mode = InflateManagerMode.DICT3;
+ break;
+
+ case InflateManagerMode.DICT3:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000);
+ mode = InflateManagerMode.DICT2;
+ break;
+
+ case InflateManagerMode.DICT2:
+
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00);
+ mode = InflateManagerMode.DICT1;
+ break;
+
+
+ case InflateManagerMode.DICT1:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--; _codec.TotalBytesIn++;
+ expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff);
+ _codec._Adler32 = expectedCheck;
+ mode = InflateManagerMode.DICT0;
+ return ZlibConstants.Z_NEED_DICT;
+
+
+ case InflateManagerMode.DICT0:
+ mode = InflateManagerMode.BAD;
+ _codec.Message = "need dictionary";
+ marker = 0; // can try inflateSync
+ return ZlibConstants.Z_STREAM_ERROR;
+
+
+ case InflateManagerMode.BLOCKS:
+ r = blocks.Process(r);
+ if (r == ZlibConstants.Z_DATA_ERROR)
+ {
+ mode = InflateManagerMode.BAD;
+ marker = 0; // can try inflateSync
+ break;
+ }
+
+ if (r == ZlibConstants.Z_OK) r = f;
+
+ if (r != ZlibConstants.Z_STREAM_END)
+ return r;
+
+ r = f;
+ computedCheck = blocks.Reset();
+ if (!HandleRfc1950HeaderBytes)
+ {
+ mode = InflateManagerMode.DONE;
+ return ZlibConstants.Z_STREAM_END;
+ }
+ mode = InflateManagerMode.CHECK4;
+ break;
+
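+                    // the RFC 1950 trailer is the Adler-32 checksum of the uncompressed
+                    // data, stored big-endian; it is read one byte per state below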
+ case InflateManagerMode.CHECK4:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000);
+ mode = InflateManagerMode.CHECK3;
+ break;
+
+ case InflateManagerMode.CHECK3:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--; _codec.TotalBytesIn++;
+ expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000);
+ mode = InflateManagerMode.CHECK2;
+ break;
+
+ case InflateManagerMode.CHECK2:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00);
+ mode = InflateManagerMode.CHECK1;
+ break;
+
+ case InflateManagerMode.CHECK1:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--; _codec.TotalBytesIn++;
+ expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff);
+ if (computedCheck != expectedCheck)
+ {
+ mode = InflateManagerMode.BAD;
+ _codec.Message = "incorrect data check";
+ marker = 5; // can't try inflateSync
+ break;
+ }
+ mode = InflateManagerMode.DONE;
+ return ZlibConstants.Z_STREAM_END;
+
+ case InflateManagerMode.DONE:
+ return ZlibConstants.Z_STREAM_END;
+
+ case InflateManagerMode.BAD:
+ throw new ZlibException(String.Format("Bad state ({0})", _codec.Message));
+
+ default:
+ throw new ZlibException("Stream error.");
+
+ }
+ }
+ }
+
+
+
+ internal int SetDictionary(byte[] dictionary)
+ {
+ int index = 0;
+ int length = dictionary.Length;
+ if (mode != InflateManagerMode.DICT0)
+ throw new ZlibException("Stream error.");
+
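+            // the DICTID field of the zlib header is the Adler-32 of the required
+            // dictionary (RFC 1950); reject a dictionary that does not match it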
+ if (Adler.Adler32(1, dictionary, 0, dictionary.Length) != _codec._Adler32)
+ {
+ return ZlibConstants.Z_DATA_ERROR;
+ }
+
+ _codec._Adler32 = Adler.Adler32(0, null, 0, 0);
+
+ if (length >= (1 << wbits))
+ {
+ length = (1 << wbits) - 1;
+ index = dictionary.Length - length;
+ }
+ blocks.SetDictionary(dictionary, index, length);
+ mode = InflateManagerMode.BLOCKS;
+ return ZlibConstants.Z_OK;
+ }
+
+
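+        // 00 00 FF FF is the LEN/NLEN signature of the empty stored block emitted
+        // by Z_SYNC_FLUSH / Z_FULL_FLUSH; Sync() scans the input for it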
+ private static readonly byte[] mark = new byte[] { 0, 0, 0xff, 0xff };
+
+ internal int Sync()
+ {
+ int n; // number of bytes to look at
+ int p; // pointer to bytes
+ int m; // number of marker bytes found in a row
+ long r, w; // temporaries to save total_in and total_out
+
+ // set up
+ if (mode != InflateManagerMode.BAD)
+ {
+ mode = InflateManagerMode.BAD;
+ marker = 0;
+ }
+ if ((n = _codec.AvailableBytesIn) == 0)
+ return ZlibConstants.Z_BUF_ERROR;
+ p = _codec.NextIn;
+ m = marker;
+
+ // search
+ while (n != 0 && m < 4)
+ {
+ if (_codec.InputBuffer[p] == mark[m])
+ {
+ m++;
+ }
+ else if (_codec.InputBuffer[p] != 0)
+ {
+ m = 0;
+ }
+ else
+ {
+ m = 4 - m;
+ }
+ p++; n--;
+ }
+
+ // restore
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ _codec.AvailableBytesIn = n;
+ marker = m;
+
+ // return no joy or set up to restart on a new block
+ if (m != 4)
+ {
+ return ZlibConstants.Z_DATA_ERROR;
+ }
+ r = _codec.TotalBytesIn;
+ w = _codec.TotalBytesOut;
+ Reset();
+ _codec.TotalBytesIn = r;
+ _codec.TotalBytesOut = w;
+ mode = InflateManagerMode.BLOCKS;
+ return ZlibConstants.Z_OK;
+ }
+
+
+        // Returns 1 if inflate is currently at the end of a block generated
+ // by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
+ // implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH
+ // but removes the length bytes of the resulting empty stored block. When
+ // decompressing, PPP checks that at the end of input packet, inflate is
+ // waiting for these length bytes.
+ internal int SyncPoint(ZlibCodec z)
+ {
+ return blocks.SyncPoint();
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/ZipFile/ZLib/Tree.cs b/SabreTools.Library/External/Compress/ZipFile/ZLib/Tree.cs
new file mode 100644
index 00000000..98441bd7
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ZipFile/ZLib/Tree.cs
@@ -0,0 +1,421 @@
+// Tree.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2009-October-28 13:29:50>
+//
+// ------------------------------------------------------------------
+//
+// This module defines classes for zlib compression and
+// decompression. This code is derived from the jzlib implementation of
+// zlib. In keeping with the license for jzlib, the copyright to that
+// code is below.
+//
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+namespace Compress.ZipFile.ZLib
+{
+ sealed class Tree
+ {
+ private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1);
+
+ // extra bits for each length code
+ internal static readonly int[] ExtraLengthBits = new int[]
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0
+ };
+
+ // extra bits for each distance code
+ internal static readonly int[] ExtraDistanceBits = new int[]
+ {
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
+ };
+
+ // extra bits for each bit length code
+ internal static readonly int[] extra_blbits = new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7};
+
+ internal static readonly sbyte[] bl_order = new sbyte[]{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+
+ // The lengths of the bit length codes are sent in order of decreasing
+ // probability, to avoid transmitting the lengths for unused bit
+ // length codes.
+
+ internal const int Buf_size = 8 * 2;
+
+ // see definition of array dist_code below
+ //internal const int DIST_CODE_LEN = 512;
+
+ private static readonly sbyte[] _dist_code = new sbyte[]
+ {
+ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 0, 0, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
+ };
+
+ internal static readonly sbyte[] LengthCode = new sbyte[]
+ {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15,
+ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28
+ };
+
+
+ internal static readonly int[] LengthBase = new int[]
+ {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
+ 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0
+ };
+
+
+ internal static readonly int[] DistanceBase = new int[]
+ {
+ 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
+ 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576
+ };
+
+
+ ///
+ /// Map from a distance to a distance code.
+ ///
+ ///
+ /// No side effects. _dist_code[256] and _dist_code[257] are never used.
+ ///
+ internal static int DistanceCode(int dist)
+ {
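+            // distances below 256 index the table directly; larger distances
+            // index the second half of the table by (dist >> 7)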
+ return (dist < 256)
+ ? _dist_code[dist]
+ : _dist_code[256 + SharedUtils.URShift(dist, 7)];
+ }
+
+ internal short[] dyn_tree; // the dynamic tree
+ internal int max_code; // largest code with non zero frequency
+ internal StaticTree staticTree; // the corresponding static tree
+
+ // Compute the optimal bit lengths for a tree and update the total bit length
+ // for the current block.
+ // IN assertion: the fields freq and dad are set, heap[heap_max] and
+ // above are the tree nodes sorted by increasing frequency.
+ // OUT assertions: the field len is set to the optimal bit length, the
+ // array bl_count contains the frequencies for each bit length.
+ // The length opt_len is updated; static_len is also updated if stree is
+ // not null.
+ internal void gen_bitlen(DeflateManager s)
+ {
+ short[] tree = dyn_tree;
+ short[] stree = staticTree.treeCodes;
+ int[] extra = staticTree.extraBits;
+ int base_Renamed = staticTree.extraBase;
+ int max_length = staticTree.maxLength;
+ int h; // heap index
+ int n, m; // iterate over the tree elements
+ int bits; // bit length
+ int xbits; // extra bits
+ short f; // frequency
+ int overflow = 0; // number of elements with bit length too large
+
+ for (bits = 0; bits <= InternalConstants.MAX_BITS; bits++)
+ s.bl_count[bits] = 0;
+
+ // In a first pass, compute the optimal bit lengths (which may
+ // overflow in the case of the bit length tree).
+ tree[s.heap[s.heap_max] * 2 + 1] = 0; // root of the heap
+
+ for (h = s.heap_max + 1; h < HEAP_SIZE; h++)
+ {
+ n = s.heap[h];
+ bits = tree[tree[n * 2 + 1] * 2 + 1] + 1;
+ if (bits > max_length)
+ {
+ bits = max_length; overflow++;
+ }
+ tree[n * 2 + 1] = (short) bits;
+ // We overwrite tree[n*2+1] which is no longer needed
+
+ if (n > max_code)
+ continue; // not a leaf node
+
+ s.bl_count[bits]++;
+ xbits = 0;
+ if (n >= base_Renamed)
+ xbits = extra[n - base_Renamed];
+ f = tree[n * 2];
+ s.opt_len += f * (bits + xbits);
+ if (stree != null)
+ s.static_len += f * (stree[n * 2 + 1] + xbits);
+ }
+ if (overflow == 0)
+ return ;
+
+ // This happens for example on obj2 and pic of the Calgary corpus
+ // Find the first bit length which could increase:
+ do
+ {
+ bits = max_length - 1;
+ while (s.bl_count[bits] == 0)
+ bits--;
+ s.bl_count[bits]--; // move one leaf down the tree
+ s.bl_count[bits + 1] = (short) (s.bl_count[bits + 1] + 2); // move one overflow item as its brother
+ s.bl_count[max_length]--;
+ // The brother of the overflow item also moves one step up,
+ // but this does not affect bl_count[max_length]
+ overflow -= 2;
+ }
+ while (overflow > 0);
+
+ for (bits = max_length; bits != 0; bits--)
+ {
+ n = s.bl_count[bits];
+ while (n != 0)
+ {
+ m = s.heap[--h];
+ if (m > max_code)
+ continue;
+ if (tree[m * 2 + 1] != bits)
+ {
+ s.opt_len = (int) (s.opt_len + ((long) bits - (long) tree[m * 2 + 1]) * (long) tree[m * 2]);
+ tree[m * 2 + 1] = (short) bits;
+ }
+ n--;
+ }
+ }
+ }
+
+ // Construct one Huffman tree and assigns the code bit strings and lengths.
+ // Update the total bit length for the current block.
+ // IN assertion: the field freq is set for all tree elements.
+ // OUT assertions: the fields len and code are set to the optimal bit length
+ // and corresponding code. The length opt_len is updated; static_len is
+ // also updated if stree is not null. The field max_code is set.
+ internal void build_tree(DeflateManager s)
+ {
+ short[] tree = dyn_tree;
+ short[] stree = staticTree.treeCodes;
+ int elems = staticTree.elems;
+ int n, m; // iterate over heap elements
+ int max_code = -1; // largest code with non zero frequency
+ int node; // new node being created
+
+ // Construct the initial heap, with least frequent element in
+ // heap[1]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ // heap[0] is not used.
+ s.heap_len = 0;
+ s.heap_max = HEAP_SIZE;
+
+ for (n = 0; n < elems; n++)
+ {
+ if (tree[n * 2] != 0)
+ {
+ s.heap[++s.heap_len] = max_code = n;
+ s.depth[n] = 0;
+ }
+ else
+ {
+ tree[n * 2 + 1] = 0;
+ }
+ }
+
+ // The pkzip format requires that at least one distance code exists,
+ // and that at least one bit should be sent even if there is only one
+ // possible code. So to avoid special checks later on we force at least
+ // two codes of non zero frequency.
+ while (s.heap_len < 2)
+ {
+ node = s.heap[++s.heap_len] = (max_code < 2?++max_code:0);
+ tree[node * 2] = 1;
+ s.depth[node] = 0;
+ s.opt_len--;
+ if (stree != null)
+ s.static_len -= stree[node * 2 + 1];
+ // node is 0 or 1 so it does not have extra bits
+ }
+ this.max_code = max_code;
+
+ // The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ // establish sub-heaps of increasing lengths:
+
+ for (n = s.heap_len / 2; n >= 1; n--)
+ s.pqdownheap(tree, n);
+
+ // Construct the Huffman tree by repeatedly combining the least two
+ // frequent nodes.
+
+ node = elems; // next internal node of the tree
+ do
+ {
+ // n = node of least frequency
+ n = s.heap[1];
+ s.heap[1] = s.heap[s.heap_len--];
+ s.pqdownheap(tree, 1);
+ m = s.heap[1]; // m = node of next least frequency
+
+ s.heap[--s.heap_max] = n; // keep the nodes sorted by frequency
+ s.heap[--s.heap_max] = m;
+
+ // Create a new node father of n and m
+ tree[node * 2] = unchecked((short) (tree[n * 2] + tree[m * 2]));
+ s.depth[node] = (sbyte) (System.Math.Max((byte) s.depth[n], (byte) s.depth[m]) + 1);
+ tree[n * 2 + 1] = tree[m * 2 + 1] = (short) node;
+
+ // and insert the new node in the heap
+ s.heap[1] = node++;
+ s.pqdownheap(tree, 1);
+ }
+ while (s.heap_len >= 2);
+
+ s.heap[--s.heap_max] = s.heap[1];
+
+ // At this point, the fields freq and dad are set. We can now
+ // generate the bit lengths.
+
+ gen_bitlen(s);
+
+ // The field len is now set, we can generate the bit codes
+ gen_codes(tree, max_code, s.bl_count);
+ }
+
+ // Generate the codes for a given tree and bit counts (which need not be
+ // optimal).
+ // IN assertion: the array bl_count contains the bit length statistics for
+ // the given tree and the field len is set for all tree elements.
+ // OUT assertion: the field code is set for all tree elements of non
+ // zero code length.
+ internal static void gen_codes(short[] tree, int max_code, short[] bl_count)
+ {
+ short[] next_code = new short[InternalConstants.MAX_BITS + 1]; // next code value for each bit length
+ short code = 0; // running code value
+ int bits; // bit index
+ int n; // code index
+
+ // The distribution counts are first used to generate the code values
+ // without bit reversal.
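+            // Example (RFC 1951, 3.2.2): code lengths (3,3,3,3,3,2,4,4) give
+            // bl_count = {2:1, 3:5, 4:2} and next_code = {2:0, 3:2, 4:14}, so the
+            // 2-bit symbol gets 00, the 3-bit symbols 010..110, the 4-bit 1110 and 1111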
+ for (bits = 1; bits <= InternalConstants.MAX_BITS; bits++)
+ unchecked {
+ next_code[bits] = code = (short) ((code + bl_count[bits - 1]) << 1);
+ }
+
+ // Check that the bit counts in bl_count are consistent. The last code
+ // must be all ones.
+            //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+            //        "inconsistent bit counts");
+
+            for (n = 0; n <= max_code; n++)
+            {
+                int len = tree[n * 2 + 1];
+                if (len == 0)
+                    continue;
+                // Now reverse the bits
+                tree[n * 2] = unchecked((short)bi_reverse(next_code[len]++, len));
+            }
+        }
+
+        // Reverse the first len bits of a code, using straightforward code
+        // (a faster method would use a table)
+        // IN assertion: 1 <= len <= 15
+        internal static int bi_reverse(int code, int len)
+        {
+            int res = 0;
+            do
+            {
+                res |= code & 1;
+                code >>= 1; //SharedUtils.URShift(code, 1);
+                res <<= 1;
+            }
+            while (--len > 0);
+            return res >> 1;
+        }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/ZipFile/ZLib/Zlib.cs b/SabreTools.Library/External/Compress/ZipFile/ZLib/Zlib.cs
new file mode 100644
index 00000000..368a0455
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ZipFile/ZLib/Zlib.cs
@@ -0,0 +1,480 @@
+// Zlib.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009-2011 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// Last Saved: <2011-August-03 19:52:28>
+//
+// ------------------------------------------------------------------
+//
+// This module defines classes for ZLIB compression and
+// decompression. This code is derived from the jzlib implementation of
+// zlib, but significantly modified. The object model is not the same,
+// and many of the behaviors are new or different. Nonetheless, in
+// keeping with the license for jzlib, the copyright to that code is
+// included below.
+//
+// ------------------------------------------------------------------
+//
+// The following notice applies to jzlib:
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// jzlib is based on zlib-1.1.3.
+//
+// The following notice applies to zlib:
+//
+// -----------------------------------------------------------------------
+//
+// Copyright (C) 1995-2004 Jean-loup Gailly and Mark Adler
+//
+// The ZLIB software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+//
+// Jean-loup Gailly jloup@gzip.org
+// Mark Adler madler@alumni.caltech.edu
+//
+// -----------------------------------------------------------------------
+
+
+using System.Runtime.InteropServices;
+
+namespace Compress.ZipFile.ZLib
+{
+
+ ///
+ /// Describes how to flush the current deflate operation.
+ ///
+ ///
+ /// The different FlushType values are useful when using a Deflate in a streaming application.
+ ///
+ public enum FlushType
+ {
+ /// No flush at all.
+ None = 0,
+
+ /// Closes the current block, but doesn't flush it to
+ /// the output. Used internally only in hypothetical
+ /// scenarios. This was supposed to be removed by Zlib, but it is
+ /// still in use in some edge cases.
+ ///
+ Partial,
+
+ ///
+ /// Use this during compression to specify that all pending output should be
+ /// flushed to the output buffer and the output should be aligned on a byte
+ /// boundary. You might use this in a streaming communication scenario, so that
+ /// the decompressor can get all input data available so far. When using this
+ /// with a ZlibCodec, AvailableBytesIn will be zero after the call if
+ /// enough output space has been provided before the call. Flushing will
+ /// degrade compression and so it should be used only when necessary.
+ ///
+ Sync,
+
+ ///
+ /// Use this during compression to specify that all output should be flushed, as
+ /// with FlushType.Sync, but also, the compression state should be reset
+ /// so that decompression can restart from this point if previous compressed
+ /// data has been damaged or if random access is desired. Using
+ /// FlushType.Full too often can significantly degrade the compression.
+ ///
+ Full,
+
+ /// Signals the end of the compression/decompression stream.
+ Finish,
+ }
+
+
+ ///
+ /// The compression level to be used when using a DeflateStream or ZlibStream with CompressionMode.Compress.
+ ///
+ public enum CompressionLevel
+ {
+ ///
+ /// None means that the data will be simply stored, with no change at all.
+ /// If you are producing ZIPs for use on Mac OSX, be aware that archives produced with CompressionLevel.None
+ /// cannot be opened with the default zip reader. Use a different CompressionLevel.
+ ///
+ None= 0,
+ ///
+ /// Same as None.
+ ///
+ Level0 = 0,
+
+ ///
+ /// The fastest but least effective compression.
+ ///
+ BestSpeed = 1,
+
+ ///
+ /// A synonym for BestSpeed.
+ ///
+ Level1 = 1,
+
+ ///
+ /// A little slower, but better, than level 1.
+ ///
+ Level2 = 2,
+
+ ///
+ /// A little slower, but better, than level 2.
+ ///
+ Level3 = 3,
+
+ ///
+ /// A little slower, but better, than level 3.
+ ///
+ Level4 = 4,
+
+ ///
+ /// A little slower than level 4, but with better compression.
+ ///
+ Level5 = 5,
+
+ ///
+ /// The default compression level, with a good balance of speed and compression efficiency.
+ ///
+ Default = 6,
+ ///
+ /// A synonym for Default.
+ ///
+ Level6 = 6,
+
+ ///
+ /// Pretty good compression!
+ ///
+ Level7 = 7,
+
+ ///
+ /// Better compression than Level7!
+ ///
+ Level8 = 8,
+
+ ///
+ /// The "best" compression, where best means greatest reduction in size of the input data stream.
+ /// This is also the slowest compression.
+ ///
+ BestCompression = 9,
+
+ ///
+ /// A synonym for BestCompression.
+ ///
+ Level9 = 9,
+ }
+
+ ///
+ /// Describes options for how the compression algorithm is executed. Different strategies
+ /// work better on different sorts of data. The strategy parameter can affect the compression
+    /// ratio and the speed of compression but not the correctness of the compression.
+ ///
+ public enum CompressionStrategy
+ {
+ ///
+ /// The default strategy is probably the best for normal data.
+ ///
+ Default = 0,
+
+ ///
+ /// The Filtered strategy is intended to be used most effectively with data produced by a
+ /// filter or predictor. By this definition, filtered data consists mostly of small
+ /// values with a somewhat random distribution. In this case, the compression algorithm
+ /// is tuned to compress them better. The effect of Filtered is to force more Huffman
+ /// coding and less string matching; it is a half-step between Default and HuffmanOnly.
+ ///
+ Filtered = 1,
+
+ ///
+ /// Using HuffmanOnly will force the compressor to do Huffman encoding only, with no
+ /// string matching.
+ ///
+ HuffmanOnly = 2,
+ }
+
+
+ ///
+ /// An enum to specify the direction of transcoding - whether to compress or decompress.
+ ///
+ public enum CompressionMode
+ {
+ ///
+ /// Used to specify that the stream should compress the data.
+ ///
+ Compress= 0,
+ ///
+ /// Used to specify that the stream should decompress the data.
+ ///
+ Decompress = 1,
+ }
+
+
+ ///
+ /// A general purpose exception class for exceptions in the Zlib library.
+ ///
+ [Guid("ebc25cf6-9120-4283-b972-0e5520d0000E")]
+ public class ZlibException : System.Exception
+ {
+ ///
+ /// The ZlibException class captures exception information generated
+ /// by the Zlib library.
+ ///
+ public ZlibException()
+ : base()
+ {
+ }
+
+ ///
+ /// This ctor collects a message attached to the exception.
+ ///
+ /// the message for the exception.
+ public ZlibException(System.String s)
+ : base(s)
+ {
+ }
+ }
+
+
+ internal class SharedUtils
+ {
+ ///
+ /// Performs an unsigned bitwise right shift with the specified number
+ ///
+ /// Number to operate on
+ /// Amount of bits to shift
+ /// The resulting number from the shift operation
+ public static int URShift(int number, int bits)
+ {
+ return (int)((uint)number >> bits);
+ }
+ }
+
+ internal static class InternalConstants
+ {
+ internal static readonly int MAX_BITS = 15;
+ internal static readonly int BL_CODES = 19;
+ internal static readonly int D_CODES = 30;
+ internal static readonly int LITERALS = 256;
+ internal static readonly int LENGTH_CODES = 29;
+ internal static readonly int L_CODES = (LITERALS + 1 + LENGTH_CODES);
+
+ // Bit length codes must not exceed MAX_BL_BITS bits
+ internal static readonly int MAX_BL_BITS = 7;
+
+ // repeat previous bit length 3-6 times (2 bits of repeat count)
+ internal static readonly int REP_3_6 = 16;
+
+ // repeat a zero length 3-10 times (3 bits of repeat count)
+ internal static readonly int REPZ_3_10 = 17;
+
+ // repeat a zero length 11-138 times (7 bits of repeat count)
+ internal static readonly int REPZ_11_138 = 18;
+
+ }
+
+ internal sealed class StaticTree
+ {
+ internal static readonly short[] lengthAndLiteralsTreeCodes = new short[] {
+ 12, 8, 140, 8, 76, 8, 204, 8, 44, 8, 172, 8, 108, 8, 236, 8,
+ 28, 8, 156, 8, 92, 8, 220, 8, 60, 8, 188, 8, 124, 8, 252, 8,
+ 2, 8, 130, 8, 66, 8, 194, 8, 34, 8, 162, 8, 98, 8, 226, 8,
+ 18, 8, 146, 8, 82, 8, 210, 8, 50, 8, 178, 8, 114, 8, 242, 8,
+ 10, 8, 138, 8, 74, 8, 202, 8, 42, 8, 170, 8, 106, 8, 234, 8,
+ 26, 8, 154, 8, 90, 8, 218, 8, 58, 8, 186, 8, 122, 8, 250, 8,
+ 6, 8, 134, 8, 70, 8, 198, 8, 38, 8, 166, 8, 102, 8, 230, 8,
+ 22, 8, 150, 8, 86, 8, 214, 8, 54, 8, 182, 8, 118, 8, 246, 8,
+ 14, 8, 142, 8, 78, 8, 206, 8, 46, 8, 174, 8, 110, 8, 238, 8,
+ 30, 8, 158, 8, 94, 8, 222, 8, 62, 8, 190, 8, 126, 8, 254, 8,
+ 1, 8, 129, 8, 65, 8, 193, 8, 33, 8, 161, 8, 97, 8, 225, 8,
+ 17, 8, 145, 8, 81, 8, 209, 8, 49, 8, 177, 8, 113, 8, 241, 8,
+ 9, 8, 137, 8, 73, 8, 201, 8, 41, 8, 169, 8, 105, 8, 233, 8,
+ 25, 8, 153, 8, 89, 8, 217, 8, 57, 8, 185, 8, 121, 8, 249, 8,
+ 5, 8, 133, 8, 69, 8, 197, 8, 37, 8, 165, 8, 101, 8, 229, 8,
+ 21, 8, 149, 8, 85, 8, 213, 8, 53, 8, 181, 8, 117, 8, 245, 8,
+ 13, 8, 141, 8, 77, 8, 205, 8, 45, 8, 173, 8, 109, 8, 237, 8,
+ 29, 8, 157, 8, 93, 8, 221, 8, 61, 8, 189, 8, 125, 8, 253, 8,
+ 19, 9, 275, 9, 147, 9, 403, 9, 83, 9, 339, 9, 211, 9, 467, 9,
+ 51, 9, 307, 9, 179, 9, 435, 9, 115, 9, 371, 9, 243, 9, 499, 9,
+ 11, 9, 267, 9, 139, 9, 395, 9, 75, 9, 331, 9, 203, 9, 459, 9,
+ 43, 9, 299, 9, 171, 9, 427, 9, 107, 9, 363, 9, 235, 9, 491, 9,
+ 27, 9, 283, 9, 155, 9, 411, 9, 91, 9, 347, 9, 219, 9, 475, 9,
+ 59, 9, 315, 9, 187, 9, 443, 9, 123, 9, 379, 9, 251, 9, 507, 9,
+ 7, 9, 263, 9, 135, 9, 391, 9, 71, 9, 327, 9, 199, 9, 455, 9,
+ 39, 9, 295, 9, 167, 9, 423, 9, 103, 9, 359, 9, 231, 9, 487, 9,
+ 23, 9, 279, 9, 151, 9, 407, 9, 87, 9, 343, 9, 215, 9, 471, 9,
+ 55, 9, 311, 9, 183, 9, 439, 9, 119, 9, 375, 9, 247, 9, 503, 9,
+ 15, 9, 271, 9, 143, 9, 399, 9, 79, 9, 335, 9, 207, 9, 463, 9,
+ 47, 9, 303, 9, 175, 9, 431, 9, 111, 9, 367, 9, 239, 9, 495, 9,
+ 31, 9, 287, 9, 159, 9, 415, 9, 95, 9, 351, 9, 223, 9, 479, 9,
+ 63, 9, 319, 9, 191, 9, 447, 9, 127, 9, 383, 9, 255, 9, 511, 9,
+ 0, 7, 64, 7, 32, 7, 96, 7, 16, 7, 80, 7, 48, 7, 112, 7,
+ 8, 7, 72, 7, 40, 7, 104, 7, 24, 7, 88, 7, 56, 7, 120, 7,
+ 4, 7, 68, 7, 36, 7, 100, 7, 20, 7, 84, 7, 52, 7, 116, 7,
+ 3, 8, 131, 8, 67, 8, 195, 8, 35, 8, 163, 8, 99, 8, 227, 8
+ };
+
+ internal static readonly short[] distTreeCodes = new short[] {
+ 0, 5, 16, 5, 8, 5, 24, 5, 4, 5, 20, 5, 12, 5, 28, 5,
+ 2, 5, 18, 5, 10, 5, 26, 5, 6, 5, 22, 5, 14, 5, 30, 5,
+ 1, 5, 17, 5, 9, 5, 25, 5, 5, 5, 21, 5, 13, 5, 29, 5,
+ 3, 5, 19, 5, 11, 5, 27, 5, 7, 5, 23, 5 };
+
+ internal static readonly StaticTree Literals;
+ internal static readonly StaticTree Distances;
+ internal static readonly StaticTree BitLengths;
+
+ internal short[] treeCodes; // static tree or null
+ internal int[] extraBits; // extra bits for each code or null
+ internal int extraBase; // base index for extra_bits
+ internal int elems; // max number of elements in the tree
+ internal int maxLength; // max bit length for the codes
+
+ private StaticTree(short[] treeCodes, int[] extraBits, int extraBase, int elems, int maxLength)
+ {
+ this.treeCodes = treeCodes;
+ this.extraBits = extraBits;
+ this.extraBase = extraBase;
+ this.elems = elems;
+ this.maxLength = maxLength;
+ }
+ static StaticTree()
+ {
+ Literals = new StaticTree(lengthAndLiteralsTreeCodes, Tree.ExtraLengthBits, InternalConstants.LITERALS + 1, InternalConstants.L_CODES, InternalConstants.MAX_BITS);
+ Distances = new StaticTree(distTreeCodes, Tree.ExtraDistanceBits, 0, InternalConstants.D_CODES, InternalConstants.MAX_BITS);
+ BitLengths = new StaticTree(null, Tree.extra_blbits, 0, InternalConstants.BL_CODES, InternalConstants.MAX_BL_BITS);
+ }
+ }
+
+
+
+ ///
+ /// Computes an Adler-32 checksum.
+ ///
+ ///
+ /// The Adler checksum is similar to a CRC checksum, but faster to compute, though less
+ /// reliable. It is used in producing RFC1950 compressed streams. The Adler checksum
+ /// is a required part of the "ZLIB" standard. Applications will almost never need to
+ /// use this class directly.
+ ///
+ ///
+ ///
+ public sealed class Adler
+ {
+ // largest prime smaller than 65536
+ private static readonly uint BASE = 65521;
+ // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
+ private static readonly int NMAX = 5552;
+
+ ///
+ /// Calculates the Adler32 checksum.
+ ///
+ ///
+ ///
+ /// This is used within ZLIB. You probably don't need to use this directly.
+ ///
+ ///
+ ///
+ /// To compute an Adler32 checksum on a byte array:
+ ///
+ /// var adler = Adler.Adler32(0, null, 0, 0);
+ /// adler = Adler.Adler32(adler, buffer, index, length);
+ ///
+ ///
+ public static uint Adler32(uint adler, byte[] buf, int index, int len)
+ {
+ if (buf == null)
+ return 1;
+
+ uint s1 = (uint) (adler & 0xffff);
+ uint s2 = (uint) ((adler >> 16) & 0xffff);
+
+ while (len > 0)
+ {
+ int k = len < NMAX ? len : NMAX;
+ len -= k;
+ while (k >= 16)
+ {
+ //s1 += (buf[index++] & 0xff); s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ k -= 16;
+ }
+ if (k != 0)
+ {
+ do
+ {
+ s1 += buf[index++];
+ s2 += s1;
+ }
+ while (--k != 0);
+ }
+ s1 %= BASE;
+ s2 %= BASE;
+ }
+ return (uint)((s2 << 16) | s1);
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/ZipFile/ZLib/ZlibBaseStream.cs b/SabreTools.Library/External/Compress/ZipFile/ZLib/ZlibBaseStream.cs
new file mode 100644
index 00000000..bb48fcb6
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ZipFile/ZLib/ZlibBaseStream.cs
@@ -0,0 +1,557 @@
+// ZlibBaseStream.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2011-August-06 21:22:38>
+//
+// ------------------------------------------------------------------
+//
+// This module defines the ZlibBaseStream class, which is an internal
+// base class for DeflateStream, ZlibStream and GZipStream.
+//
+// ------------------------------------------------------------------
+
+using System;
+using System.IO;
+
+namespace Compress.ZipFile.ZLib
+{
+
+ public enum ZlibStreamFlavor { ZLIB = 1950, DEFLATE = 1951, GZIP = 1952 }
+
+ public class ZlibBaseStream : System.IO.Stream
+ {
+ protected internal ZlibCodec _z = null; // deferred init... new ZlibCodec();
+
+ protected internal StreamMode _streamMode = StreamMode.Undefined;
+ protected internal FlushType _flushMode;
+ protected internal ZlibStreamFlavor _flavor;
+ protected internal CompressionMode _compressionMode;
+ protected internal CompressionLevel _level;
+ protected internal bool _leaveOpen;
+ protected internal byte[] _workingBuffer;
+ protected internal int _bufferSize = ZlibConstants.WorkingBufferSizeDefault;
+ protected internal byte[] _buf1 = new byte[1];
+
+ protected internal System.IO.Stream _stream;
+ protected internal CompressionStrategy Strategy = CompressionStrategy.Default;
+
+ // workitem 7159
+ Compress.Utils.CRC crc;
+ protected internal string _GzipFileName;
+ protected internal string _GzipComment;
+ protected internal DateTime _GzipMtime;
+ protected internal int _gzipHeaderByteCount;
+
+ internal int Crc32 { get { if (crc == null) return 0; return crc.Crc32Result; } }
+
+ public ZlibBaseStream(System.IO.Stream stream,
+ CompressionMode compressionMode,
+ CompressionLevel level,
+ ZlibStreamFlavor flavor,
+ bool leaveOpen)
+ : base()
+ {
+ this._flushMode = FlushType.None;
+ //this._workingBuffer = new byte[WORKING_BUFFER_SIZE_DEFAULT];
+ this._stream = stream;
+ this._leaveOpen = leaveOpen;
+ this._compressionMode = compressionMode;
+ this._flavor = flavor;
+ this._level = level;
+ // workitem 7159
+ if (flavor == ZlibStreamFlavor.GZIP)
+ {
+ this.crc = new Compress.Utils.CRC();
+ }
+ }
+
+
+ protected internal bool _wantCompress
+ {
+ get
+ {
+ return (this._compressionMode == CompressionMode.Compress);
+ }
+ }
+
+ private ZlibCodec z
+ {
+ get
+ {
+ if (_z == null)
+ {
+ bool wantRfc1950Header = (this._flavor == ZlibStreamFlavor.ZLIB);
+ _z = new ZlibCodec();
+ if (this._compressionMode == CompressionMode.Decompress)
+ {
+ _z.InitializeInflate(wantRfc1950Header);
+ }
+ else
+ {
+ _z.Strategy = Strategy;
+ _z.InitializeDeflate(this._level, wantRfc1950Header);
+ }
+ }
+ return _z;
+ }
+ }
+
+
+
+ private byte[] workingBuffer
+ {
+ get
+ {
+ if (_workingBuffer == null)
+ _workingBuffer = new byte[_bufferSize];
+ return _workingBuffer;
+ }
+ }
+
+
+
+ public override void Write(System.Byte[] buffer, int offset, int count)
+ {
+ // workitem 7159
+ // calculate the CRC on the uncompressed data (before writing)
+ if (crc != null)
+ crc.SlurpBlock(buffer, offset, count);
+
+ if (_streamMode == StreamMode.Undefined)
+ _streamMode = StreamMode.Writer;
+ else if (_streamMode != StreamMode.Writer)
+ throw new ZlibException("Cannot Write after Reading.");
+
+ if (count == 0)
+ return;
+
+ // first reference of z property will initialize the private var _z
+ z.InputBuffer = buffer;
+ _z.NextIn = offset;
+ _z.AvailableBytesIn = count;
+ bool done = false;
+ do
+ {
+ _z.OutputBuffer = workingBuffer;
+ _z.NextOut = 0;
+ _z.AvailableBytesOut = _workingBuffer.Length;
+ int rc = (_wantCompress)
+ ? _z.Deflate(_flushMode)
+ : _z.Inflate(_flushMode);
+ if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ throw new ZlibException((_wantCompress ? "de" : "in") + "flating: " + _z.Message);
+
+ //if (_workingBuffer.Length - _z.AvailableBytesOut > 0)
+ _stream.Write(_workingBuffer, 0, _workingBuffer.Length - _z.AvailableBytesOut);
+
+ done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0;
+
+ // If GZIP and de-compress, we're done when 8 bytes remain.
+ if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress)
+ done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0);
+
+ }
+ while (!done);
+ }
+
+
+
+ private void finish()
+ {
+ if (_z == null) return;
+
+ if (_streamMode == StreamMode.Writer)
+ {
+ bool done = false;
+ do
+ {
+ _z.OutputBuffer = workingBuffer;
+ _z.NextOut = 0;
+ _z.AvailableBytesOut = _workingBuffer.Length;
+ int rc = (_wantCompress)
+ ? _z.Deflate(FlushType.Finish)
+ : _z.Inflate(FlushType.Finish);
+
+ if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
+ {
+ string verb = (_wantCompress ? "de" : "in") + "flating";
+ if (_z.Message == null)
+ throw new ZlibException(String.Format("{0}: (rc = {1})", verb, rc));
+ else
+ throw new ZlibException(verb + ": " + _z.Message);
+ }
+
+ if (_workingBuffer.Length - _z.AvailableBytesOut > 0)
+ {
+ _stream.Write(_workingBuffer, 0, _workingBuffer.Length - _z.AvailableBytesOut);
+ }
+
+ done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0;
+ // If GZIP and de-compress, we're done when 8 bytes remain.
+ if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress)
+ done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0);
+
+ }
+ while (!done);
+
+ Flush();
+
+ // workitem 7159
+ if (_flavor == ZlibStreamFlavor.GZIP)
+ {
+ if (_wantCompress)
+ {
+ // Emit the GZIP trailer: CRC32 and size mod 2^32
+ int c1 = crc.Crc32Result;
+ _stream.Write(BitConverter.GetBytes(c1), 0, 4);
+ int c2 = (Int32)(crc.TotalBytesRead & 0x00000000FFFFFFFF);
+ _stream.Write(BitConverter.GetBytes(c2), 0, 4);
+ }
+ else
+ {
+ throw new ZlibException("Writing with decompression is not supported.");
+ }
+ }
+ }
+ // workitem 7159
+ else if (_streamMode == StreamMode.Reader)
+ {
+ if (_flavor == ZlibStreamFlavor.GZIP)
+ {
+ if (!_wantCompress)
+ {
+ // workitem 8501: handle edge case (decompress empty stream)
+ if (_z.TotalBytesOut == 0L)
+ return;
+
+ // Read and potentially verify the GZIP trailer:
+ // CRC32 and size mod 2^32
+ byte[] trailer = new byte[8];
+
+ // workitems 8679 & 12554
+ if (_z.AvailableBytesIn < 8)
+ {
+ // Make sure we have read to the end of the stream
+ Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, _z.AvailableBytesIn);
+ int bytesNeeded = 8 - _z.AvailableBytesIn;
+ int bytesRead = _stream.Read(trailer,
+ _z.AvailableBytesIn,
+ bytesNeeded);
+ if (bytesNeeded != bytesRead)
+ {
+ throw new ZlibException(String.Format("Missing or incomplete GZIP trailer. Expected 8 bytes, got {0}.",
+ _z.AvailableBytesIn + bytesRead));
+ }
+ }
+ else
+ {
+ Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, trailer.Length);
+ }
+
+ Int32 crc32_expected = BitConverter.ToInt32(trailer, 0);
+ Int32 crc32_actual = crc.Crc32Result;
+ Int32 isize_expected = BitConverter.ToInt32(trailer, 4);
+ Int32 isize_actual = (Int32)(_z.TotalBytesOut & 0x00000000FFFFFFFF);
+
+ if (crc32_actual != crc32_expected)
+ throw new ZlibException(String.Format("Bad CRC32 in GZIP trailer. (actual({0:X8})!=expected({1:X8}))", crc32_actual, crc32_expected));
+
+ if (isize_actual != isize_expected)
+ throw new ZlibException(String.Format("Bad size in GZIP trailer. (actual({0})!=expected({1}))", isize_actual, isize_expected));
+
+ }
+ else
+ {
+ throw new ZlibException("Reading with compression is not supported.");
+ }
+ }
+ }
+ }
+
+
+ private void end()
+ {
+ if (z == null)
+ return;
+ if (_wantCompress)
+ {
+ _z.EndDeflate();
+ }
+ else
+ {
+ _z.EndInflate();
+ }
+ _z = null;
+ }
+
+
+ public override void Close()
+ {
+ if (_stream == null) return;
+ try
+ {
+ finish();
+ }
+ finally
+ {
+ end();
+ if (!_leaveOpen) _stream.Close();
+ _stream = null;
+ }
+ }
+
+ public override void Flush()
+ {
+ _stream.Flush();
+ }
+
+ public override System.Int64 Seek(System.Int64 offset, System.IO.SeekOrigin origin)
+ {
+ throw new NotImplementedException();
+ //_outStream.Seek(offset, origin);
+ }
+ public override void SetLength(System.Int64 value)
+ {
+ _stream.SetLength(value);
+ }
+
+ private bool nomoreinput = false;
+
+
+
+ private string ReadZeroTerminatedString()
+ {
+ var list = new System.Collections.Generic.List<byte>();
+ bool done = false;
+ do
+ {
+ // workitem 7740
+ int n = _stream.Read(_buf1, 0, 1);
+ if (n != 1)
+ throw new ZlibException("Unexpected EOF reading GZIP header.");
+ else
+ {
+ if (_buf1[0] == 0)
+ done = true;
+ else
+ list.Add(_buf1[0]);
+ }
+ } while (!done);
+ byte[] a = list.ToArray();
+ return iso8859dash1.GetString(a, 0, a.Length);
+ }
+ internal static readonly System.Text.Encoding iso8859dash1 = System.Text.Encoding.GetEncoding("iso-8859-1");
+ internal static readonly System.DateTime _unixEpoch = new System.DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
+
+
+ private int _ReadAndValidateGzipHeader()
+ {
+ int totalBytesRead = 0;
+ // read the header on the first read
+ byte[] header = new byte[10];
+ int n = _stream.Read(header, 0, header.Length);
+
+ // workitem 8501: handle edge case (decompress empty stream)
+ if (n == 0)
+ return 0;
+
+ if (n != 10)
+ throw new ZlibException("Not a valid GZIP stream.");
+
+ if (header[0] != 0x1F || header[1] != 0x8B || header[2] != 8)
+ throw new ZlibException("Bad GZIP header.");
+
+ Int32 timet = BitConverter.ToInt32(header, 4);
+ _GzipMtime = _unixEpoch.AddSeconds(timet);
+ totalBytesRead += n;
+ if ((header[3] & 0x04) == 0x04)
+ {
+ // read and discard extra field
+ n = _stream.Read(header, 0, 2); // 2-byte length field
+ totalBytesRead += n;
+
+ Int16 extraLength = (Int16)(header[0] + header[1] * 256);
+ byte[] extra = new byte[extraLength];
+ n = _stream.Read(extra, 0, extra.Length);
+ if (n != extraLength)
+ throw new ZlibException("Unexpected end-of-file reading GZIP header.");
+ totalBytesRead += n;
+ }
+ if ((header[3] & 0x08) == 0x08)
+ _GzipFileName = ReadZeroTerminatedString();
+ if ((header[3] & 0x10) == 0x010)
+ _GzipComment = ReadZeroTerminatedString();
+ if ((header[3] & 0x02) == 0x02)
+ Read(_buf1, 0, 1); // CRC16, ignore
+
+ return totalBytesRead;
+ }
+
+
+
+ public override System.Int32 Read(System.Byte[] buffer, System.Int32 offset, System.Int32 count)
+ {
+ // According to MS documentation, any implementation of the IO.Stream.Read function must:
+ // (a) throw an exception if offset & count reference an invalid part of the buffer,
+ // or if count < 0, or if buffer is null
+ // (b) return 0 only upon EOF, or if count = 0
+ // (c) if not EOF, then return at least 1 byte, up to count bytes
+
+ if (_streamMode == StreamMode.Undefined)
+ {
+ if (!this._stream.CanRead) throw new ZlibException("The stream is not readable.");
+ // for the first read, set up some controls.
+ _streamMode = StreamMode.Reader;
+ // (The first reference to _z goes through the private accessor which
+ // may initialize it.)
+ z.AvailableBytesIn = 0;
+ if (_flavor == ZlibStreamFlavor.GZIP)
+ {
+ _gzipHeaderByteCount = _ReadAndValidateGzipHeader();
+ // workitem 8501: handle edge case (decompress empty stream)
+ if (_gzipHeaderByteCount == 0)
+ return 0;
+ }
+ }
+
+ if (_streamMode != StreamMode.Reader)
+ throw new ZlibException("Cannot Read after Writing.");
+
+ if (count == 0) return 0;
+ if (nomoreinput && _wantCompress) return 0; // workitem 8557
+ if (buffer == null) throw new ArgumentNullException("buffer");
+ if (count < 0) throw new ArgumentOutOfRangeException("count");
+ if (offset < buffer.GetLowerBound(0)) throw new ArgumentOutOfRangeException("offset");
+ if ((offset + count) > buffer.GetLength(0)) throw new ArgumentOutOfRangeException("count");
+
+ int rc = 0;
+
+ // set up the output of the deflate/inflate codec:
+ _z.OutputBuffer = buffer;
+ _z.NextOut = offset;
+ _z.AvailableBytesOut = count;
+
+ // This is necessary in case _workingBuffer has been resized. (new byte[])
+ // (The first reference to _workingBuffer goes through the private accessor which
+ // may initialize it.)
+ _z.InputBuffer = workingBuffer;
+
+ do
+ {
+ // need data in _workingBuffer in order to deflate/inflate. Here, we check if we have any.
+ if ((_z.AvailableBytesIn == 0) && (!nomoreinput))
+ {
+ // No data available, so try to Read data from the captive stream.
+ _z.NextIn = 0;
+ _z.AvailableBytesIn = _stream.Read(_workingBuffer, 0, _workingBuffer.Length);
+ if (_z.AvailableBytesIn == 0)
+ nomoreinput = true;
+
+ }
+ // we have data in InputBuffer; now compress or decompress as appropriate
+ rc = (_wantCompress)
+ ? _z.Deflate(_flushMode)
+ : _z.Inflate(_flushMode);
+
+ if (nomoreinput && (rc == ZlibConstants.Z_BUF_ERROR))
+ return 0;
+
+ if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ throw new ZlibException(String.Format("{0}flating: rc={1} msg={2}", (_wantCompress ? "de" : "in"), rc, _z.Message));
+
+ if ((nomoreinput || rc == ZlibConstants.Z_STREAM_END) && (_z.AvailableBytesOut == count))
+ break; // nothing more to read
+ }
+ //while (_z.AvailableBytesOut == count && rc == ZlibConstants.Z_OK);
+ while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK);
+
+
+ // workitem 8557
+ // is there more room in output?
+ if (_z.AvailableBytesOut > 0)
+ {
+ if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0)
+ {
+ // deferred
+ }
+
+ // are we completely done reading?
+ if (nomoreinput)
+ {
+ // and in compression?
+ if (_wantCompress)
+ {
+ // no more input data available; therefore we flush to
+ // try to complete the read
+ rc = _z.Deflate(FlushType.Finish);
+
+ if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ throw new ZlibException(String.Format("Deflating: rc={0} msg={1}", rc, _z.Message));
+ }
+ }
+ }
+
+
+ rc = (count - _z.AvailableBytesOut);
+
+ // calculate CRC after reading
+ if (crc != null)
+ crc.SlurpBlock(buffer, offset, rc);
+
+ return rc;
+ }
+
+
+
+ public override System.Boolean CanRead
+ {
+ get { return this._stream.CanRead; }
+ }
+
+ public override System.Boolean CanSeek
+ {
+ get { return this._stream.CanSeek; }
+ }
+
+ public override System.Boolean CanWrite
+ {
+ get { return this._stream.CanWrite; }
+ }
+
+ public override System.Int64 Length
+ {
+ get { return _stream.Length; }
+ }
+
+ public override long Position
+ {
+ get { throw new NotImplementedException(); }
+ set { throw new NotImplementedException(); }
+ }
+
+ public enum StreamMode
+ {
+ Writer,
+ Reader,
+ Undefined,
+ }
+
+
+ }
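+
+ // Illustrative sketch (assumes "data" is a byte[] to compress): ZlibBaseStream can be
+ // used directly as a write-through RFC1950 (ZLIB) compressor; Close() finishes the
+ // stream with FlushType.Finish.
+ //
+ // using (var ms = new System.IO.MemoryStream())
+ // {
+ //     var zs = new ZlibBaseStream(ms, CompressionMode.Compress,
+ //         CompressionLevel.Default, ZlibStreamFlavor.ZLIB, leaveOpen: true);
+ //     zs.Write(data, 0, data.Length);
+ //     zs.Close();
+ //     byte[] compressed = ms.ToArray();
+ // }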
+
+
+}
diff --git a/SabreTools.Library/External/Compress/ZipFile/ZLib/ZlibCodec.cs b/SabreTools.Library/External/Compress/ZipFile/ZLib/ZlibCodec.cs
new file mode 100644
index 00000000..42bf00f7
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ZipFile/ZLib/ZlibCodec.cs
@@ -0,0 +1,718 @@
+// ZlibCodec.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2009-November-03 15:40:51>
+//
+// ------------------------------------------------------------------
+//
+// This module defines a Codec for ZLIB compression and
+// decompression. This code extends code that was based the jzlib
+// implementation of zlib, but this code is completely novel. The codec
+// class is new, and encapsulates some behaviors that are new, and some
+// that were present in other classes in the jzlib code base. In
+// keeping with the license for jzlib, the copyright to the jzlib code
+// is included below.
+//
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+using System;
+using System.Runtime.InteropServices;
+using Interop=System.Runtime.InteropServices;
+
+namespace Compress.ZipFile.ZLib
+{
+ ///
+ /// Encoder and Decoder for ZLIB and DEFLATE (IETF RFC1950 and RFC1951).
+ ///
+ ///
+ ///
+ /// This class compresses and decompresses data according to the Deflate algorithm
+ /// and optionally, the ZLIB format, as documented in RFC 1950 - ZLIB and RFC 1951 - DEFLATE.
+ ///
+ [Guid("ebc25cf6-9120-4283-b972-0e5520d0000D")]
+ [Interop.ComVisible(true)]
+#if !NETCF
+ [Interop.ClassInterface(Interop.ClassInterfaceType.AutoDispatch)]
+#endif
+ sealed public class ZlibCodec
+ {
+ ///
+ /// The buffer from which data is taken.
+ ///
+ public byte[] InputBuffer;
+
+ ///
+ /// An index into the InputBuffer array, indicating where to start reading.
+ ///
+ public int NextIn;
+
+ ///
+ /// The number of bytes available in the InputBuffer, starting at NextIn.
+ ///
+ ///
+ /// Generally you should set this to InputBuffer.Length before the first Inflate() or Deflate() call.
+ /// The class will update this number as calls to Inflate/Deflate are made.
+ ///
+ public int AvailableBytesIn;
+
+ ///
+ /// Total number of bytes read so far, through all calls to Inflate()/Deflate().
+ ///
+ public long TotalBytesIn;
+
+ ///
+ /// Buffer to store output data.
+ ///
+ public byte[] OutputBuffer;
+
+ ///
+ /// An index into the OutputBuffer array, indicating where to start writing.
+ ///
+ public int NextOut;
+
+ ///
+ /// The number of bytes available in the OutputBuffer, starting at NextOut.
+ ///
+ ///
+ /// Generally you should set this to OutputBuffer.Length before the first Inflate() or Deflate() call.
+ /// The class will update this number as calls to Inflate/Deflate are made.
+ ///
+ public int AvailableBytesOut;
+
+ ///
+ /// Total number of bytes written to the output so far, through all calls to Inflate()/Deflate().
+ ///
+ public long TotalBytesOut;
+
+ ///
+ /// used for diagnostics, when something goes wrong!
+ ///
+ public System.String Message;
+
+ internal DeflateManager dstate;
+ internal InflateManager istate;
+
+ internal uint _Adler32;
+
+ ///
+ /// The compression level to use in this codec. Useful only in compression mode.
+ ///
+ public CompressionLevel CompressLevel = CompressionLevel.Default;
+
+ ///
+ /// The number of Window Bits to use.
+ ///
+ ///
+ /// This gauges the size of the sliding window, and hence the
+ /// compression effectiveness as well as memory consumption. It's best to just leave this
+ /// setting alone if you don't know what it is. The maximum value is 15 bits, which implies
+ /// a 32k window.
+ ///
+ public int WindowBits = ZlibConstants.WindowBitsDefault;
+
+ ///
+ /// The compression strategy to use.
+ ///
+ ///
+ /// This is only effective in compression. The theory offered by ZLIB is that different
+ /// strategies could potentially produce significant differences in compression behavior
+ /// for different data sets. Unfortunately I don't have any good recommendations for how
+ /// to set it differently. When I tested changing the strategy I got minimally different
+ /// compression performance. It's best to leave this property alone if you don't have a
+ /// good feel for it. Or, you may want to produce a test harness that runs through the
+ /// different strategy options and evaluates them on different file types. If you do that,
+ /// let me know your results.
+ ///
+ public CompressionStrategy Strategy = CompressionStrategy.Default;
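+
+ // Illustrative sketch: WindowBits and Strategy are read when the corresponding
+ // Initialize* call is made, so set them beforehand. For example, to inflate a raw
+ // DEFLATE stream (no RFC1950 header) with the maximum window size:
+ //
+ // ZlibCodec inflater = new ZlibCodec();
+ // inflater.WindowBits = ZlibConstants.WindowBitsMax;
+ // inflater.InitializeInflate(expectRfc1950Header: false);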
+
+
+ ///
+ /// The Adler32 checksum on the data transferred through the codec so far. You probably don't need to look at this.
+ ///
+ public int Adler32 { get { return (int)_Adler32; } }
+
+
+ ///
+ /// Create a ZlibCodec.
+ ///
+ ///
+ /// If you use this default constructor, you will later have to explicitly call
+ /// InitializeInflate() or InitializeDeflate() before using the ZlibCodec to compress
+ /// or decompress.
+ ///
+ public ZlibCodec() { }
+
+ ///
+ /// Create a ZlibCodec that either compresses or decompresses.
+ ///
+ ///
+ /// Indicates whether the codec should compress (deflate) or decompress (inflate).
+ ///
+ public ZlibCodec(CompressionMode mode)
+ {
+ if (mode == CompressionMode.Compress)
+ {
+ int rc = InitializeDeflate();
+ if (rc != ZlibConstants.Z_OK) throw new ZlibException("Cannot initialize for deflate.");
+ }
+ else if (mode == CompressionMode.Decompress)
+ {
+ int rc = InitializeInflate();
+ if (rc != ZlibConstants.Z_OK) throw new ZlibException("Cannot initialize for inflate.");
+ }
+ else throw new ZlibException("Invalid ZlibStreamFlavor.");
+ }
+
+ ///
+ /// Initialize the inflation state.
+ ///
+ ///
+ /// It is not necessary to call this if the ZlibCodec was constructed with a CompressionMode;
+ /// that constructor calls the appropriate initializer implicitly.
+ ///
+ /// Z_OK if everything goes well.
+ public int InitializeInflate()
+ {
+ return InitializeInflate(this.WindowBits);
+ }
+
+ ///
+ /// Initialize the inflation state with an explicit flag to
+ /// govern the handling of RFC1950 header bytes.
+ ///
+ ///
+ ///
+ /// By default, the ZLIB header defined in RFC 1950 is expected. If
+ /// you want to read a zlib stream you should specify true for
+ /// expectRfc1950Header. If you have a deflate stream, you will want to specify
+ /// false. It is only necessary to invoke this initializer explicitly if you
+ /// want to specify false.
+ ///
+ ///
+ /// whether to expect an RFC1950 header byte
+ /// pair when reading the stream of data to be inflated.
+ ///
+ /// Z_OK if everything goes well.
+ public int InitializeInflate(bool expectRfc1950Header)
+ {
+ return InitializeInflate(this.WindowBits, expectRfc1950Header);
+ }
+
+ ///
+ /// Initialize the ZlibCodec for inflation, with the specified number of window bits.
+ ///
+ /// The number of window bits to use. If you need to ask what that is,
+ /// then you shouldn't be calling this initializer.
+ /// Z_OK if all goes well.
+ public int InitializeInflate(int windowBits)
+ {
+ this.WindowBits = windowBits;
+ return InitializeInflate(windowBits, true);
+ }
+
+ ///
+ /// Initialize the inflation state with an explicit flag to govern the handling of
+ /// RFC1950 header bytes.
+ ///
+ ///
+ ///
+ /// If you want to read a zlib stream you should specify true for
+ /// expectRfc1950Header. In this case, the library will expect to find a ZLIB
+ /// header, as defined in RFC
+ /// 1950, in the compressed stream. If you will be reading a DEFLATE or
+ /// GZIP stream, which does not have such a header, you will want to specify
+ /// false.
+ ///
+ ///
+ /// whether to expect an RFC1950 header byte pair when reading
+ /// the stream of data to be inflated.
+ /// The number of window bits to use. If you need to ask what that is,
+ /// then you shouldn't be calling this initializer.
+ /// Z_OK if everything goes well.
+ public int InitializeInflate(int windowBits, bool expectRfc1950Header)
+ {
+ this.WindowBits = windowBits;
+ if (dstate != null) throw new ZlibException("You may not call InitializeInflate() after calling InitializeDeflate().");
+ istate = new InflateManager(expectRfc1950Header);
+ return istate.Initialize(this, windowBits);
+ }
+
+ ///
+ /// Inflate the data in the InputBuffer, placing the result in the OutputBuffer.
+ ///
+ ///
+ /// You must have set InputBuffer and OutputBuffer, NextIn and NextOut, and AvailableBytesIn and
+ /// AvailableBytesOut before calling this method.
+ ///
+ ///
+ ///
+ /// private void InflateBuffer()
+ /// {
+ /// int bufferSize = 1024;
+ /// byte[] buffer = new byte[bufferSize];
+ /// ZlibCodec decompressor = new ZlibCodec();
+ ///
+ /// Console.WriteLine("\n============================================");
+ /// Console.WriteLine("Size of Buffer to Inflate: {0} bytes.", CompressedBytes.Length);
+ /// MemoryStream ms = new MemoryStream(DecompressedBytes);
+ ///
+ /// int rc = decompressor.InitializeInflate();
+ ///
+ /// decompressor.InputBuffer = CompressedBytes;
+ /// decompressor.NextIn = 0;
+ /// decompressor.AvailableBytesIn = CompressedBytes.Length;
+ ///
+ /// decompressor.OutputBuffer = buffer;
+ ///
+ /// // pass 1: inflate
+ /// do
+ /// {
+ /// decompressor.NextOut = 0;
+ /// decompressor.AvailableBytesOut = buffer.Length;
+ /// rc = decompressor.Inflate(FlushType.None);
+ ///
+ /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ /// throw new Exception("inflating: " + decompressor.Message);
+ ///
+ /// ms.Write(decompressor.OutputBuffer, 0, buffer.Length - decompressor.AvailableBytesOut);
+ /// }
+ /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0);
+ ///
+ /// // pass 2: finish and flush
+ /// do
+ /// {
+ /// decompressor.NextOut = 0;
+ /// decompressor.AvailableBytesOut = buffer.Length;
+ /// rc = decompressor.Inflate(FlushType.Finish);
+ ///
+ /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
+ /// throw new Exception("inflating: " + decompressor.Message);
+ ///
+ /// if (buffer.Length - decompressor.AvailableBytesOut > 0)
+ /// ms.Write(buffer, 0, buffer.Length - decompressor.AvailableBytesOut);
+ /// }
+ /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0);
+ ///
+ /// decompressor.EndInflate();
+ /// }
+ ///
+ ///
+ ///
+ /// The flush to use when inflating.
+ /// Z_OK if everything goes well.
+ public int Inflate(FlushType flush)
+ {
+ if (istate == null)
+ throw new ZlibException("No Inflate State!");
+ return istate.Inflate(flush);
+ }
+
+
+ ///
+ /// Ends an inflation session.
+ ///
+ ///
+ /// Call this after successively calling Inflate(). This will cause all buffers to be flushed.
+ /// After calling this you cannot call Inflate() without an intervening call to one of the
+ /// InitializeInflate() overloads.
+ ///
+ /// Z_OK if everything goes well.
+ public int EndInflate()
+ {
+ if (istate == null)
+ throw new ZlibException("No Inflate State!");
+ int ret = istate.End();
+ istate = null;
+ return ret;
+ }
+
+ ///
+ /// Skips invalid compressed data until a possible full-flush point is found, so that inflation can resume (zlib's inflateSync).
+ ///
+ /// Z_OK if everything goes well.
+ public int SyncInflate()
+ {
+ if (istate == null)
+ throw new ZlibException("No Inflate State!");
+ return istate.Sync();
+ }
+
+ ///
+ /// Initialize the ZlibCodec for deflation operation.
+ ///
+ ///
+ /// The codec will use the MAX window bits and the default level of compression.
+ ///
+ ///
+ ///
+ /// int bufferSize = 40000;
+ /// byte[] CompressedBytes = new byte[bufferSize];
+ /// byte[] DecompressedBytes = new byte[bufferSize];
+ ///
+ /// ZlibCodec compressor = new ZlibCodec();
+ ///
+ /// compressor.InitializeDeflate(CompressionLevel.Default);
+ ///
+ /// compressor.InputBuffer = System.Text.ASCIIEncoding.ASCII.GetBytes(TextToCompress);
+ /// compressor.NextIn = 0;
+ /// compressor.AvailableBytesIn = compressor.InputBuffer.Length;
+ ///
+ /// compressor.OutputBuffer = CompressedBytes;
+ /// compressor.NextOut = 0;
+ /// compressor.AvailableBytesOut = CompressedBytes.Length;
+ ///
+ /// while (compressor.TotalBytesIn != TextToCompress.Length && compressor.TotalBytesOut < bufferSize)
+ /// {
+ /// compressor.Deflate(FlushType.None);
+ /// }
+ ///
+ /// while (true)
+ /// {
+ /// int rc= compressor.Deflate(FlushType.Finish);
+ /// if (rc == ZlibConstants.Z_STREAM_END) break;
+ /// }
+ ///
+ /// compressor.EndDeflate();
+ ///
+ ///
+ ///
+ /// Z_OK if all goes well. You generally don't need to check the return code.
+ public int InitializeDeflate()
+ {
+ return _InternalInitializeDeflate(true);
+ }
+
+ ///
+ /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel.
+ ///
+ ///
+ /// The codec will use the maximum window bits (15) and the specified
+ /// CompressionLevel. It will emit a ZLIB stream as it compresses.
+ ///
+ /// The compression level for the codec.
+ /// Z_OK if all goes well.
+ public int InitializeDeflate(CompressionLevel level)
+ {
+ this.CompressLevel = level;
+ return _InternalInitializeDeflate(true);
+ }
+
+
+ ///
+ /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel,
+ /// and the explicit flag governing whether to emit an RFC1950 header byte pair.
+ ///
+ ///
+ /// The codec will use the maximum window bits (15) and the specified CompressionLevel.
+ /// If you want to generate a zlib stream, you should specify true for
+ /// wantRfc1950Header. In this case, the library will emit a ZLIB
+ /// header, as defined in RFC
+ /// 1950, in the compressed stream.
+ ///
+ /// The compression level for the codec.
+ /// whether to emit an initial RFC1950 byte pair in the compressed stream.
+ /// Z_OK if all goes well.
+ public int InitializeDeflate(CompressionLevel level, bool wantRfc1950Header)
+ {
+ this.CompressLevel = level;
+ return _InternalInitializeDeflate(wantRfc1950Header);
+ }
+
+
+ ///
+ /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel,
+ /// and the specified number of window bits.
+ ///
+ ///
+ /// The codec will use the specified number of window bits and the specified CompressionLevel.
+ ///
+ /// The compression level for the codec.
+ /// the number of window bits to use. If you don't know what this means, don't use this method.
+ /// Z_OK if all goes well.
+ public int InitializeDeflate(CompressionLevel level, int bits)
+ {
+ this.CompressLevel = level;
+ this.WindowBits = bits;
+ return _InternalInitializeDeflate(true);
+ }
+
+ ///
+ /// Initialize the ZlibCodec for deflation operation, using the specified
+ /// CompressionLevel, the specified number of window bits, and the explicit flag
+ /// governing whether to emit an RFC1950 header byte pair.
+ ///
+ ///
+ /// The compression level for the codec.
+ /// whether to emit an initial RFC1950 byte pair in the compressed stream.
+ /// the number of window bits to use. If you don't know what this means, don't use this method.
+ /// Z_OK if all goes well.
+ public int InitializeDeflate(CompressionLevel level, int bits, bool wantRfc1950Header)
+ {
+ this.CompressLevel = level;
+ this.WindowBits = bits;
+ return _InternalInitializeDeflate(wantRfc1950Header);
+ }
+
+ private int _InternalInitializeDeflate(bool wantRfc1950Header)
+ {
+ if (istate != null) throw new ZlibException("You may not call InitializeDeflate() after calling InitializeInflate().");
+ dstate = new DeflateManager();
+ dstate.WantRfc1950HeaderBytes = wantRfc1950Header;
+
+ return dstate.Initialize(this, this.CompressLevel, this.WindowBits, this.Strategy);
+ }
+
+ ///
+ /// Deflate one batch of data.
+ ///
+ ///
+ /// You must have set InputBuffer and OutputBuffer before calling this method.
+ ///
+ ///
+ ///
+ /// private void DeflateBuffer(CompressionLevel level)
+ /// {
+ /// int bufferSize = 1024;
+ /// byte[] buffer = new byte[bufferSize];
+ /// ZlibCodec compressor = new ZlibCodec();
+ ///
+ /// Console.WriteLine("\n============================================");
+ /// Console.WriteLine("Size of Buffer to Deflate: {0} bytes.", UncompressedBytes.Length);
+ /// MemoryStream ms = new MemoryStream();
+ ///
+ /// int rc = compressor.InitializeDeflate(level);
+ ///
+ /// compressor.InputBuffer = UncompressedBytes;
+ /// compressor.NextIn = 0;
+ /// compressor.AvailableBytesIn = UncompressedBytes.Length;
+ ///
+ /// compressor.OutputBuffer = buffer;
+ ///
+ /// // pass 1: deflate
+ /// do
+ /// {
+ /// compressor.NextOut = 0;
+ /// compressor.AvailableBytesOut = buffer.Length;
+ /// rc = compressor.Deflate(FlushType.None);
+ ///
+ /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ /// throw new Exception("deflating: " + compressor.Message);
+ ///
+ /// ms.Write(compressor.OutputBuffer, 0, buffer.Length - compressor.AvailableBytesOut);
+ /// }
+ /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0);
+ ///
+ /// // pass 2: finish and flush
+ /// do
+ /// {
+ /// compressor.NextOut = 0;
+ /// compressor.AvailableBytesOut = buffer.Length;
+ /// rc = compressor.Deflate(FlushType.Finish);
+ ///
+ /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
+ /// throw new Exception("deflating: " + compressor.Message);
+ ///
+ /// if (buffer.Length - compressor.AvailableBytesOut > 0)
+ /// ms.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut);
+ /// }
+ /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0);
+ ///
+ /// compressor.EndDeflate();
+ ///
+ /// ms.Seek(0, SeekOrigin.Begin);
+ /// CompressedBytes = new byte[compressor.TotalBytesOut];
+ /// ms.Read(CompressedBytes, 0, CompressedBytes.Length);
+ /// }
+ ///
+ ///
+ /// whether to flush all data as you deflate. Generally you will want to
+ /// use Z_NO_FLUSH here, in a series of calls to Deflate(), and then call EndDeflate() to
+ /// flush everything.
+ ///
+ /// Z_OK if all goes well.
+ public int Deflate(FlushType flush)
+ {
+ if (dstate == null)
+ throw new ZlibException("No Deflate State!");
+ return dstate.Deflate(flush);
+ }
+
+ ///
+ /// End a deflation session.
+ ///
+ ///
+ /// Call this after making a series of one or more calls to Deflate(). All buffers are flushed.
+ ///
+ /// Z_OK if all goes well.
+ public int EndDeflate()
+ {
+ if (dstate == null)
+ throw new ZlibException("No Deflate State!");
+ // TODO: dinoch Tue, 03 Nov 2009 15:39 (test this)
+ //int ret = dstate.End();
+ dstate = null;
+ return ZlibConstants.Z_OK; //ret;
+ }
+
+ ///
+ /// Reset a codec for another deflation session.
+ ///
+ ///
+ /// Call this to reset the deflation state. For example if a thread is deflating
+ /// non-consecutive blocks, you can call Reset() after the Deflate(Sync) of the first
+ /// block and before the next Deflate(None) of the second block.
+ ///
+ /// Z_OK if all goes well.
+ public void ResetDeflate()
+ {
+ if (dstate == null)
+ throw new ZlibException("No Deflate State!");
+ dstate.Reset();
+ }
+
+
+ ///
+ /// Set the CompressionStrategy and CompressionLevel for a deflation session.
+ ///
+ /// the level of compression to use.
+ /// the strategy to use for compression.
+ /// Z_OK if all goes well.
+ public int SetDeflateParams(CompressionLevel level, CompressionStrategy strategy)
+ {
+ if (dstate == null)
+ throw new ZlibException("No Deflate State!");
+ return dstate.SetParams(level, strategy);
+ }
+
+
+ ///
+ /// Set the dictionary to be used for either Inflation or Deflation.
+ ///
+ /// The dictionary bytes to use.
+ /// Z_OK if all goes well.
+ public int SetDictionary(byte[] dictionary)
+ {
+ if (istate != null)
+ return istate.SetDictionary(dictionary);
+
+ if (dstate != null)
+ return dstate.SetDictionary(dictionary);
+
+ throw new ZlibException("No Inflate or Deflate state!");
+ }
+
+ // Flush as much pending output as possible. All deflate() output goes
+ // through this function so some applications may wish to modify it
+ // to avoid allocating a large strm->next_out buffer and copying into it.
+ // (See also read_buf()).
+ internal void flush_pending()
+ {
+ int len = dstate.pendingCount;
+
+ if (len > AvailableBytesOut)
+ len = AvailableBytesOut;
+ if (len == 0)
+ return;
+
+ if (dstate.pending.Length <= dstate.nextPending ||
+ OutputBuffer.Length <= NextOut ||
+ dstate.pending.Length < (dstate.nextPending + len) ||
+ OutputBuffer.Length < (NextOut + len))
+ {
+ throw new ZlibException(String.Format("Invalid State. (pending.Length={0}, pendingCount={1})",
+ dstate.pending.Length, dstate.pendingCount));
+ }
+
+ Array.Copy(dstate.pending, dstate.nextPending, OutputBuffer, NextOut, len);
+
+ NextOut += len;
+ dstate.nextPending += len;
+ TotalBytesOut += len;
+ AvailableBytesOut -= len;
+ dstate.pendingCount -= len;
+ if (dstate.pendingCount == 0)
+ {
+ dstate.nextPending = 0;
+ }
+ }
+
+ // Read a new buffer from the current input stream, update the adler32
+ // and total number of bytes read. All deflate() input goes through
+ // this function so some applications may wish to modify it to avoid
+ // allocating a large strm->next_in buffer and copying from it.
+ // (See also flush_pending()).
+ internal int read_buf(byte[] buf, int start, int size)
+ {
+ int len = AvailableBytesIn;
+
+ if (len > size)
+ len = size;
+ if (len == 0)
+ return 0;
+
+ AvailableBytesIn -= len;
+
+ if (dstate.WantRfc1950HeaderBytes)
+ {
+ _Adler32 = Adler.Adler32(_Adler32, InputBuffer, NextIn, len);
+ }
+ Array.Copy(InputBuffer, NextIn, buf, start, len);
+ NextIn += len;
+ TotalBytesIn += len;
+ return len;
+ }
+
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Zlib/ZlibConstants.cs b/SabreTools.Library/External/Compress/ZipFile/ZLib/ZlibConstants.cs
similarity index 59%
rename from SabreTools.Library/External/Zlib/ZlibConstants.cs
rename to SabreTools.Library/External/Compress/ZipFile/ZLib/ZlibConstants.cs
index 9f113440..332684aa 100644
--- a/SabreTools.Library/External/Zlib/ZlibConstants.cs
+++ b/SabreTools.Library/External/Compress/ZipFile/ZLib/ZlibConstants.cs
@@ -60,65 +60,67 @@
//
// -----------------------------------------------------------------------
-namespace Ionic.Zlib
+
+namespace Compress.ZipFile.ZLib
{
- ///
- /// A bunch of constants used in the Zlib interface.
- ///
- public static class ZlibConstants
- {
- ///
- /// The maximum number of window bits for the Deflate algorithm.
- ///
- public const int WindowBitsMax = 15; // 32K LZ77 window
+ ///
+ /// A bunch of constants used in the Zlib interface.
+ ///
+ public static class ZlibConstants
+ {
+ ///
+ /// The maximum number of window bits for the Deflate algorithm.
+ ///
+ public const int WindowBitsMax = 15; // 32K LZ77 window
- ///
- /// The default number of window bits for the Deflate algorithm.
- ///
- public const int WindowBitsDefault = WindowBitsMax;
+ ///
+ /// The default number of window bits for the Deflate algorithm.
+ ///
+ public const int WindowBitsDefault = WindowBitsMax;
- ///
- /// indicates everything is A-OK
- ///
- public const int Z_OK = 0;
+ ///
+ /// indicates everything is A-OK
+ ///
+ public const int Z_OK = 0;
- ///
- /// Indicates that the last operation reached the end of the stream.
- ///
- public const int Z_STREAM_END = 1;
+ ///
+ /// Indicates that the last operation reached the end of the stream.
+ ///
+ public const int Z_STREAM_END = 1;
- ///
- /// The operation ended in need of a dictionary.
- ///
- public const int Z_NEED_DICT = 2;
+ ///
+ /// The operation ended in need of a dictionary.
+ ///
+ public const int Z_NEED_DICT = 2;
- ///
- /// There was an error with the stream - not enough data, not open and readable, etc.
- ///
- public const int Z_STREAM_ERROR = -2;
+ ///
+ /// There was an error with the stream - not enough data, not open and readable, etc.
+ ///
+ public const int Z_STREAM_ERROR = -2;
- ///
- /// There was an error with the data - not enough data, bad data, etc.
- ///
- public const int Z_DATA_ERROR = -3;
+ ///
+ /// There was an error with the data - not enough data, bad data, etc.
+ ///
+ public const int Z_DATA_ERROR = -3;
- ///
- /// There was an error with the working buffer.
- ///
- public const int Z_BUF_ERROR = -5;
+ ///
+ /// There was an error with the working buffer.
+ ///
+ public const int Z_BUF_ERROR = -5;
- ///
- /// The size of the working buffer used in the ZlibCodec class. Defaults to 8192 bytes.
- ///
-#if NETCF
- public const int WorkingBufferSizeDefault = 8192;
+ ///
+ /// The size of the working buffer used in the ZlibCodec class. Defaults to 8192 bytes.
+ ///
+#if NETCF
+ public const int WorkingBufferSizeDefault = 8192;
#else
- public const int WorkingBufferSizeDefault = 16384;
+ public const int WorkingBufferSizeDefault = 16384;
#endif
- ///
- /// The minimum size of the working buffer used in the ZlibCodec class. Currently it is 128 bytes.
- ///
- public const int WorkingBufferSizeMin = 1024;
- }
+ ///
+ /// The minimum size of the working buffer used in the ZlibCodec class. Currently it is 1024 bytes.
+ ///
+ public const int WorkingBufferSizeMin = 1024;
+ }
+
}
diff --git a/SabreTools.Library/External/Compress/ZipFile/zipFile.cs b/SabreTools.Library/External/Compress/ZipFile/zipFile.cs
new file mode 100644
index 00000000..6bcbe891
--- /dev/null
+++ b/SabreTools.Library/External/Compress/ZipFile/zipFile.cs
@@ -0,0 +1,1912 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using Compress.Utils;
+using Compress.ZipFile.ZLib;
+using Directory = RVIO.Directory;
+using FileInfo = RVIO.FileInfo;
+using FileStream = RVIO.FileStream;
+using Path = RVIO.Path;
+
+// UInt16 = ushort
+// UInt32 = uint
+// ULong = ulong
+
+namespace Compress.ZipFile
+{
+ public class ZipFile : ICompress
+ {
+ private const uint LocalFileHeaderSignature = 0x04034b50;
+ private const uint CentralDirectoryHeaderSigniature = 0x02014b50;
+ private const uint EndOfCentralDirSignature = 0x06054b50;
+ private const uint Zip64EndOfCentralDirSignatue = 0x06064b50;
+ private const uint Zip64EndOfCentralDirectoryLocator = 0x07064b50;
+ private readonly List<LocalFile> _localFiles = new List<LocalFile>();
+
+
+ private FileInfo _zipFileInfo;
+
+ private ulong _centerDirStart;
+ private ulong _centerDirSize;
+ private ulong _endOfCenterDir64;
+
+ private byte[] _fileComment;
+ private Stream _zipFs;
+ private Stream _compressionStream;
+
+ private uint _localFilesCount;
+
+ private bool _zip64;
+
+ public string ZipFilename => _zipFileInfo != null ? _zipFileInfo.FullName : "";
+
+ public long TimeStamp => _zipFileInfo?.LastWriteTime ?? 0;
+
+ public ZipOpenType ZipOpen { get; private set; }
+
+
+ public ZipStatus ZipStatus { get; set; }
+
+ public int LocalFilesCount()
+ {
+ return _localFiles.Count;
+ }
+
+ public string Filename(int i)
+ {
+ return _localFiles[i].FileName;
+ }
+
+ public ulong UncompressedSize(int i)
+ {
+ return _localFiles[i].UncompressedSize;
+ }
+
+ public ulong? LocalHeader(int i)
+ {
+ return (_localFiles[i].GeneralPurposeBitFlag & 8) == 0 ? (ulong?)_localFiles[i].RelativeOffsetOfLocalHeader : null;
+ }
+
+ public byte[] CRC32(int i)
+ {
+ return _localFiles[i].CRC;
+ }
+
+ public bool IsDirectory(int i)
+ {
+ try
+ {
+ if (_localFiles[i].UncompressedSize != 0)
+ return false;
+ string filename = _localFiles[i].FileName;
+ char lastChar = filename[filename.Length - 1];
+ return lastChar == '/' || lastChar == '\\';
+ }
+ catch (Exception ex)
+ {
+ ArgumentException argEx = new ArgumentException("Error in file " + _zipFileInfo?.FullName + " : " + ex.Message, ex.InnerException);
+ throw argEx;
+ }
+
+ }
+
+ public DateTime LastModified(int i)
+ {
+ return _localFiles[i].DateTime;
+ }
+
+ public ZipReturn ZipFileCreate(string newFilename)
+ {
+ if (ZipOpen != ZipOpenType.Closed)
+ {
+ return ZipReturn.ZipFileAlreadyOpen;
+ }
+
+ CreateDirForFile(newFilename);
+ _zipFileInfo = new FileInfo(newFilename);
+
+ int errorCode = FileStream.OpenFileWrite(newFilename, out _zipFs);
+ if (errorCode != 0)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ ZipOpen = ZipOpenType.OpenWrite;
+ return ZipReturn.ZipGood;
+ }
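+
+ // Illustrative sketch (assumes "data" is a byte[] to store and "crc" holds its 4-byte
+ // CRC-32): create an archive, write one deflated entry, and close it.
+ //
+ // ZipFile zf = new ZipFile();
+ // zf.ZipFileCreate(@"C:\out\test.zip");
+ // zf.ZipFileOpenWriteStream(false, true, "file.bin", (ulong)data.Length, 8, out Stream ws);
+ // ws.Write(data, 0, data.Length);
+ // zf.ZipFileCloseWriteStream(crc);
+ // zf.ZipFileClose();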
+
+ public void ZipFileClose()
+ {
+ if (ZipOpen == ZipOpenType.Closed)
+ {
+ return;
+ }
+
+ if (ZipOpen == ZipOpenType.OpenRead)
+ {
+ if (_zipFs != null)
+ {
+ _zipFs.Close();
+ _zipFs.Dispose();
+ }
+ ZipOpen = ZipOpenType.Closed;
+ return;
+ }
+
+ _zip64 = false;
+ bool lTrrntzip = true;
+
+ _centerDirStart = (ulong)_zipFs.Position;
+ if (_centerDirStart >= 0xffffffff)
+ {
+ _zip64 = true;
+ }
+
+ using (CrcCalculatorStream crcCs = new CrcCalculatorStream(_zipFs, true))
+ {
+ foreach (LocalFile t in _localFiles)
+ {
+ t.CenteralDirectoryWrite(crcCs);
+ _zip64 |= t.Zip64;
+ lTrrntzip &= t.TrrntZip;
+ }
+
+ crcCs.Flush();
+ crcCs.Close();
+
+ _centerDirSize = (ulong)_zipFs.Position - _centerDirStart;
+
+ _fileComment = lTrrntzip ? GetBytes("TORRENTZIPPED-" + crcCs.Crc.ToString("X8")) : new byte[0];
+ ZipStatus = lTrrntzip ? ZipStatus.TrrntZip : ZipStatus.None;
+ }
+
+ if (_zip64)
+ {
+ _endOfCenterDir64 = (ulong)_zipFs.Position;
+ Zip64EndOfCentralDirWrite();
+ Zip64EndOfCentralDirectoryLocatorWrite();
+ }
+ EndOfCentralDirWrite();
+
+ _zipFs.SetLength(_zipFs.Position);
+ _zipFs.Flush();
+ _zipFs.Close();
+ _zipFs.Dispose();
+ _zipFileInfo = new FileInfo(_zipFileInfo.FullName);
+ ZipOpen = ZipOpenType.Closed;
+ }
+
+
+ public void ZipFileCloseFailed()
+ {
+ switch (ZipOpen)
+ {
+ case ZipOpenType.Closed:
+ return;
+ case ZipOpenType.OpenRead:
+ if (_zipFs != null)
+ {
+ _zipFs.Close();
+ _zipFs.Dispose();
+ }
+ break;
+ case ZipOpenType.OpenWrite:
+ _zipFs.Flush();
+ _zipFs.Close();
+ _zipFs.Dispose();
+ if (_zipFileInfo != null)
+ RVIO.File.Delete(_zipFileInfo.FullName);
+ _zipFileInfo = null;
+ break;
+ }
+
+ ZipOpen = ZipOpenType.Closed;
+ }
+
+ public ZipReturn ZipFileCloseReadStream()
+ {
+ if (_compressionStream == null)
+ return ZipReturn.ZipGood;
+ if (_compressionStream is ZlibBaseStream dfStream)
+ {
+ dfStream.Close();
+ dfStream.Dispose();
+ }
+ _compressionStream = null;
+
+ return ZipReturn.ZipGood;
+ }
+
+ // TODO: Figure out how to re-add the file time functionality to this
+ public ZipReturn ZipFileOpenWriteStream(bool raw, bool trrntzip, string filename, ulong uncompressedSize, ushort compressionMethod, out Stream stream)
+ {
+ stream = null;
+ if (ZipOpen != ZipOpenType.OpenWrite)
+ {
+ return ZipReturn.ZipWritingToInputFile;
+ }
+
+ LocalFile lf = new LocalFile(filename);
+
+ ZipReturn retVal = lf.LocalFileOpenWriteStream(_zipFs, raw, trrntzip, uncompressedSize, compressionMethod, out stream);
+
+ _compressionStream = stream;
+ _localFiles.Add(lf);
+
+ return retVal;
+ }
+
+
+ public ZipReturn ZipFileCloseWriteStream(byte[] crc32)
+ {
+ if (_compressionStream is ZlibBaseStream dfStream)
+ {
+ dfStream.Flush();
+ dfStream.Close();
+ dfStream.Dispose();
+ }
+ _compressionStream = null;
+
+ return _localFiles[_localFiles.Count - 1].LocalFileCloseWriteStream(_zipFs, crc32);
+ }
+
+ public ZipReturn ZipFileRollBack()
+ {
+ if (ZipOpen != ZipOpenType.OpenWrite)
+ {
+ return ZipReturn.ZipWritingToInputFile;
+ }
+
+ int fileCount = _localFiles.Count;
+ if (fileCount == 0)
+ {
+ return ZipReturn.ZipErrorRollBackFile;
+ }
+
+ LocalFile lf = _localFiles[fileCount - 1];
+
+ _localFiles.RemoveAt(fileCount - 1);
+ _zipFs.Position = (long)lf.LocalFilePos;
+ return ZipReturn.ZipGood;
+ }
+
+ public void ZipFileAddDirectory()
+ {
+ LocalFile.LocalFileAddDirectory(_zipFs);
+ }
+
+ /*
+ public void BreakTrrntZip(string filename)
+ {
+ _zipFs = new FileStream(filename, FileMode.Open, FileAccess.ReadWrite);
+ using (BinaryReader zipBr = new BinaryReader(_zipFs,Encoding.UTF8,true))
+ {
+ _zipFs.Position = _zipFs.Length - 22;
+ byte[] fileComment = zipBr.ReadBytes(22);
+ if (GetString(fileComment).Substring(0, 14) == "TORRENTZIPPED-")
+ {
+ _zipFs.Position = _zipFs.Length - 8;
+ _zipFs.WriteByte(48); _zipFs.WriteByte(48); _zipFs.WriteByte(48); _zipFs.WriteByte(48);
+ _zipFs.WriteByte(48); _zipFs.WriteByte(48); _zipFs.WriteByte(48); _zipFs.WriteByte(48);
+ }
+ }
+ _zipFs.Flush();
+ _zipFs.Close();
+ }
+ */
+
+ ~ZipFile()
+ {
+ if (_zipFs != null)
+ {
+ _zipFs.Close();
+ _zipFs.Dispose();
+ }
+ }
+
+
+ private ZipReturn FindEndOfCentralDirSignature()
+ {
+ long fileSize = _zipFs.Length;
+ long maxBackSearch = 0xffff;
+
+ if (_zipFs.Length < maxBackSearch)
+ {
+ maxBackSearch = fileSize;
+ }
+
+ const long buffSize = 0x400;
+
+ byte[] buffer = new byte[buffSize + 4];
+
+ long backPosition = 4;
+ while (backPosition < maxBackSearch)
+ {
+ backPosition += buffSize;
+ if (backPosition > maxBackSearch)
+ {
+ backPosition = maxBackSearch;
+ }
+
+ long readSize = backPosition > buffSize + 4 ? buffSize + 4 : backPosition;
+
+ _zipFs.Position = fileSize - backPosition;
+
+ _zipFs.Read(buffer, 0, (int)readSize);
+
+
+ for (long i = readSize - 4; i >= 0; i--)
+ {
+ if (buffer[i] != 0x50 || buffer[i + 1] != 0x4b || buffer[i + 2] != 0x05 || buffer[i + 3] != 0x06)
+ {
+ continue;
+ }
+
+ _zipFs.Position = fileSize - backPosition + i;
+ return ZipReturn.ZipGood;
+ }
+ }
+ return ZipReturn.ZipCentralDirError;
+ }
+
+
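+ // Parse the End Of Central Directory record; multi-disk archives are rejected,
+ // and any data found after the record is flagged as ZipStatus.ExtraData.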
+ private ZipReturn EndOfCentralDirRead()
+ {
+ using (BinaryReader zipBr = new BinaryReader(_zipFs, Encoding.UTF8, true))
+ {
+ uint thisSignature = zipBr.ReadUInt32();
+ if (thisSignature != EndOfCentralDirSignature)
+ {
+ return ZipReturn.ZipEndOfCentralDirectoryError;
+ }
+
+ ushort tushort = zipBr.ReadUInt16(); // NumberOfThisDisk
+ if (tushort != 0)
+ {
+ return ZipReturn.ZipEndOfCentralDirectoryError;
+ }
+
+ tushort = zipBr.ReadUInt16(); // NumberOfThisDiskCenterDir
+ if (tushort != 0)
+ {
+ return ZipReturn.ZipEndOfCentralDirectoryError;
+ }
+
+ _localFilesCount = zipBr.ReadUInt16(); // TotalNumberOfEntriesDisk
+
+ tushort = zipBr.ReadUInt16(); // Total number of entries in the central directory
+ if (tushort != _localFilesCount)
+ {
+ return ZipReturn.ZipEndOfCentralDirectoryError;
+ }
+
+ _centerDirSize = zipBr.ReadUInt32(); // SizeOfCentralDir
+ _centerDirStart = zipBr.ReadUInt32(); // Offset
+
+ ushort zipFileCommentLength = zipBr.ReadUInt16();
+
+ _fileComment = zipBr.ReadBytes(zipFileCommentLength);
+
+ if (_zipFs.Position != _zipFs.Length)
+ {
+ ZipStatus |= ZipStatus.ExtraData;
+ }
+
+ return ZipReturn.ZipGood;
+ }
+ }
+
+ private void EndOfCentralDirWrite()
+ {
+ using (BinaryWriter bw = new BinaryWriter(_zipFs, Encoding.UTF8, true))
+ {
+ bw.Write(EndOfCentralDirSignature);
+ bw.Write((ushort)0); // NumberOfThisDisk
+ bw.Write((ushort)0); // NumberOfThisDiskCenterDir
+ bw.Write((ushort)(_localFiles.Count >= 0xffff ? 0xffff : _localFiles.Count)); // TotalNumberOfEntriesDisk
+ bw.Write((ushort)(_localFiles.Count >= 0xffff ? 0xffff : _localFiles.Count)); // Total number of entries in the central directory
+ bw.Write((uint)(_centerDirSize >= 0xffffffff ? 0xffffffff : _centerDirSize));
+ bw.Write((uint)(_centerDirStart >= 0xffffffff ? 0xffffffff : _centerDirStart));
+ bw.Write((ushort)_fileComment.Length);
+ bw.Write(_fileComment, 0, _fileComment.Length);
+ }
+ }
+
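+ // Parse the Zip64 End Of Central Directory record, used when the entry count,
+ // central directory size or central directory offset overflow their 16/32-bit fields.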
+ private ZipReturn Zip64EndOfCentralDirRead()
+ {
+ using (BinaryReader zipBr = new BinaryReader(_zipFs, Encoding.UTF8, true))
+ {
+ _zip64 = true;
+ uint thisSignature = zipBr.ReadUInt32();
+ if (thisSignature != Zip64EndOfCentralDirSignatue)
+ {
+ return ZipReturn.ZipEndOfCentralDirectoryError;
+ }
+
+ ulong tulong = zipBr.ReadUInt64(); // Size of zip64 end of central directory record
+ if (tulong != 44)
+ {
+ return ZipReturn.Zip64EndOfCentralDirError;
+ }
+
+ zipBr.ReadUInt16(); // version made by
+
+ ushort tushort = zipBr.ReadUInt16(); // version needed to extract
+ if (tushort != 45)
+ {
+ return ZipReturn.Zip64EndOfCentralDirError;
+ }
+
+ uint tuint = zipBr.ReadUInt32(); // number of this disk
+ if (tuint != 0)
+ {
+ return ZipReturn.Zip64EndOfCentralDirError;
+ }
+
+ tuint = zipBr.ReadUInt32(); // number of the disk with the start of the central directory
+ if (tuint != 0)
+ {
+ return ZipReturn.Zip64EndOfCentralDirError;
+ }
+
+ _localFilesCount =
+ (uint)zipBr.ReadUInt64(); // total number of entries in the central directory on this disk
+
+ tulong = zipBr.ReadUInt64(); // total number of entries in the central directory
+ if (tulong != _localFilesCount)
+ {
+ return ZipReturn.Zip64EndOfCentralDirError;
+ }
+
+ _centerDirSize = zipBr.ReadUInt64(); // size of central directory
+
+ _centerDirStart =
+ zipBr.ReadUInt64(); // offset of start of central directory with respect to the starting disk number
+
+ return ZipReturn.ZipGood;
+ }
+ }
+
+ private void Zip64EndOfCentralDirWrite()
+ {
+ using (BinaryWriter bw = new BinaryWriter(_zipFs, Encoding.UTF8, true))
+ {
+ bw.Write(Zip64EndOfCentralDirSignatue);
+ bw.Write((ulong)44); // Size of zip64 end of central directory record
+ bw.Write((ushort)45); // version made by
+ bw.Write((ushort)45); // version needed to extract
+ bw.Write((uint)0); // number of this disk
+ bw.Write((uint)0); // number of the disk with the start of the central directory
+ bw.Write((ulong)_localFiles.Count); // total number of entries in the central directory on this disk
+ bw.Write((ulong)_localFiles.Count); // total number of entries in the central directory
+ bw.Write(_centerDirSize); // size of central directory
+ bw.Write(_centerDirStart); // offset of start of central directory with respect to the starting disk number
+ }
+ }
+
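+ // Parse the Zip64 End Of Central Directory Locator, which stores the absolute
+ // offset of the Zip64 End Of Central Directory record.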
+ private ZipReturn Zip64EndOfCentralDirectoryLocatorRead()
+ {
+ using (BinaryReader zipBr = new BinaryReader(_zipFs, Encoding.UTF8, true))
+ {
+ _zip64 = true;
+ uint thisSignature = zipBr.ReadUInt32();
+ if (thisSignature != Zip64EndOfCentralDirectoryLocator)
+ {
+ return ZipReturn.ZipEndOfCentralDirectoryError;
+ }
+
+ uint tuint =
+ zipBr.ReadUInt32(); // number of the disk with the start of the zip64 end of central directory
+ if (tuint != 0)
+ {
+ return ZipReturn.Zip64EndOfCentralDirectoryLocatorError;
+ }
+
+ _endOfCenterDir64 = zipBr.ReadUInt64(); // relative offset of the zip64 end of central directory record
+
+ tuint = zipBr.ReadUInt32(); // total number of disks
+ if (tuint != 1)
+ {
+ return ZipReturn.Zip64EndOfCentralDirectoryLocatorError;
+ }
+
+ return ZipReturn.ZipGood;
+ }
+ }
+
+ private void Zip64EndOfCentralDirectoryLocatorWrite()
+ {
+ using (BinaryWriter bw = new BinaryWriter(_zipFs, Encoding.UTF8, true))
+ {
+ bw.Write(Zip64EndOfCentralDirectoryLocator);
+ bw.Write((uint)0); // number of the disk with the start of the zip64 end of central directory
+ bw.Write(_endOfCenterDir64); // relative offset of the zip64 end of central directory record
+ bw.Write((uint)1); // total number of disks
+ }
+ }
+
+
+
+
+ public ZipReturn ZipFileOpen(string newFilename, long timestamp, bool readHeaders)
+ {
+ ZipFileClose();
+ ZipStatus = ZipStatus.None;
+ _zip64 = false;
+ _centerDirStart = 0;
+ _centerDirSize = 0;
+ _zipFileInfo = null;
+
+ try
+ {
+ if (!RVIO.File.Exists(newFilename))
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorFileNotFound;
+ }
+ _zipFileInfo = new FileInfo(newFilename);
+ if (timestamp != -1 && _zipFileInfo.LastWriteTime != timestamp)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorTimeStamp;
+ }
+ int errorCode = FileStream.OpenFileRead(newFilename, out _zipFs);
+ if (errorCode != 0)
+ {
+ ZipFileClose();
+ if (errorCode == 32)
+ {
+ return ZipReturn.ZipFileLocked;
+ }
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ }
+ catch (PathTooLongException)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipFileNameToLong;
+ }
+ catch (IOException)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ ZipOpen = ZipOpenType.OpenRead;
+
+ if (!readHeaders)
+ {
+ return ZipReturn.ZipGood;
+ }
+
+
+ return ZipFileReadHeaders();
+ }
+
+
+ public ZipReturn ZipFileOpen(Stream inStream)
+ {
+ ZipFileClose();
+ ZipStatus = ZipStatus.None;
+ _zip64 = false;
+ _centerDirStart = 0;
+ _centerDirSize = 0;
+ _zipFileInfo = null;
+ _zipFs = inStream;
+
+ ZipOpen = ZipOpenType.OpenRead;
+ return ZipFileReadHeaders();
+ }
+
+
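+ // Read and validate the archive structure: locate and parse the (Zip64) end of
+ // central directory, verify any TorrentZip comment, then read every central
+ // directory entry followed by its matching local file header.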
+ private ZipReturn ZipFileReadHeaders()
+ {
+ try
+ {
+ ZipReturn zRet = FindEndOfCentralDirSignature();
+ if (zRet != ZipReturn.ZipGood)
+ {
+ ZipFileClose();
+ return zRet;
+ }
+
+ long endOfCentralDir = _zipFs.Position;
+ zRet = EndOfCentralDirRead();
+ if (zRet != ZipReturn.ZipGood)
+ {
+ ZipFileClose();
+ return zRet;
+ }
+
+ // check if this is a ZIP64 zip and if it is read the Zip64 End Of Central Dir Info
+ if (_centerDirStart == 0xffffffff || _centerDirSize == 0xffffffff || _localFilesCount == 0xffff)
+ {
+ _zip64 = true;
+ _zipFs.Position = endOfCentralDir - 20;
+ zRet = Zip64EndOfCentralDirectoryLocatorRead();
+ if (zRet != ZipReturn.ZipGood)
+ {
+ ZipFileClose();
+ return zRet;
+ }
+ _zipFs.Position = (long)_endOfCenterDir64;
+ zRet = Zip64EndOfCentralDirRead();
+ if (zRet != ZipReturn.ZipGood)
+ {
+ ZipFileClose();
+ return zRet;
+ }
+ }
+
+ bool trrntzip = false;
+
+ // check if the ZIP has a valid TorrentZip file comment
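+ // (the comment has the form "TORRENTZIPPED-XXXXXXXX", where XXXXXXXX is the
+ // CRC32 of the central directory, so the CRC is recomputed and compared here)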
+ if (_fileComment.Length == 22)
+ {
+ if (GetString(_fileComment).Substring(0, 14) == "TORRENTZIPPED-")
+ {
+ CrcCalculatorStream crcCs = new CrcCalculatorStream(_zipFs, true);
+ byte[] buffer = new byte[_centerDirSize];
+ _zipFs.Position = (long)_centerDirStart;
+ crcCs.Read(buffer, 0, (int)_centerDirSize);
+ crcCs.Flush();
+ crcCs.Close();
+
+ uint r = (uint)crcCs.Crc;
+ crcCs.Dispose();
+
+ string tcrc = GetString(_fileComment).Substring(14, 8);
+ string zcrc = r.ToString("X8");
+ if (string.Compare(tcrc, zcrc, StringComparison.Ordinal) == 0)
+ {
+ trrntzip = true;
+ }
+ }
+ }
+
+
+ // now read the central directory
+ _zipFs.Position = (long)_centerDirStart;
+
+ _localFiles.Clear();
+ _localFiles.Capacity = (int)_localFilesCount;
+ for (int i = 0; i < _localFilesCount; i++)
+ {
+ LocalFile lc = new LocalFile();
+ zRet = lc.CenteralDirectoryRead(_zipFs);
+ if (zRet != ZipReturn.ZipGood)
+ {
+ ZipFileClose();
+ return zRet;
+ }
+ _zip64 |= lc.Zip64;
+ _localFiles.Add(lc);
+ }
+
+ for (int i = 0; i < _localFilesCount; i++)
+ {
+ zRet = _localFiles[i].LocalFileHeaderRead(_zipFs);
+ if (zRet != ZipReturn.ZipGood)
+ {
+ ZipFileClose();
+ return zRet;
+ }
+ trrntzip &= _localFiles[i].TrrntZip;
+ }
+
+ // check trrntzip file order
+ if (trrntzip)
+ {
+ for (int i = 0; i < _localFilesCount - 1; i++)
+ {
+ if (TrrntZipStringCompare(_localFiles[i].FileName, _localFiles[i + 1].FileName) < 0)
+ {
+ continue;
+ }
+ trrntzip = false;
+ break;
+ }
+ }
+
+ // check trrntzip directories
+ if (trrntzip)
+ {
+ for (int i = 0; i < _localFilesCount - 1; i++)
+ {
+ // see if we found a directory
+ string filename0 = _localFiles[i].FileName;
+ if (filename0.Substring(filename0.Length - 1, 1) != "/")
+ {
+ continue;
+ }
+
+ // see if the next file is in that directory
+ string filename1 = _localFiles[i + 1].FileName;
+ if (filename1.Length <= filename0.Length)
+ {
+ continue;
+ }
+ if (TrrntZipStringCompare(filename0, filename1.Substring(0, filename0.Length)) != 0)
+ {
+ continue;
+ }
+
+ // if we found a file in the directory then we do not need the directory entry
+ trrntzip = false;
+ break;
+ }
+ }
+
+ if (trrntzip)
+ {
+ ZipStatus |= ZipStatus.TrrntZip;
+ }
+
+ return ZipReturn.ZipGood;
+ }
+ catch
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorReadingFile;
+ }
+ }
+
+ public void ZipCreateFake()
+ {
+ if (ZipOpen != ZipOpenType.Closed)
+ {
+ return;
+ }
+
+ ZipOpen = ZipOpenType.OpenFakeWrite;
+ }
+
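+ // End a fake write session: build the central directory (plus the Zip64 records
+ // when needed) in a MemoryStream and hand it back as a byte array, so the caller
+ // can append it to file data written elsewhere.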
+ public void ZipFileCloseFake(ulong fileOffset, out byte[] centeralDir)
+ {
+ centeralDir = null;
+ if (ZipOpen != ZipOpenType.OpenFakeWrite)
+ {
+ return;
+ }
+
+ _zip64 = false;
+ bool lTrrntzip = true;
+
+ _zipFs = new MemoryStream();
+
+ _centerDirStart = fileOffset;
+ if (_centerDirStart >= 0xffffffff)
+ {
+ _zip64 = true;
+ }
+
+ CrcCalculatorStream crcCs = new CrcCalculatorStream(_zipFs, true);
+
+ foreach (LocalFile t in _localFiles)
+ {
+ t.CenteralDirectoryWrite(crcCs);
+ _zip64 |= t.Zip64;
+ lTrrntzip &= t.TrrntZip;
+ }
+
+ crcCs.Flush();
+ crcCs.Close();
+
+ _centerDirSize = (ulong)_zipFs.Position;
+
+ _fileComment = lTrrntzip ? GetBytes("TORRENTZIPPED-" + crcCs.Crc.ToString("X8")) : new byte[0];
+ ZipStatus = lTrrntzip ? ZipStatus.TrrntZip : ZipStatus.None;
+
+ crcCs.Dispose();
+
+ if (_zip64)
+ {
+ _endOfCenterDir64 = fileOffset + (ulong)_zipFs.Position;
+ Zip64EndOfCentralDirWrite();
+ Zip64EndOfCentralDirectoryLocatorWrite();
+ }
+ EndOfCentralDirWrite();
+
+ centeralDir = ((MemoryStream)_zipFs).ToArray();
+ _zipFs.Close();
+ _zipFs.Dispose();
+ ZipOpen = ZipOpenType.Closed;
+ }
+
+
+
+ public ZipReturn ZipFileOpenReadStream(int index, out Stream stream, out ulong streamSize)
+ {
+ return ZipFileOpenReadStream(index, false, out stream, out streamSize, out ushort _);
+ }
+
+ public ZipReturn ZipFileOpenReadStream(int index, bool raw, out Stream stream, out ulong streamSize, out ushort compressionMethod)
+ {
+ ZipFileCloseReadStream();
+
+ streamSize = 0;
+ compressionMethod = 0;
+ stream = null;
+ if (ZipOpen != ZipOpenType.OpenRead)
+ {
+ return ZipReturn.ZipReadingFromOutputFile;
+ }
+
+ ZipReturn zRet = _localFiles[index].LocalFileHeaderRead(_zipFs);
+ if (zRet != ZipReturn.ZipGood)
+ {
+ ZipFileClose();
+ return zRet;
+ }
+
+ zRet = _localFiles[index].LocalFileOpenReadStream(_zipFs, raw, out stream, out streamSize, out compressionMethod);
+ _compressionStream = stream;
+ return zRet;
+ }
+
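+ // Open a read stream directly from a known local-header offset, skipping the
+ // central directory; fails with ZipCannotFastOpen if the entry uses a data descriptor.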
+ public ZipReturn ZipFileOpenReadStreamQuick(ulong pos, bool raw, out Stream stream, out ulong streamSize, out ushort compressionMethod)
+ {
+ ZipFileCloseReadStream();
+
+ LocalFile tmpFile = new LocalFile { LocalFilePos = pos };
+ _localFiles.Clear();
+ _localFiles.Add(tmpFile);
+ ZipReturn zRet = tmpFile.LocalFileHeaderReadQuick(_zipFs);
+ if (zRet != ZipReturn.ZipGood)
+ {
+ stream = null;
+ streamSize = 0;
+ compressionMethod = 0;
+ return zRet;
+ }
+
+ zRet = tmpFile.LocalFileOpenReadStream(_zipFs, raw, out stream, out streamSize, out compressionMethod);
+ _compressionStream = stream;
+ return zRet;
+ }
+
+ public ZipReturn ZipFileAddFake(string filename, ulong fileOffset, ulong uncompressedSize, ulong compressedSize, byte[] crc32, out byte[] localHeader)
+ {
+ localHeader = null;
+
+ if (ZipOpen != ZipOpenType.OpenFakeWrite)
+ {
+ return ZipReturn.ZipWritingToInputFile;
+ }
+
+ LocalFile lf = new LocalFile(filename);
+ _localFiles.Add(lf);
+
+ MemoryStream ms = new MemoryStream();
+ lf.LocalFileHeaderFake(fileOffset, uncompressedSize, compressedSize, crc32, ms);
+
+ localHeader = ms.ToArray();
+ ms.Close();
+
+ return ZipReturn.ZipGood;
+ }
+
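+ // Create every missing directory level leading up to the given file path.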
+ public static void CreateDirForFile(string sFilename)
+ {
+ string strTemp = Path.GetDirectoryName(sFilename);
+
+ if (string.IsNullOrEmpty(strTemp))
+ {
+ return;
+ }
+
+ if (Directory.Exists(strTemp))
+ {
+ return;
+ }
+
+
+ while (strTemp.Length > 0 && !Directory.Exists(strTemp))
+ {
+ int pos = strTemp.LastIndexOf(Path.DirectorySeparatorChar);
+ if (pos < 0)
+ {
+ pos = 0;
+ }
+ strTemp = strTemp.Substring(0, pos);
+ }
+
+ while (sFilename.IndexOf(Path.DirectorySeparatorChar, strTemp.Length + 1) > 0)
+ {
+ strTemp = sFilename.Substring(0, sFilename.IndexOf(Path.DirectorySeparatorChar, strTemp.Length + 1));
+ Directory.CreateDirectory(strTemp);
+ }
+ }
+
+
+ public static string ZipErrorMessageText(ZipReturn zS)
+ {
+ string ret = "Unknown";
+ switch (zS)
+ {
+ case ZipReturn.ZipGood:
+ ret = "";
+ break;
+ case ZipReturn.ZipFileCountError:
+ ret = "The number of file in the Zip does not mach the number of files in the Zips Centeral Directory";
+ break;
+ case ZipReturn.ZipSignatureError:
+ ret = "An unknown Signature Block was found in the Zip";
+ break;
+ case ZipReturn.ZipExtraDataOnEndOfZip:
+ ret = "Extra Data was found on the end of the Zip";
+ break;
+ case ZipReturn.ZipUnsupportedCompression:
+ ret = "An unsupported Compression method was found in the Zip, if you recompress this zip it will be usable";
+ break;
+ case ZipReturn.ZipLocalFileHeaderError:
+ ret = "Error reading a zipped file header information";
+ break;
+ case ZipReturn.ZipCentralDirError:
+ ret = "There is an error in the Zip Centeral Directory";
+ break;
+ case ZipReturn.ZipReadingFromOutputFile:
+ ret = "Trying to write to a Zip file open for output only";
+ break;
+ case ZipReturn.ZipWritingToInputFile:
+ ret = "Tring to read from a Zip file open for input only";
+ break;
+ case ZipReturn.ZipErrorGettingDataStream:
+ ret = "Error creating Data Stream";
+ break;
+ case ZipReturn.ZipCRCDecodeError:
+ ret = "CRC error";
+ break;
+ case ZipReturn.ZipDecodeError:
+ ret = "Error unzipping a file";
+ break;
+ }
+
+ return ret;
+ }
+
+ private static byte[] GetBytes(string s)
+ {
+ char[] c = s.ToCharArray();
+ byte[] b = new byte[c.Length];
+ for (int i = 0; i < c.Length; i++)
+ {
+ char t = c[i];
+ b[i] = t > 255 ? (byte)'?' : (byte)c[i];
+ }
+ return b;
+ }
+
+ private static bool IsUnicode(string s)
+ {
+ char[] charArr = s.ToCharArray();
+ foreach (char ch in charArr)
+ {
+ if (ch > 127)
+ {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private static string GetString(byte[] byteArr)
+ {
+ string s = "";
+ foreach (byte by in byteArr)
+ {
+ s += (char)by;
+ }
+ return s;
+ }
+
+ private static bool CompareString(string s1, string s2)
+ {
+ char[] c1 = s1.ToCharArray();
+ char[] c2 = s2.ToCharArray();
+
+ if (c1.Length != c2.Length)
+ {
+ return false;
+ }
+
+ for (int i = 0; i < c1.Length; i++)
+ {
+ if (c1[i] != c2[i])
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+
+
+ private static bool ByteArrCompare(byte[] b0, byte[] b1)
+ {
+ if ((b0 == null) || (b1 == null))
+ {
+ return false;
+ }
+ if (b0.Length != b1.Length)
+ {
+ return false;
+ }
+
+ for (int i = 0; i < b0.Length; i++)
+ {
+ if (b0[i] != b1[i])
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+
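+ // Ordinal comparison with ASCII upper-case letters folded to lower-case,
+ // used to verify the TorrentZip file ordering.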
+ public static int TrrntZipStringCompare(string string1, string string2)
+ {
+ char[] bytes1 = string1.ToCharArray();
+ char[] bytes2 = string2.ToCharArray();
+
+ int pos1 = 0;
+ int pos2 = 0;
+
+ for (; ; )
+ {
+ if (pos1 == bytes1.Length)
+ {
+ return pos2 == bytes2.Length ? 0 : -1;
+ }
+ if (pos2 == bytes2.Length)
+ {
+ return 1;
+ }
+
+ int byte1 = bytes1[pos1++];
+ int byte2 = bytes2[pos2++];
+
+ if (byte1 >= 65 && byte1 <= 90)
+ {
+ byte1 += 0x20;
+ }
+ if (byte2 >= 65 && byte2 <= 90)
+ {
+ byte2 += 0x20;
+ }
+
+ if (byte1 < byte2)
+ {
+ return -1;
+ }
+ if (byte1 > byte2)
+ {
+ return 1;
+ }
+ }
+ }
+
+ private class LocalFile
+ {
+ private ushort _compressionMethod;
+ private ushort _lastModFileTime;
+ private ushort _lastModFileDate;
+ private ulong _compressedSize;
+ public ulong RelativeOffsetOfLocalHeader; // only in central directory
+
+ private ulong _crc32Location;
+ private ulong _extraLocation;
+ private ulong _dataLocation;
+
+ public LocalFile()
+ {
+ }
+
+ public LocalFile(string filename)
+ {
+ Zip64 = false;
+ GeneralPurposeBitFlag = 2; // Maximum Compression Deflating
+ _compressionMethod = 8; // Compression Method Deflate
+ _lastModFileTime = 48128;
+ _lastModFileDate = 8600;
+
+ FileName = filename;
+ }
+
+ public string FileName { get; private set; }
+ public ushort GeneralPurposeBitFlag { get; private set; }
+ public byte[] CRC { get; private set; }
+ public ulong UncompressedSize { get; private set; }
+
+ public bool Zip64 { get; private set; }
+ public bool TrrntZip { get; private set; }
+
+ public DateTime DateTime
+ {
+ get
+ {
+ int second = (_lastModFileTime & 0x1f) * 2;
+ int minute = (_lastModFileTime >> 5) & 0x3f;
+ int hour = (_lastModFileTime >> 11) & 0x1f;
+
+ int day = _lastModFileDate & 0x1f;
+ int month = (_lastModFileDate >> 5) & 0x0f;
+ int year = ((_lastModFileDate >> 9) & 0x7f) + 1980;
+
+ return new DateTime(year, month, day, hour, minute, second);
+ }
+ }
+
+ public ulong LocalFilePos
+ {
+ get => RelativeOffsetOfLocalHeader;
+ set => RelativeOffsetOfLocalHeader = value;
+ }
+
+
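+ // Read one central directory entry, including the Zip64 (0x0001) and
+ // Info-ZIP Unicode Path (0x7075) extra fields when present.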
+ public ZipReturn CenteralDirectoryRead(Stream zipFs)
+ {
+ try
+ {
+ using (BinaryReader br = new BinaryReader(zipFs, Encoding.UTF8, true))
+ {
+ uint thisSignature = br.ReadUInt32();
+ if (thisSignature != CentralDirectoryHeaderSigniature)
+ {
+ return ZipReturn.ZipCentralDirError;
+ }
+
+ br.ReadUInt16(); // Version Made By
+
+ br.ReadUInt16(); // Version Needed To Extract
+
+
+ GeneralPurposeBitFlag = br.ReadUInt16();
+ _compressionMethod = br.ReadUInt16();
+ if (_compressionMethod != 8 && _compressionMethod != 0)
+ {
+ return ZipReturn.ZipUnsupportedCompression;
+ }
+
+ _lastModFileTime = br.ReadUInt16();
+ _lastModFileDate = br.ReadUInt16();
+ CRC = ReadCRC(br);
+
+ _compressedSize = br.ReadUInt32();
+ UncompressedSize = br.ReadUInt32();
+
+ ushort fileNameLength = br.ReadUInt16();
+ ushort extraFieldLength = br.ReadUInt16();
+ ushort fileCommentLength = br.ReadUInt16();
+
+ br.ReadUInt16(); // diskNumberStart
+ br.ReadUInt16(); // internalFileAttributes
+ br.ReadUInt32(); // externalFileAttributes
+
+ RelativeOffsetOfLocalHeader = br.ReadUInt32();
+
+ byte[] bFileName = br.ReadBytes(fileNameLength);
+ FileName = (GeneralPurposeBitFlag & (1 << 11)) == 0
+ ? GetString(bFileName)
+ : Encoding.UTF8.GetString(bFileName, 0, fileNameLength);
+
+ byte[] extraField = br.ReadBytes(extraFieldLength);
+ br.ReadBytes(fileCommentLength); // File Comments
+
+ int pos = 0;
+ while (extraFieldLength > pos)
+ {
+ ushort type = BitConverter.ToUInt16(extraField, pos);
+ pos += 2;
+ ushort blockLength = BitConverter.ToUInt16(extraField, pos);
+ pos += 2;
+ switch (type)
+ {
+ case 0x0001:
+ Zip64 = true;
+ if (UncompressedSize == 0xffffffff)
+ {
+ UncompressedSize = BitConverter.ToUInt64(extraField, pos);
+ pos += 8;
+ }
+
+ if (_compressedSize == 0xffffffff)
+ {
+ _compressedSize = BitConverter.ToUInt64(extraField, pos);
+ pos += 8;
+ }
+
+ if (RelativeOffsetOfLocalHeader == 0xffffffff)
+ {
+ RelativeOffsetOfLocalHeader = BitConverter.ToUInt64(extraField, pos);
+ pos += 8;
+ }
+
+ break;
+ case 0x7075:
+ //byte version = extraField[pos];
+ pos += 1;
+ uint nameCRC32 = BitConverter.ToUInt32(extraField, pos);
+ pos += 4;
+
+ CRC crcTest = new CRC();
+ crcTest.SlurpBlock(bFileName, 0, fileNameLength);
+ uint fCRC = crcTest.Crc32ResultU;
+
+ if (nameCRC32 != fCRC)
+ {
+ return ZipReturn.ZipCentralDirError;
+ }
+
+ int charLen = blockLength - 5;
+
+ FileName = Encoding.UTF8.GetString(extraField, pos, charLen);
+ pos += charLen;
+
+ break;
+ default:
+ pos += blockLength;
+ break;
+ }
+ }
+
+ return ZipReturn.ZipGood;
+ }
+ }
+ catch
+ {
+ return ZipReturn.ZipCentralDirError;
+ }
+ }
+
+ public void CenteralDirectoryWrite(Stream crcStream)
+ {
+ using (BinaryWriter bw = new BinaryWriter(crcStream, Encoding.UTF8, true))
+ {
+ const uint header = 0x2014B50;
+
+ List extraField = new List();
+
+ uint cdUncompressedSize;
+ if (UncompressedSize >= 0xffffffff)
+ {
+ Zip64 = true;
+ cdUncompressedSize = 0xffffffff;
+ extraField.AddRange(BitConverter.GetBytes(UncompressedSize));
+ }
+ else
+ {
+ cdUncompressedSize = (uint)UncompressedSize;
+ }
+
+ uint cdCompressedSize;
+ if (_compressedSize >= 0xffffffff)
+ {
+ Zip64 = true;
+ cdCompressedSize = 0xffffffff;
+ extraField.AddRange(BitConverter.GetBytes(_compressedSize));
+ }
+ else
+ {
+ cdCompressedSize = (uint)_compressedSize;
+ }
+
+ uint cdRelativeOffsetOfLocalHeader;
+ if (RelativeOffsetOfLocalHeader >= 0xffffffff)
+ {
+ Zip64 = true;
+ cdRelativeOffsetOfLocalHeader = 0xffffffff;
+ extraField.AddRange(BitConverter.GetBytes(RelativeOffsetOfLocalHeader));
+ }
+ else
+ {
+ cdRelativeOffsetOfLocalHeader = (uint)RelativeOffsetOfLocalHeader;
+ }
+
+
+ if (extraField.Count > 0)
+ {
+ ushort exfl = (ushort)extraField.Count;
+ extraField.InsertRange(0, BitConverter.GetBytes((ushort)0x0001));
+ extraField.InsertRange(2, BitConverter.GetBytes(exfl));
+ }
+
+ ushort extraFieldLength = (ushort)extraField.Count;
+
+ byte[] bFileName;
+ if (IsUnicode(FileName))
+ {
+ GeneralPurposeBitFlag |= 1 << 11;
+ bFileName = Encoding.UTF8.GetBytes(FileName);
+ }
+ else
+ {
+ bFileName = GetBytes(FileName);
+ }
+
+ ushort fileNameLength = (ushort)bFileName.Length;
+
+ ushort versionNeededToExtract = (ushort)(Zip64 ? 45 : 20);
+
+ bw.Write(header);
+ bw.Write((ushort)0);
+ bw.Write(versionNeededToExtract);
+ bw.Write(GeneralPurposeBitFlag);
+ bw.Write(_compressionMethod);
+ bw.Write(_lastModFileTime);
+ bw.Write(_lastModFileDate);
+ bw.Write(CRC[3]);
+ bw.Write(CRC[2]);
+ bw.Write(CRC[1]);
+ bw.Write(CRC[0]);
+ bw.Write(cdCompressedSize);
+ bw.Write(cdUncompressedSize);
+ bw.Write(fileNameLength);
+ bw.Write(extraFieldLength);
+ bw.Write((ushort)0); // file comment length
+ bw.Write((ushort)0); // disk number start
+ bw.Write((ushort)0); // internal file attributes
+ bw.Write((uint)0); // external file attributes
+ bw.Write(cdRelativeOffsetOfLocalHeader);
+
+ bw.Write(bFileName, 0, fileNameLength);
+ bw.Write(extraField.ToArray(), 0, extraFieldLength);
+ // No File Comment
+ }
+ }
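+
+ // Read the local file header and cross-check it against the values taken from
+ // the central directory; any mismatch returns ZipLocalFileHeaderError.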
+ public ZipReturn LocalFileHeaderRead(Stream zipFs)
+ {
+ try
+ {
+ using (BinaryReader br = new BinaryReader(zipFs, Encoding.UTF8, true))
+ {
+
+ TrrntZip = true;
+
+ zipFs.Position = (long)RelativeOffsetOfLocalHeader;
+ uint thisSignature = br.ReadUInt32();
+ if (thisSignature != LocalFileHeaderSignature)
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ br.ReadUInt16(); // version needed to extract
+ ushort generalPurposeBitFlagLocal = br.ReadUInt16();
+ if (generalPurposeBitFlagLocal != GeneralPurposeBitFlag)
+ {
+ TrrntZip = false;
+ }
+
+ ushort tshort = br.ReadUInt16();
+ if (tshort != _compressionMethod)
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ tshort = br.ReadUInt16();
+ if (tshort != _lastModFileTime)
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ tshort = br.ReadUInt16();
+ if (tshort != _lastModFileDate)
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ byte[] tCRC = ReadCRC(br);
+ ulong tCompressedSize = br.ReadUInt32();
+ ulong tUnCompressedSize = br.ReadUInt32();
+
+ ushort fileNameLength = br.ReadUInt16();
+ ushort extraFieldLength = br.ReadUInt16();
+
+
+ byte[] bFileName = br.ReadBytes(fileNameLength);
+ string tFileName = (generalPurposeBitFlagLocal & (1 << 11)) == 0
+ ? GetString(bFileName)
+ : Encoding.UTF8.GetString(bFileName, 0, fileNameLength);
+
+ byte[] extraField = br.ReadBytes(extraFieldLength);
+
+
+ Zip64 = false;
+ int pos = 0;
+ while (extraFieldLength > pos)
+ {
+ ushort type = BitConverter.ToUInt16(extraField, pos);
+ pos += 2;
+ ushort blockLength = BitConverter.ToUInt16(extraField, pos);
+ pos += 2;
+ switch (type)
+ {
+ case 0x0001:
+ Zip64 = true;
+ if (tUnCompressedSize == 0xffffffff)
+ {
+ tUnCompressedSize = BitConverter.ToUInt64(extraField, pos);
+ pos += 8;
+ }
+
+ if (tCompressedSize == 0xffffffff)
+ {
+ tCompressedSize = BitConverter.ToUInt64(extraField, pos);
+ pos += 8;
+ }
+
+ break;
+ case 0x7075:
+ //byte version = extraField[pos];
+ pos += 1;
+ uint nameCRC32 = BitConverter.ToUInt32(extraField, pos);
+ pos += 4;
+
+ CRC crcTest = new CRC();
+ crcTest.SlurpBlock(bFileName, 0, fileNameLength);
+ uint fCRC = crcTest.Crc32ResultU;
+
+ if (nameCRC32 != fCRC)
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ int charLen = blockLength - 5;
+
+ tFileName = Encoding.UTF8.GetString(extraField, pos, charLen);
+ pos += charLen;
+
+ break;
+ default:
+ pos += blockLength;
+ break;
+ }
+ }
+
+ if (!CompareString(FileName, tFileName))
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ _dataLocation = (ulong)zipFs.Position;
+
+ if ((GeneralPurposeBitFlag & 8) == 8)
+ {
+ zipFs.Position += (long)_compressedSize;
+
+ // the data descriptor may optionally start with the signature 0x08074b50;
+ // ReadCRC returns the bytes reversed, so when that signature is found the
+ // real CRC follows it and has to be read again
+ tCRC = ReadCRC(br);
+ if (ByteArrCompare(tCRC, new byte[] { 0x08, 0x07, 0x4b, 0x50 }))
+ {
+ tCRC = ReadCRC(br);
+ }
+
+ tCompressedSize = br.ReadUInt32();
+ tUnCompressedSize = br.ReadUInt32();
+ }
+
+
+
+ if (!ByteArrCompare(tCRC, CRC))
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ if (tCompressedSize != _compressedSize)
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ if (tUnCompressedSize != UncompressedSize)
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ return ZipReturn.ZipGood;
+ }
+ }
+ catch
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+ }
+
+ public ZipReturn LocalFileHeaderReadQuick(Stream zipFs)
+ {
+ try
+ {
+
+ using (BinaryReader br = new BinaryReader(zipFs, Encoding.UTF8, true))
+ {
+ TrrntZip = true;
+
+
+ zipFs.Position = (long)RelativeOffsetOfLocalHeader;
+ uint thisSignature = br.ReadUInt32();
+ if (thisSignature != LocalFileHeaderSignature)
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ br.ReadUInt16(); // version needed to extract
+ GeneralPurposeBitFlag = br.ReadUInt16();
+ if ((GeneralPurposeBitFlag & 8) == 8)
+ {
+ return ZipReturn.ZipCannotFastOpen;
+ }
+
+ _compressionMethod = br.ReadUInt16();
+ _lastModFileTime = br.ReadUInt16();
+ _lastModFileDate = br.ReadUInt16();
+ CRC = ReadCRC(br);
+ _compressedSize = br.ReadUInt32();
+ UncompressedSize = br.ReadUInt32();
+
+ ushort fileNameLength = br.ReadUInt16();
+ ushort extraFieldLength = br.ReadUInt16();
+
+ byte[] bFileName = br.ReadBytes(fileNameLength);
+
+ FileName = (GeneralPurposeBitFlag & (1 << 11)) == 0
+ ? GetString(bFileName)
+ : Encoding.UTF8.GetString(bFileName, 0, fileNameLength);
+
+ byte[] extraField = br.ReadBytes(extraFieldLength);
+
+ Zip64 = false;
+ int pos = 0;
+ while (extraFieldLength > pos)
+ {
+ ushort type = BitConverter.ToUInt16(extraField, pos);
+ pos += 2;
+ ushort blockLength = BitConverter.ToUInt16(extraField, pos);
+ pos += 2;
+ switch (type)
+ {
+ case 0x0001:
+ Zip64 = true;
+ if (UncompressedSize == 0xffffffff)
+ {
+ UncompressedSize = BitConverter.ToUInt64(extraField, pos);
+ pos += 8;
+ }
+
+ if (_compressedSize == 0xffffffff)
+ {
+ _compressedSize = BitConverter.ToUInt64(extraField, pos);
+ pos += 8;
+ }
+
+ break;
+ case 0x7075:
+ pos += 1;
+ uint nameCRC32 = BitConverter.ToUInt32(extraField, pos);
+ pos += 4;
+
+ CRC crcTest = new CRC();
+ crcTest.SlurpBlock(bFileName, 0, fileNameLength);
+ uint fCRC = crcTest.Crc32ResultU;
+
+ if (nameCRC32 != fCRC)
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+
+ int charLen = blockLength - 5;
+
+ FileName = Encoding.UTF8.GetString(extraField, pos, charLen);
+
+ pos += charLen;
+
+ break;
+ default:
+ pos += blockLength;
+ break;
+ }
+ }
+
+ _dataLocation = (ulong)zipFs.Position;
+ return ZipReturn.ZipGood;
+ }
+ }
+ catch
+ {
+ return ZipReturn.ZipLocalFileHeaderError;
+ }
+ }
+
+
+ private void LocalFileHeaderWrite(Stream zipFs)
+ {
+ using (BinaryWriter bw = new BinaryWriter(zipFs, Encoding.UTF8, true))
+ {
+ Zip64 = UncompressedSize >= 0xffffffff;
+
+ byte[] bFileName;
+ if (IsUnicode(FileName))
+ {
+ GeneralPurposeBitFlag |= 1 << 11;
+ bFileName = Encoding.UTF8.GetBytes(FileName);
+ }
+ else
+ {
+ bFileName = GetBytes(FileName);
+ }
+
+ ushort versionNeededToExtract = (ushort)(Zip64 ? 45 : 20);
+
+ RelativeOffsetOfLocalHeader = (ulong)zipFs.Position;
+ const uint header = 0x4034B50;
+ bw.Write(header);
+ bw.Write(versionNeededToExtract);
+ bw.Write(GeneralPurposeBitFlag);
+ bw.Write(_compressionMethod);
+ bw.Write(_lastModFileTime);
+ bw.Write(_lastModFileDate);
+
+ _crc32Location = (ulong)zipFs.Position;
+
+ // these 3 values will be set correctly after the file data has been written
+ bw.Write(0xffffffff);
+ bw.Write(0xffffffff);
+ bw.Write(0xffffffff);
+
+ ushort fileNameLength = (ushort)bFileName.Length;
+ bw.Write(fileNameLength);
+
+ ushort extraFieldLength = (ushort)(Zip64 ? 20 : 0);
+ bw.Write(extraFieldLength);
+
+ bw.Write(bFileName, 0, fileNameLength);
+
+ _extraLocation = (ulong)zipFs.Position;
+ if (Zip64)
+ bw.Write(new byte[20], 0, extraFieldLength);
+ }
+ }
+
+ public void LocalFileHeaderFake(ulong filePosition, ulong uncompressedSize, ulong compressedSize, byte[] crc32, MemoryStream ms)
+ {
+ using (BinaryWriter bw = new BinaryWriter(ms, Encoding.UTF8, true))
+ {
+ RelativeOffsetOfLocalHeader = filePosition;
+ TrrntZip = true;
+ UncompressedSize = uncompressedSize;
+ _compressedSize = compressedSize;
+ CRC = crc32;
+
+ Zip64 = UncompressedSize >= 0xffffffff || _compressedSize >= 0xffffffff;
+
+ byte[] bFileName;
+ if (IsUnicode(FileName))
+ {
+ GeneralPurposeBitFlag |= 1 << 11;
+ bFileName = Encoding.UTF8.GetBytes(FileName);
+ }
+ else
+ {
+ bFileName = GetBytes(FileName);
+ }
+
+ ushort versionNeededToExtract = (ushort)(Zip64 ? 45 : 20);
+
+ const uint header = 0x4034B50;
+ bw.Write(header);
+ bw.Write(versionNeededToExtract);
+ bw.Write(GeneralPurposeBitFlag);
+ bw.Write(_compressionMethod);
+ bw.Write(_lastModFileTime);
+ bw.Write(_lastModFileDate);
+
+ uint tCompressedSize;
+ uint tUncompressedSize;
+ if (Zip64)
+ {
+ tCompressedSize = 0xffffffff;
+ tUncompressedSize = 0xffffffff;
+ }
+ else
+ {
+ tCompressedSize = (uint)_compressedSize;
+ tUncompressedSize = (uint)UncompressedSize;
+ }
+
+ bw.Write(CRC[3]);
+ bw.Write(CRC[2]);
+ bw.Write(CRC[1]);
+ bw.Write(CRC[0]);
+ bw.Write(tCompressedSize);
+ bw.Write(tUncompressedSize);
+
+ ushort fileNameLength = (ushort)bFileName.Length;
+ bw.Write(fileNameLength);
+
+ ushort extraFieldLength = (ushort)(Zip64 ? 20 : 0);
+ bw.Write(extraFieldLength);
+
+ bw.Write(bFileName, 0, fileNameLength);
+
+ if (Zip64)
+ {
+ bw.Write((ushort)0x0001); // id
+ bw.Write((ushort)16); // data length
+ bw.Write(UncompressedSize);
+ bw.Write(_compressedSize);
+ }
+ }
+ }
+ public ZipReturn LocalFileOpenReadStream(Stream zipFs, bool raw, out Stream readStream, out ulong streamSize, out ushort compressionMethod)
+ {
+ streamSize = 0;
+ compressionMethod = _compressionMethod;
+
+ readStream = null;
+ zipFs.Seek((long)_dataLocation, SeekOrigin.Begin);
+
+ switch (_compressionMethod)
+ {
+ case 8:
+ if (raw)
+ {
+ readStream = zipFs;
+ streamSize = _compressedSize;
+ }
+ else
+ {
+ readStream = new ZlibBaseStream(zipFs, CompressionMode.Decompress, CompressionLevel.Default, ZlibStreamFlavor.DEFLATE, true);
+ streamSize = UncompressedSize;
+ }
+ break;
+ case 0:
+ readStream = zipFs;
+ streamSize = _compressedSize; // same as UncompressedSize
+ break;
+ }
+
+ return readStream == null ? ZipReturn.ZipErrorGettingDataStream : ZipReturn.ZipGood;
+ }
+
+ public ZipReturn LocalFileOpenWriteStream(Stream zipFs, bool raw, bool trrntZip, ulong uncompressedSize, ushort compressionMethod, out Stream writeStream)
+ {
+ UncompressedSize = uncompressedSize;
+ _compressionMethod = compressionMethod;
+
+ LocalFileHeaderWrite(zipFs);
+ _dataLocation = (ulong)zipFs.Position;
+
+ if (raw)
+ {
+ writeStream = zipFs;
+ TrrntZip = trrntZip;
+ }
+ else
+ {
+ if (compressionMethod == 0)
+ {
+ writeStream = zipFs;
+ TrrntZip = false;
+ }
+ else
+ {
+ writeStream = new ZlibBaseStream(zipFs, CompressionMode.Compress, CompressionLevel.BestCompression, ZlibStreamFlavor.DEFLATE, true);
+ TrrntZip = true;
+ }
+ }
+
+ return writeStream == null ? ZipReturn.ZipErrorGettingDataStream : ZipReturn.ZipGood;
+ }
+
+ public ZipReturn LocalFileCloseWriteStream(Stream zipFs, byte[] crc32)
+ {
+ _compressedSize = (ulong)zipFs.Position - _dataLocation;
+
+ if (_compressedSize == 0 && UncompressedSize == 0)
+ {
+ LocalFileAddDirectory(zipFs);
+ _compressedSize = (ulong)zipFs.Position - _dataLocation;
+ }
+
+ CRC = crc32;
+ WriteCompressedSize(zipFs);
+
+ return ZipReturn.ZipGood;
+ }
+
+ private void FixFileForZip64(Stream zipFs)
+ {
+ long posNow = zipFs.Position;
+ using (BinaryWriter bw = new BinaryWriter(zipFs, Encoding.UTF8, true))
+ {
+ // the 'version needed to extract' field, 10 bytes before _crc32Location, needs to be set to 45 (Zip64)
+ zipFs.Seek((long)_crc32Location - 10, SeekOrigin.Begin);
+ ushort versionNeededToExtract = 45;
+ bw.Write(versionNeededToExtract);
+
+ zipFs.Seek((long)_crc32Location + 14, SeekOrigin.Begin);
+ ushort extraFieldLength = 20;
+ bw.Write(extraFieldLength);
+ }
+ ExpandFile(zipFs, (long)_extraLocation, posNow, 20);
+ zipFs.Position = posNow + 20;
+ }
+
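+ // Shift the stream contents between 'offset' and 'length' forward by 'extraBytes'
+ // (copying in 40 KiB chunks from the end) to make room for the Zip64 extra field.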
+ private static void ExpandFile(Stream stream, long offset, long length, int extraBytes)
+ {
+ const int bufferSize = 40960;
+ byte[] buffer = new byte[bufferSize];
+ // Expand file
+ long pos = length;
+ while (pos > offset)
+ {
+ int toRead = pos - bufferSize >= offset ? bufferSize : (int)(pos - offset);
+ pos -= toRead;
+ stream.Position = pos;
+ stream.Read(buffer, 0, toRead);
+ stream.Position = pos + extraBytes;
+ stream.Write(buffer, 0, toRead);
+ }
+ }
+
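+ // Seek back to the placeholder CRC/size fields written by LocalFileHeaderWrite
+ // and fill in the real values, upgrading the entry to Zip64 first if the
+ // compressed size overflowed 32 bits.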
+ private void WriteCompressedSize(Stream zipFs)
+ {
+ if (_compressedSize >= 0xffffffff && !Zip64)
+ {
+ Zip64 = true;
+ FixFileForZip64(zipFs);
+ }
+
+
+ long posNow = zipFs.Position;
+ zipFs.Seek((long)_crc32Location, SeekOrigin.Begin);
+ using (BinaryWriter bw = new BinaryWriter(zipFs, Encoding.UTF8, true))
+ {
+ uint tCompressedSize;
+ uint tUncompressedSize;
+ if (Zip64)
+ {
+ tCompressedSize = 0xffffffff;
+ tUncompressedSize = 0xffffffff;
+ }
+ else
+ {
+ tCompressedSize = (uint)_compressedSize;
+ tUncompressedSize = (uint)UncompressedSize;
+ }
+
+ bw.Write(CRC[3]);
+ bw.Write(CRC[2]);
+ bw.Write(CRC[1]);
+ bw.Write(CRC[0]);
+ bw.Write(tCompressedSize);
+ bw.Write(tUncompressedSize);
+
+
+ // also need to write extradata
+ if (Zip64)
+ {
+ zipFs.Seek((long)_extraLocation, SeekOrigin.Begin);
+ bw.Write((ushort)0x0001); // id
+ bw.Write((ushort)16); // data length
+ bw.Write(UncompressedSize);
+ bw.Write(_compressedSize);
+ }
+ }
+
+ zipFs.Seek(posNow, SeekOrigin.Begin);
+ }
+
+ public static void LocalFileAddDirectory(Stream zipFs)
+ {
+ zipFs.WriteByte(03);
+ zipFs.WriteByte(00);
+ }
+
+ private static byte[] ReadCRC(BinaryReader br)
+ {
+ byte[] tCRC = new byte[4];
+ tCRC[3] = br.ReadByte();
+ tCRC[2] = br.ReadByte();
+ tCRC[1] = br.ReadByte();
+ tCRC[0] = br.ReadByte();
+ return tCRC;
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Compress/gZip/gZip.cs b/SabreTools.Library/External/Compress/gZip/gZip.cs
new file mode 100644
index 00000000..7b5b04ff
--- /dev/null
+++ b/SabreTools.Library/External/Compress/gZip/gZip.cs
@@ -0,0 +1,458 @@
+using System;
+using System.IO;
+using System.Text;
+using Compress.ZipFile.ZLib;
+using Directory = RVIO.Directory;
+using FileInfo = RVIO.FileInfo;
+using FileStream = RVIO.FileStream;
+using Path = RVIO.Path;
+
+
+namespace Compress.gZip
+{
+ public class gZip : ICompress
+ {
+ private FileInfo _zipFileInfo;
+ private Stream _zipFs;
+ private Stream _compressionStream;
+
+ public byte[] CRC { get; private set; }
+ public ulong UnCompressedSize { get; private set; }
+ public ulong CompressedSize { get; private set; }
+
+ private long headerStartPos;
+ private long dataStartPos;
+
+ public int LocalFilesCount()
+ {
+ return 1;
+ }
+
+ public string Filename(int i)
+ {
+ return Path.GetFileName(ZipFilename);
+ }
+
+ public ulong? LocalHeader(int i)
+ {
+ return 0;
+ }
+
+ public ulong UncompressedSize(int i)
+ {
+ return UnCompressedSize;
+ }
+
+ public byte[] CRC32(int i)
+ {
+ return CRC;
+ }
+
+ public bool IsDirectory(int i)
+ {
+ return false;
+ }
+
+ public ZipOpenType ZipOpen { get; private set; }
+
+ public ZipReturn ZipFileOpen(string newFilename, long timestamp = -1, bool readHeaders = true)
+ {
+ ZipFileClose();
+ ZipStatus = ZipStatus.None;
+
+ try
+ {
+ if (!RVIO.File.Exists(newFilename))
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorFileNotFound;
+ }
+ _zipFileInfo = new FileInfo(newFilename);
+ if (timestamp != -1 && _zipFileInfo.LastWriteTime != timestamp)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorTimeStamp;
+ }
+ int errorCode = FileStream.OpenFileRead(newFilename, out _zipFs);
+ if (errorCode != 0)
+ {
+ ZipFileClose();
+ if (errorCode == 32)
+ {
+ return ZipReturn.ZipFileLocked;
+ }
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ }
+ catch (PathTooLongException)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipFileNameToLong;
+ }
+ catch (IOException)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ ZipOpen = ZipOpenType.OpenRead;
+
+ if (!readHeaders)
+ {
+ return ZipReturn.ZipGood;
+ }
+ return ZipFileReadHeaders();
+ }
+
+ public ZipReturn ZipFileOpen(Stream inStream)
+ {
+ ZipFileClose();
+ ZipStatus = ZipStatus.None;
+ _zipFileInfo = null;
+ _zipFs = inStream;
+
+ ZipOpen = ZipOpenType.OpenRead;
+ return ZipFileReadHeaders();
+ }
+
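+ // Parse the gzip header (RFC 1952). An FEXTRA block of a recognised length
+ // (12, 28 or 77 bytes) is taken to carry a pre-computed CRC32 and uncompressed
+ // size, which are then checked against the trailing CRC32/ISIZE fields.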
+ private ZipReturn ZipFileReadHeaders()
+ {
+ using (BinaryReader zipBr = new BinaryReader(_zipFs, Encoding.UTF8, true))
+ {
+
+ byte ID1 = zipBr.ReadByte();
+ byte ID2 = zipBr.ReadByte();
+
+ if ((ID1 != 0x1f) || (ID2 != 0x8b))
+ {
+ _zipFs.Close();
+ return ZipReturn.ZipSignatureError;
+ }
+
+ byte CM = zipBr.ReadByte();
+ if (CM != 8)
+ {
+ _zipFs.Close();
+ return ZipReturn.ZipUnsupportedCompression;
+ }
+
+ byte FLG = zipBr.ReadByte();
+
+
+ uint MTime = zipBr.ReadUInt32();
+ byte XFL = zipBr.ReadByte();
+ byte OS = zipBr.ReadByte();
+
+ ExtraData = null;
+ // if FLG.FEXTRA is set, read the extra field
+ if ((FLG & 0x4) == 0x4)
+ {
+ int XLen = zipBr.ReadInt16();
+ ExtraData = zipBr.ReadBytes(XLen);
+
+ switch (XLen)
+ {
+ case 12:
+ CRC = new byte[4];
+ Array.Copy(ExtraData, 0, CRC, 0, 4);
+ UnCompressedSize = BitConverter.ToUInt64(ExtraData, 4);
+ break;
+ case 28:
+ CRC = new byte[4];
+ Array.Copy(ExtraData, 16, CRC, 0, 4);
+ UnCompressedSize = BitConverter.ToUInt64(ExtraData, 20);
+ break;
+ case 77:
+ CRC = new byte[4];
+ Array.Copy(ExtraData, 16, CRC, 0, 4);
+ UnCompressedSize = BitConverter.ToUInt64(ExtraData, 20);
+ break;
+ }
+ }
+
+ // if FLG.FNAME is set, read and discard the original file name
+ if ((FLG & 0x8) == 0x8)
+ {
+ int XLen = zipBr.ReadInt16();
+ byte[] bytes = zipBr.ReadBytes(XLen);
+ }
+
+ // if FLG.FCOMMENT is set, read and discard the file comment
+ if ((FLG & 0x10) == 0x10)
+ {
+ int XLen = zipBr.ReadInt16();
+ byte[] bytes = zipBr.ReadBytes(XLen);
+ }
+
+ // if FLG.FHCRC is set, read and discard the 16-bit header CRC
+ if ((FLG & 0x2) == 0x2)
+ {
+ uint crc16 = zipBr.ReadUInt16();
+ }
+
+ CompressedSize = (ulong) (_zipFs.Length - _zipFs.Position) - 8;
+
+ dataStartPos = _zipFs.Position;
+
+ _zipFs.Position = _zipFs.Length - 8;
+ byte[] gzcrc = zipBr.ReadBytes(4);
+ uint gzLength = zipBr.ReadUInt32();
+
+ if (CRC != null)
+ {
+ for (int i = 0; i < 4; i++)
+ {
+ if (gzcrc[3 - i] == CRC[i])
+ {
+ continue;
+ }
+
+ _zipFs.Close();
+ return ZipReturn.ZipDecodeError;
+ }
+ }
+ else
+ {
+ CRC = new[] {gzcrc[3], gzcrc[2], gzcrc[1], gzcrc[0]};
+ }
+
+ if (UnCompressedSize != 0)
+ {
+ if (gzLength != (UnCompressedSize & 0xffffffff))
+ {
+ _zipFs.Close();
+ return ZipReturn.ZipDecodeError;
+ }
+ }
+
+ return ZipReturn.ZipGood;
+ }
+ }
+
+ public void ZipFileClose()
+ {
+ if (ZipOpen == ZipOpenType.Closed)
+ {
+ return;
+ }
+
+ if (ZipOpen == ZipOpenType.OpenRead)
+ {
+ if (_zipFs != null)
+ {
+ _zipFs.Close();
+ _zipFs.Dispose();
+ }
+ ZipOpen = ZipOpenType.Closed;
+ return;
+ }
+
+ }
+
+ public ZipReturn ZipFileOpenReadStream(int index, out Stream stream, out ulong streamSize)
+ {
+ ZipFileCloseReadStream();
+
+ _zipFs.Position = dataStartPos;
+
+ _compressionStream = new ZlibBaseStream(_zipFs, CompressionMode.Decompress, CompressionLevel.Default, ZlibStreamFlavor.DEFLATE, true);
+ stream = _compressionStream;
+ streamSize = UnCompressedSize;
+
+ return ZipReturn.ZipGood;
+ }
+
+ public bool hasAltFileHeader;
+
+
+ public byte[] ExtraData;
+
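+ // Write a gzip header with FLG.FEXTRA set; the extra field (a 12-byte CRC32+size
+ // block, or the caller-supplied ExtraData) is written as a placeholder here and
+ // patched with the final values in ZipFileCloseWriteStream.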
+ public ZipReturn ZipFileOpenWriteStream(bool raw, bool trrntzip, string filename, ulong unCompressedSize, ushort compressionMethod, out Stream stream)
+ {
+ using (BinaryWriter zipBw = new BinaryWriter(_zipFs, Encoding.UTF8, true))
+ {
+ UnCompressedSize = unCompressedSize;
+
+ zipBw.Write((byte) 0x1f); // ID1 = 0x1f
+ zipBw.Write((byte) 0x8b); // ID2 = 0x8b
+ zipBw.Write((byte) 0x08); // CM = 0x08
+ zipBw.Write((byte) 0x04); // FLG = 0x04
+ zipBw.Write((uint) 0); // MTime = 0
+ zipBw.Write((byte) 0x00); // XFL = 0x00
+ zipBw.Write((byte) 0xff); // OS = 0xff (unknown)
+
+ if (ExtraData == null)
+ {
+ zipBw.Write((short) 12);
+ headerStartPos = zipBw.BaseStream.Position;
+ zipBw.Write(new byte[12]);
+ }
+ else
+ {
+ zipBw.Write((short) ExtraData.Length); // XLEN 16+4+8+1+16+20+4+8
+ headerStartPos = zipBw.BaseStream.Position;
+ zipBw.Write(ExtraData);
+ }
+
+
+ dataStartPos = zipBw.BaseStream.Position;
+ stream = raw
+ ? _zipFs
+ : new ZlibBaseStream(_zipFs, CompressionMode.Compress, CompressionLevel.BestCompression, ZlibStreamFlavor.DEFLATE, true);
+
+ zipBw.Flush();
+ zipBw.Close();
+ }
+ return ZipReturn.ZipGood;
+ }
+
+ public ZipReturn ZipFileCloseReadStream()
+ {
+
+ if (_compressionStream == null)
+ return ZipReturn.ZipGood;
+ if (_compressionStream is ZlibBaseStream dfStream)
+ {
+ dfStream.Close();
+ dfStream.Dispose();
+ }
+ _compressionStream = null;
+
+ return ZipReturn.ZipGood;
+ }
+
+ public ZipStatus ZipStatus { get; private set; }
+
+ public string ZipFilename => _zipFileInfo != null ? _zipFileInfo.FullName : "";
+
+ public long TimeStamp => _zipFileInfo?.LastWriteTime ?? 0;
+
+ public void ZipFileAddDirectory()
+ {
+ throw new NotImplementedException();
+ }
+
+ public ZipReturn ZipFileCreate(string newFilename)
+ {
+ if (ZipOpen != ZipOpenType.Closed)
+ {
+ return ZipReturn.ZipFileAlreadyOpen;
+ }
+
+ CreateDirForFile(newFilename);
+ _zipFileInfo = new FileInfo(newFilename);
+
+ int errorCode = FileStream.OpenFileWrite(newFilename, out _zipFs);
+ if (errorCode != 0)
+ {
+ ZipFileClose();
+ return ZipReturn.ZipErrorOpeningFile;
+ }
+ ZipOpen = ZipOpenType.OpenWrite;
+ return ZipReturn.ZipGood;
+ }
+
+
+ public ZipReturn ZipFileCloseWriteStream(byte[] crc32)
+ {
+ using (BinaryWriter zipBw = new BinaryWriter(_zipFs,Encoding.UTF8,true))
+ {
+ CompressedSize = (ulong) (zipBw.BaseStream.Position - dataStartPos);
+
+ zipBw.Write(CRC[3]);
+ zipBw.Write(CRC[2]);
+ zipBw.Write(CRC[1]);
+ zipBw.Write(CRC[0]);
+ zipBw.Write((uint) UnCompressedSize);
+
+ long endpos = _zipFs.Position;
+
+ _zipFs.Position = headerStartPos;
+
+ if (ExtraData == null)
+ {
+ zipBw.Write(CRC); // 4 bytes
+ zipBw.Write(UnCompressedSize); // 8 bytes
+ }
+ else
+ {
+ zipBw.Write(ExtraData);
+ }
+
+ _zipFs.Position = endpos;
+
+ zipBw.Flush();
+ zipBw.Close();
+ }
+
+ _zipFs.Close();
+
+ return ZipReturn.ZipGood;
+ }
+
+ public ZipReturn ZipFileRollBack()
+ {
+ _zipFs.Position = dataStartPos;
+ return ZipReturn.ZipGood;
+ }
+
+ public void ZipFileCloseFailed()
+ {
+ if (ZipOpen == ZipOpenType.Closed)
+ {
+ return;
+ }
+
+ if (ZipOpen == ZipOpenType.OpenRead)
+ {
+ if (_zipFs != null)
+ {
+ _zipFs.Close();
+ _zipFs.Dispose();
+ }
+ ZipOpen = ZipOpenType.Closed;
+ return;
+ }
+
+ _zipFs.Flush();
+ _zipFs.Close();
+ _zipFs.Dispose();
+ RVIO.File.Delete(_zipFileInfo.FullName);
+ _zipFileInfo = null;
+ ZipOpen = ZipOpenType.Closed;
+ }
+
+
+ private static void CreateDirForFile(string sFilename)
+ {
+ string strTemp = Path.GetDirectoryName(sFilename);
+
+ if (string.IsNullOrEmpty(strTemp))
+ {
+ return;
+ }
+
+ if (Directory.Exists(strTemp))
+ {
+ return;
+ }
+
+
+ while (strTemp.Length > 0 && !Directory.Exists(strTemp))
+ {
+ int pos = strTemp.LastIndexOf(Path.DirectorySeparatorChar);
+ if (pos < 0)
+ {
+ pos = 0;
+ }
+ strTemp = strTemp.Substring(0, pos);
+ }
+
+ while (sFilename.IndexOf(Path.DirectorySeparatorChar, strTemp.Length + 1) > 0)
+ {
+ strTemp = sFilename.Substring(0, sFilename.IndexOf(Path.DirectorySeparatorChar, strTemp.Length + 1));
+ Directory.CreateDirectory(strTemp);
+ }
+ }
+
+ }
+}
diff --git a/SabreTools.Library/External/RVIO/RVIO.cs b/SabreTools.Library/External/RVIO/RVIO.cs
new file mode 100644
index 00000000..e9eb0985
--- /dev/null
+++ b/SabreTools.Library/External/RVIO/RVIO.cs
@@ -0,0 +1,784 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Runtime.InteropServices;
+using System.Text;
+using Microsoft.Win32.SafeHandles;
+
+namespace RVIO
+{
+ [Flags]
+ [ComVisible(true)]
+ [Serializable]
+ public enum FileAttributes
+ {
+ ReadOnly = 1,
+ Hidden = 2,
+ System = 4,
+ Directory = 16,
+ Archive = 32,
+ Device = 64,
+ Normal = 128,
+ Temporary = 256,
+ SparseFile = 512,
+ ReparsePoint = 1024,
+ Compressed = 2048,
+ Offline = 4096,
+ NotContentIndexed = 8192,
+ Encrypted = 16384,
+ }
+ public static class Error
+ {
+ public static int GetLastError()
+ {
+ return Marshal.GetLastWin32Error();
+ }
+ }
+
+ public static class unix
+ {
+ public static bool IsUnix
+ {
+ get
+ {
+ int p = (int)Environment.OSVersion.Platform;
+ return ((p == 4) || (p == 6) || (p == 128));
+ }
+ }
+ }
+
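+ // Lightweight replacements for the System.IO types: on Unix they defer to
+ // System.IO, while on Windows they call the Win32 file APIs directly with a
+ // long-path prefix (NameFix.AddLongPathPrefix) so paths beyond MAX_PATH work.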
+ public class FileInfo
+ {
+
+ public string Name;
+ public string FullName;
+ public long LastWriteTime;
+ public long Length;
+
+ public FileInfo()
+ { }
+
+ public FileInfo(string path)
+ {
+ FullName = path;
+ Name = Path.GetFileName(path);
+
+ if (unix.IsUnix)
+ {
+ System.IO.FileInfo fi = new System.IO.FileInfo(path);
+
+ if (!fi.Exists) return;
+
+ Length = fi.Length;
+ LastWriteTime = fi.LastWriteTimeUtc.Ticks;
+ return;
+ }
+
+ string fileName = NameFix.AddLongPathPrefix(path);
+ Win32Native.WIN32_FILE_ATTRIBUTE_DATA wIn32FileAttributeData = new Win32Native.WIN32_FILE_ATTRIBUTE_DATA();
+
+ bool b = Win32Native.GetFileAttributesEx(fileName, 0, ref wIn32FileAttributeData);
+
+ if (!b || (wIn32FileAttributeData.fileAttributes & Win32Native.FILE_ATTRIBUTE_DIRECTORY) != 0) return;
+
+ Length = Convert.Length(wIn32FileAttributeData.fileSizeHigh, wIn32FileAttributeData.fileSizeLow);
+ LastWriteTime = Convert.Time(wIn32FileAttributeData.ftLastWriteTimeHigh, wIn32FileAttributeData.ftLastWriteTimeLow);
+ }
+
+ }
+
+ public class DirectoryInfo
+ {
+ public string Name;
+ public string FullName;
+ public long LastWriteTime;
+
+ public DirectoryInfo()
+ { }
+ public DirectoryInfo(string path)
+ {
+ FullName = path;
+ Name = Path.GetFileName(path);
+
+ if (unix.IsUnix)
+ {
+ System.IO.DirectoryInfo fi = new System.IO.DirectoryInfo(path);
+
+ if (!fi.Exists) return;
+
+ LastWriteTime = fi.LastWriteTimeUtc.Ticks;
+ return;
+ }
+
+ string fileName = NameFix.AddLongPathPrefix(path);
+ Win32Native.WIN32_FILE_ATTRIBUTE_DATA wIn32FileAttributeData = new Win32Native.WIN32_FILE_ATTRIBUTE_DATA();
+
+ bool b = Win32Native.GetFileAttributesEx(fileName, 0, ref wIn32FileAttributeData);
+
+ if (!b || (wIn32FileAttributeData.fileAttributes & Win32Native.FILE_ATTRIBUTE_DIRECTORY) == 0) return;
+ LastWriteTime = Convert.Time(wIn32FileAttributeData.ftLastWriteTimeHigh, wIn32FileAttributeData.ftLastWriteTimeLow);
+ }
+
+
+
+ public DirectoryInfo[] GetDirectories(bool includeHidden = true)
+ {
+ return GetDirectories("*", includeHidden);
+ }
+ public DirectoryInfo[] GetDirectories(string SearchPattern, bool includeHidden = true)
+ {
+ List dirs = new List();
+
+ if (unix.IsUnix)
+ {
+ System.IO.DirectoryInfo di = new System.IO.DirectoryInfo(FullName);
+ System.IO.DirectoryInfo[] arrDi = di.GetDirectories(SearchPattern);
+ foreach (System.IO.DirectoryInfo tDi in arrDi)
+ {
+ DirectoryInfo lDi = new DirectoryInfo
+ {
+ Name = tDi.Name,
+ FullName = Path.Combine(FullName, tDi.Name),
+ LastWriteTime = tDi.LastWriteTimeUtc.Ticks
+ };
+ dirs.Add(lDi);
+ }
+ return dirs.ToArray();
+ }
+
+
+
+ string dirName = NameFix.AddLongPathPrefix(FullName);
+
+ Win32Native.WIN32_FIND_DATA findData = new Win32Native.WIN32_FIND_DATA();
+ SafeFindHandle findHandle = Win32Native.FindFirstFile(dirName + @"\" + SearchPattern, findData);
+
+ if (!findHandle.IsInvalid)
+ {
+ do
+ {
+ string currentFileName = findData.cFileName;
+
+ // if this is a directory, find its contents
+ if ((findData.dwFileAttributes & Win32Native.FILE_ATTRIBUTE_DIRECTORY) == 0) continue;
+ if (currentFileName == "." || currentFileName == "..") continue;
+ if (!includeHidden && (findData.dwFileAttributes & Win32Native.FILE_ATTRIBUTE_HIDDEN) != 0) continue;
+
+ DirectoryInfo di = new DirectoryInfo
+ {
+ Name = currentFileName,
+ FullName = Path.Combine(FullName, currentFileName),
+ LastWriteTime = Convert.Time(findData.ftLastWriteTimeHigh, findData.ftLastWriteTimeLow)
+ };
+ dirs.Add(di);
+ }
+ while (Win32Native.FindNextFile(findHandle, findData));
+ }
+
+ // close the find handle
+ findHandle.Dispose();
+
+ return dirs.ToArray();
+ }
+
+ public FileInfo[] GetFiles()
+ {
+ return GetFiles("*");
+ }
+ public FileInfo[] GetFiles(string SearchPattern, bool includeHidden = true)
+ {
+ List files = new List();
+
+ if (unix.IsUnix)
+ {
+ System.IO.DirectoryInfo di = new System.IO.DirectoryInfo(FullName);
+ System.IO.FileInfo[] arrDi = di.GetFiles(SearchPattern);
+ foreach (System.IO.FileInfo tDi in arrDi)
+ {
+ FileInfo lDi = new FileInfo
+ {
+ Name = tDi.Name,
+ FullName = Path.Combine(FullName, tDi.Name),
+ Length = tDi.Length,
+ LastWriteTime = tDi.LastWriteTimeUtc.Ticks
+ };
+ files.Add(lDi);
+ }
+ return files.ToArray();
+ }
+
+ string dirName = NameFix.AddLongPathPrefix(FullName);
+
+ Win32Native.WIN32_FIND_DATA findData = new Win32Native.WIN32_FIND_DATA();
+ SafeFindHandle findHandle = Win32Native.FindFirstFile(dirName + @"\" + SearchPattern, findData);
+
+ if (!findHandle.IsInvalid)
+ {
+ do
+ {
+ string currentFileName = findData.cFileName;
+
+ // if this is a directory, find its contents
+ if ((findData.dwFileAttributes & Win32Native.FILE_ATTRIBUTE_DIRECTORY) != 0) continue;
+ if (!includeHidden && (findData.dwFileAttributes & Win32Native.FILE_ATTRIBUTE_HIDDEN) != 0) continue;
+
+ FileInfo fi = new FileInfo
+ {
+ Name = currentFileName,
+ FullName = Path.Combine(FullName, currentFileName),
+ Length = Convert.Length(findData.nFileSizeHigh, findData.nFileSizeLow),
+ LastWriteTime = Convert.Time(findData.ftLastWriteTimeHigh, findData.ftLastWriteTimeLow)
+ };
+ files.Add(fi);
+ }
+ while (Win32Native.FindNextFile(findHandle, findData));
+ }
+
+ // close the find handle
+ findHandle.Dispose();
+
+ return files.ToArray();
+ }
+ }
+
+ public static class Directory
+ {
+ public static bool Exists(string path)
+ {
+ if (unix.IsUnix)
+ return System.IO.Directory.Exists(path);
+
+
+ string fixPath = NameFix.AddLongPathPrefix(path);
+
+ Win32Native.WIN32_FILE_ATTRIBUTE_DATA wIn32FileAttributeData = new Win32Native.WIN32_FILE_ATTRIBUTE_DATA();
+
+ bool b = Win32Native.GetFileAttributesEx(fixPath, 0, ref wIn32FileAttributeData);
+ return b && (wIn32FileAttributeData.fileAttributes & Win32Native.FILE_ATTRIBUTE_DIRECTORY) != 0;
+ }
+ public static void Move(string sourceDirName, string destDirName)
+ {
+ if (unix.IsUnix)
+ {
+ System.IO.Directory.Move(sourceDirName, destDirName);
+ return;
+ }
+
+
+ if (sourceDirName == null)
+ throw new ArgumentNullException("sourceDirName");
+ if (sourceDirName.Length == 0)
+ throw new ArgumentException("Argument_EmptyFileName", "sourceDirName");
+
+ if (destDirName == null)
+ throw new ArgumentNullException("destDirName");
+ if (destDirName.Length == 0)
+ throw new ArgumentException("Argument_EmptyFileName", "destDirName");
+
+ string fullsourceDirName = NameFix.AddLongPathPrefix(sourceDirName);
+
+ string fulldestDirName = NameFix.AddLongPathPrefix(destDirName);
+
+ if (!Win32Native.MoveFile(fullsourceDirName, fulldestDirName))
+ {
+ int hr = Marshal.GetLastWin32Error();
+ if (hr == Win32Native.ERROR_FILE_NOT_FOUND) // Source dir not found
+ {
+ throw new Exception("ERROR_PATH_NOT_FOUND " + fullsourceDirName);
+ }
+ if (hr == Win32Native.ERROR_ACCESS_DENIED) // WinNT throws IOException. This check is for Win9x. We can't change it for backcomp.
+ {
+ throw new Exception("UnauthorizedAccess_IODenied_Path" + sourceDirName);
+ }
+ }
+ }
+ public static void Delete(string path)
+ {
+ if (unix.IsUnix)
+ {
+ System.IO.Directory.Delete(path);
+ return;
+ }
+
+ string fullPath = NameFix.AddLongPathPrefix(path);
+
+ Win32Native.RemoveDirectory(fullPath);
+ }
+
+ public static void CreateDirectory(string path)
+ {
+ if (unix.IsUnix)
+ {
+ System.IO.Directory.CreateDirectory(path);
+ return;
+ }
+
+
+ if (path == null)
+ throw new ArgumentNullException("path");
+ if (path.Length == 0)
+ throw new ArgumentException("Argument_PathEmpty");
+
+ string fullPath = NameFix.AddLongPathPrefix(path);
+
+ Win32Native.CreateDirectory(fullPath, IntPtr.Zero);
+ }
+ }
+
+ public static class File
+ {
+ public static bool Exists(string path)
+ {
+ if (unix.IsUnix)
+ return System.IO.File.Exists(path);
+
+
+ string fixPath = NameFix.AddLongPathPrefix(path);
+
+ Win32Native.WIN32_FILE_ATTRIBUTE_DATA wIn32FileAttributeData = new Win32Native.WIN32_FILE_ATTRIBUTE_DATA();
+
+ bool b = Win32Native.GetFileAttributesEx(fixPath, 0, ref wIn32FileAttributeData);
+ return b && (wIn32FileAttributeData.fileAttributes & Win32Native.FILE_ATTRIBUTE_DIRECTORY) == 0;
+ }
+ public static void Copy(string sourceFileName, string destfileName)
+ {
+ Copy(sourceFileName, destfileName, true);
+ }
+ public static void Copy(string sourceFileName, string destFileName, bool overwrite)
+ {
+ if (unix.IsUnix)
+ {
+ System.IO.File.Copy(sourceFileName, destFileName, overwrite);
+ return;
+ }
+
+ if (sourceFileName == null || destFileName == null)
+ throw new ArgumentNullException((sourceFileName == null ? "sourceFileName" : "destFileName"), "ArgumentNull_FileName");
+ if (sourceFileName.Length == 0 || destFileName.Length == 0)
+ throw new ArgumentException("Argument_EmptyFileName", (sourceFileName.Length == 0 ? "sourceFileName" : "destFileName"));
+
+ string fullSourceFileName = NameFix.AddLongPathPrefix(sourceFileName);
+ string fullDestFileName = NameFix.AddLongPathPrefix(destFileName);
+
+ bool r = Win32Native.CopyFile(fullSourceFileName, fullDestFileName, !overwrite);
+ if (!r)
+ {
+ // Save Win32 error because subsequent checks will overwrite this HRESULT.
+ int errorCode = Marshal.GetLastWin32Error();
+ string fileName = destFileName;
+
+ /*
+ if (errorCode != Win32Native.ERROR_FILE_EXISTS)
+ {
+ // For a number of error codes (sharing violation, path
+ // not found, etc) we don't know if the problem was with
+ // the source or dest file. Try reading the source file.
+ using (SafeFileHandle handle = Win32Native.UnsafeCreateFile(fullSourceFileName, FileStream.GENERIC_READ, FileShare.Read, null, FileMode.Open, 0, IntPtr.Zero))
+ {
+ if (handle.IsInvalid)
+ fileName = sourceFileName;
+ }
+
+ if (errorCode == Win32Native.ERROR_ACCESS_DENIED)
+ {
+ if (Directory.InternalExists(fullDestFileName))
+ throw new IOException(string.Format(CultureInfo.CurrentCulture, Environment.GetResourceString("Arg_FileIsDirectory_Name"), destFileName), Win32Native.ERROR_ACCESS_DENIED, fullDestFileName);
+ }
+ }
+
+ __Error.WinIOError(errorCode, fileName);
+
+ */
+ }
+ }
+ public static void Move(string sourceFileName, string destFileName)
+ {
+ if (unix.IsUnix)
+ {
+ System.IO.File.Move(sourceFileName, destFileName);
+ return;
+ }
+
+ if (sourceFileName == null || destFileName == null)
+ throw new ArgumentNullException((sourceFileName == null ? "sourceFileName" : "destFileName"), "ArgumentNull_FileName");
+ if (sourceFileName.Length == 0 || destFileName.Length == 0)
+ throw new ArgumentException("Argument_EmptyFileName", (sourceFileName.Length == 0 ? "sourceFileName" : "destFileName"));
+
+ string fullSourceFileName = NameFix.AddLongPathPrefix(sourceFileName);
+ string fullDestFileName = NameFix.AddLongPathPrefix(destFileName);
+
+ if (!Exists(fullSourceFileName))
+ throw new Exception("ERROR_FILE_NOT_FOUND" + fullSourceFileName);
+
+ if (!Win32Native.MoveFile(fullSourceFileName, fullDestFileName))
+ {
+ int hr = Marshal.GetLastWin32Error();
+ throw new Exception(GetErrorCode(hr), new Exception("ERROR_MOVING_FILE. (" + fullSourceFileName + " to " + fullDestFileName + ")"));
+ }
+ }
+
+ public static void Delete(string path)
+ {
+ if (unix.IsUnix)
+ {
+ System.IO.File.Delete(path);
+ return;
+ }
+
+
+ string fixPath = NameFix.AddLongPathPrefix(path);
+
+ if (!Win32Native.DeleteFile(fixPath))
+ {
+ int hr = Marshal.GetLastWin32Error();
+ if (hr != Win32Native.ERROR_FILE_NOT_FOUND)
+ throw new Exception(GetErrorCode(hr), new Exception("ERROR_DELETING_FILE. (" + path + ")"));
+ }
+ }
+
+
+ private static string GetErrorCode(int hr)
+ {
+ switch (hr)
+ {
+ case 5: return "ERROR_ACCESS_DENIED: Access is denied.";
+ case 32: return "ERROR_FILE_IN_USE: The file is in use by another process.";
+ case 123: return "ERROR_INVALID_NAME: The filename, directory name, or volume label syntax is incorrect.";
+ case 183: return "ERROR_ALREADY_EXISTS: Cannot create a file when that file already exists.";
+ }
+
+ return hr.ToString();
+ }
+
+ public static bool SetAttributes(string path, FileAttributes fileAttributes)
+ {
+ if (unix.IsUnix)
+ {
+ try
+ {
+ System.IO.File.SetAttributes(path, (System.IO.FileAttributes)fileAttributes);
+ return true;
+ }
+ catch (Exception)
+ {
+ return false;
+ }
+ }
+
+ string fullPath = NameFix.AddLongPathPrefix(path);
+ return Win32Native.SetFileAttributes(fullPath, (int)fileAttributes);
+ }
+ public static StreamWriter CreateText(string filename)
+ {
+ int errorCode = FileStream.OpenFileWrite(filename, out Stream fStream);
+ return errorCode != 0 ? null : new StreamWriter(fStream);
+ }
+ public static StreamReader OpenText(string filename, Encoding enc)
+ {
+ int errorCode = FileStream.OpenFileRead(filename, out Stream fStream);
+ return errorCode != 0 ? null : new StreamReader(fStream, enc);
+ }
+
+ private const int ERROR_INVALID_PARAMETER = 87;
+ private const int ERROR_ACCESS_DENIED = 0x5;
+ }
+
+ public static class Path
+ {
+ public static readonly char DirectorySeparatorChar = '\\';
+ public static readonly char AltDirectorySeparatorChar = '/';
+ public static readonly char VolumeSeparatorChar = ':';
+
+ public static string GetExtension(string path)
+ {
+ return System.IO.Path.GetExtension(path);
+ }
+ public static string Combine(string path1, string path2)
+ {
+ if (unix.IsUnix)
+ return System.IO.Path.Combine(path1, path2);
+
+ if (path1 == null || path2 == null)
+ throw new ArgumentNullException((path1 == null) ? "path1" : "path2");
+ //CheckInvalidPathChars(path1);
+ //CheckInvalidPathChars(path2);
+
+ if (path2.Length == 0)
+ return path1;
+
+ if (path1.Length == 0)
+ return path2;
+
+ if (IsPathRooted(path2))
+ return path2;
+
+ char ch = path1[path1.Length - 1];
+ if (ch != DirectorySeparatorChar && ch != AltDirectorySeparatorChar && ch != VolumeSeparatorChar)
+ return path1 + DirectorySeparatorChar + path2;
+ return path1 + path2;
+ }
+ private static bool IsPathRooted(string path)
+ {
+ if (path != null)
+ {
+ //CheckInvalidPathChars(path);
+
+ int length = path.Length;
+ if (
+ (length >= 1 && (path[0] == DirectorySeparatorChar ||
+ path[0] == AltDirectorySeparatorChar)) ||
+ (length >= 2 && path[1] == VolumeSeparatorChar)
+ ) return true;
+ }
+ return false;
+ }
+ /*
+ private static void CheckInvalidPathChars(string path)
+ {
+ for (int index = 0; index < path.Length; ++index)
+ {
+ int num = path[index];
+ switch (num)
+ {
+ case 34:
+ case 60:
+ case 62:
+ case 124:
+ ReportError.SendErrorMessage("Invalid Character " + num + " in filename " + path);
+ continue;
+ default:
+ if (num >= 32)
+ continue;
+
+ goto case 34;
+ }
+ }
+ }
+ */
+
+ public static string GetFileNameWithoutExtension(string path)
+ {
+ return System.IO.Path.GetFileNameWithoutExtension(path);
+ }
+
+ public static string GetFileName(string path)
+ {
+ return System.IO.Path.GetFileName(path);
+ }
+ public static string GetDirectoryName(string path)
+ {
+ if (unix.IsUnix)
+ return System.IO.Path.GetDirectoryName(path);
+
+
+ if (path != null)
+ {
+ int root = GetRootLength(path);
+ int i = path.Length;
+ if (i > root)
+ {
+ i = path.Length;
+ if (i == root) return null;
+ while (i > root && path[--i] != DirectorySeparatorChar && path[i] != AltDirectorySeparatorChar) ;
+ return path.Substring(0, i);
+ }
+ }
+ return null;
+ }
+
+ private static int GetRootLength(string path)
+ {
+ int i = 0;
+ int length = path.Length;
+
+ if (length >= 1 && (IsDirectorySeparator(path[0])))
+ {
+ // handles UNC names and directories off current drive's root.
+ i = 1;
+ if (length >= 2 && (IsDirectorySeparator(path[1])))
+ {
+ i = 2;
+ int n = 2;
+ while (i < length && ((path[i] != DirectorySeparatorChar && path[i] != AltDirectorySeparatorChar) || --n > 0)) i++;
+ }
+ }
+ else if (length >= 2 && path[1] == VolumeSeparatorChar)
+ {
+ // handles A:\foo.
+ i = 2;
+ if (length >= 3 && (IsDirectorySeparator(path[2]))) i++;
+ }
+ return i;
+ }
+ private static bool IsDirectorySeparator(char c)
+ {
+ return (c == DirectorySeparatorChar || c == AltDirectorySeparatorChar);
+ }
+
+ }
+
+
+ public static class FileStream
+ {
+ private const uint GENERIC_READ = 0x80000000;
+ private const uint GENERIC_WRITE = 0x40000000;
+
+ private const uint FILE_ATTRIBUTE_NORMAL = 0x80;
+
+ // errorMessage = new Win32Exception(errorCode).Message;
+
+ public static Stream OpenFileRead(string path, out int result)
+ {
+ result = OpenFileRead(path, out Stream stream);
+ return stream;
+ }
+
+ public static int OpenFileRead(string path, out Stream stream)
+ {
+ if (unix.IsUnix)
+ {
+ try
+ {
+ stream = new System.IO.FileStream(path, FileMode.Open, FileAccess.Read);
+ return 0;
+ }
+ catch (Exception)
+ {
+ stream = null;
+ return Marshal.GetLastWin32Error();
+ }
+ }
+
+ string filename = NameFix.AddLongPathPrefix(path);
+ SafeFileHandle hFile = Win32Native.CreateFile(filename,
+ GENERIC_READ,
+ System.IO.FileShare.Read,
+ IntPtr.Zero,
+ FileMode.Open,
+ FILE_ATTRIBUTE_NORMAL,
+ IntPtr.Zero);
+
+ if (hFile.IsInvalid)
+ {
+ stream = null;
+ return Marshal.GetLastWin32Error();
+ }
+ stream = new System.IO.FileStream(hFile, FileAccess.Read);
+
+ return 0;
+ }
+
+ public static int OpenFileWrite(string path, out Stream stream)
+ {
+ if (unix.IsUnix)
+ {
+ try
+ {
+ stream = new System.IO.FileStream(path, FileMode.Create, FileAccess.ReadWrite);
+ return 0;
+ }
+ catch (Exception)
+ {
+ stream = null;
+ return Marshal.GetLastWin32Error();
+ }
+ }
+
+
+ string filename = NameFix.AddLongPathPrefix(path);
+ SafeFileHandle hFile = Win32Native.CreateFile(filename,
+ GENERIC_READ | GENERIC_WRITE,
+ System.IO.FileShare.None,
+ IntPtr.Zero,
+ FileMode.Create,
+ FILE_ATTRIBUTE_NORMAL,
+ IntPtr.Zero);
+
+ if (hFile.IsInvalid)
+ {
+ stream = null;
+ return Marshal.GetLastWin32Error();
+ }
+
+ stream = new System.IO.FileStream(hFile, FileAccess.ReadWrite);
+ return 0;
+ }
+
+
+ }
+
+ public static class NameFix
+ {
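+ // Return the Win32 short (8.3) form of the path via GetShortPathName, with the "\\?\" long-path prefix stripped from the result.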
+ public static string GetShortPath(string path)
+ {
+ if (unix.IsUnix)
+ return path;
+
+ int remove = 0;
+ string retPath;
+ if (path.StartsWith(@"\\"))
+ {
+ retPath = @"\\?\UNC\" + path.Substring(2);
+ remove = 8;
+ }
+ else
+ {
+ retPath = path;
+ if (path.Substring(1, 1) != ":")
+ retPath = Path.Combine(System.IO.Directory.GetCurrentDirectory(), retPath);
+
+ retPath = cleandots(retPath);
+ retPath = @"\\?\" + retPath;
+ remove = 4;
+ }
+
+
+ const int MAX_PATH = 300;
+ StringBuilder shortPath = new StringBuilder(MAX_PATH);
+ Win32Native.GetShortPathName(retPath, shortPath, MAX_PATH);
+ retPath = shortPath.ToString();
+
+ retPath = retPath.Substring(remove);
+ if (remove == 8) retPath = @"\\" + retPath; // restore the "\\" consumed by the "\\?\UNC\" prefix
+
+ return retPath;
+ }
+
+
+
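+ // Prefix the path with "\\?\" (or "\\?\UNC\" for network shares) so Win32 calls accept paths
+ // longer than MAX_PATH; relative paths are first rooted against the current directory and
+ // "\..\" segments are collapsed.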
+ internal static string AddLongPathPrefix(string path)
+ {
+ if (string.IsNullOrEmpty(path) || path.StartsWith(@"\\?\"))
+ return path;
+
+ if (path.StartsWith(@"\\"))
+ return @"\\?\UNC\" + path.Substring(2);
+
+ string retPath = path;
+ if (path.Substring(1, 1) != ":")
+ retPath = Path.Combine(System.IO.Directory.GetCurrentDirectory(), retPath);
+
+ retPath = cleandots(retPath);
+
+ return @"\\?\" + retPath;
+
+ }
+
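+ // Collapse each "\..\" segment by removing the path component that precedes it.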
+ private static string cleandots(string path)
+ {
+ string retPath = path;
+ while (retPath.Contains(@"\..\"))
+ {
+ int index = retPath.IndexOf(@"\..\");
+ string path1 = retPath.Substring(0, index);
+ string path2 = retPath.Substring(index + 4);
+
+ int path1Back = path1.LastIndexOf(@"\");
+
+ retPath = path1.Substring(0, path1Back + 1) + path2;
+ }
+ return retPath;
+
+ }
+ }
+}
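Note (illustration only, not part of the patch): a minimal usage sketch of the RVIO wrappers added above. The RvioDemo class and the sample paths are hypothetical; the members it calls are the ones defined in this file.

using RVIO;

internal static class RvioDemo
{
    internal static void BackupIfPresent()
    {
        // Path.Combine mirrors System.IO.Path.Combine; on Windows it joins with '\' when needed.
        string source = Path.Combine(@"C:\roms", "game.zip");
        string dest = Path.Combine(@"C:\backup", "game.zip");

        // On Windows these calls go through Win32 with the "\\?\" long-path prefix;
        // on other platforms (unix.IsUnix) they fall back to System.IO.
        if (File.Exists(source))
        {
            Directory.CreateDirectory(@"C:\backup");
            File.Copy(source, dest, true); // overwrite: true maps to CopyFile(..., failIfExists: false)
        }
    }
}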
diff --git a/SabreTools.Library/External/RVIO/Win32Native.cs b/SabreTools.Library/External/RVIO/Win32Native.cs
new file mode 100644
index 00000000..08d4359c
--- /dev/null
+++ b/SabreTools.Library/External/RVIO/Win32Native.cs
@@ -0,0 +1,178 @@
+/******************************************************
+ * ROMVault3 is written by Gordon J. *
+ * Contact gordon@romvault.com *
+ * Copyright 2019 *
+ ******************************************************/
+
+using System;
+using System.IO;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Runtime.Versioning;
+using System.Security.Permissions;
+using System.Text;
+using Microsoft.Win32.SafeHandles;
+
+namespace RVIO
+{
+ internal static class Win32Native
+ {
+ private const string KERNEL32 = "kernel32.dll";
+
+ public const int FILE_ATTRIBUTE_DIRECTORY = 0x00000010;
+ public const int FILE_ATTRIBUTE_HIDDEN = 0x00000002;
+
+
+ internal const int ERROR_FILE_NOT_FOUND = 0x2;
+ internal const int ERROR_ACCESS_DENIED = 0x5;
+ internal const int ERROR_FILE_EXISTS = 0x50;
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.None)]
+ internal static extern bool GetFileAttributesEx(string fileName, int fileInfoLevel, ref WIN32_FILE_ATTRIBUTE_DATA lpFileInformation);
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.None)]
+ internal static extern SafeFindHandle FindFirstFile(string fileName, [In] [Out] WIN32_FIND_DATA data);
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.None)]
+ internal static extern bool FindNextFile(SafeFindHandle hndFindFile, [In] [Out] [MarshalAs(UnmanagedType.LPStruct)] WIN32_FIND_DATA lpFindFileData);
+
+ [DllImport(KERNEL32)]
+ [ResourceExposure(ResourceScope.None)]
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)]
+ internal static extern bool FindClose(IntPtr handle);
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.Machine)]
+ internal static extern SafeFileHandle CreateFile(string lpFileName,
+ uint dwDesiredAccess, FileShare dwShareMode,
+ IntPtr securityAttrs, FileMode dwCreationDisposition,
+ uint dwFlagsAndAttributes, IntPtr hTemplateFile);
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.Machine)]
+ internal static extern bool CreateDirectory(string path, IntPtr lpSecurityAttributes);
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.Machine)]
+ internal static extern bool RemoveDirectory(string path);
+
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.Machine)]
+ internal static extern bool CopyFile(string src, string dst, bool failIfExists);
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.Machine)]
+ internal static extern bool MoveFile(string src, string dst);
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.Machine)]
+ internal static extern bool DeleteFile(string path);
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.None)]
+ internal static extern bool SetFileAttributes(string name, int attr);
+
+
+ [DllImport(KERNEL32, SetLastError = true, CharSet = CharSet.Auto, BestFitMapping = false)]
+ [ResourceExposure(ResourceScope.Machine)]
+ internal static extern int GetShortPathName(
+ [MarshalAs(UnmanagedType.LPTStr)] string path,
+ [MarshalAs(UnmanagedType.LPTStr)] StringBuilder shortPath,
+ int shortPathLength
+ );
+
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Auto)]
+ [BestFitMapping(false)]
+ internal class WIN32_FIND_DATA
+ {
+ internal int dwFileAttributes = 0;
+ internal uint ftCreationTimeLow;
+ internal uint ftCreationTimeHigh;
+ internal uint ftLastAccessTimeLow;
+ internal uint ftLastAccessTimeHigh;
+ internal uint ftLastWriteTimeLow;
+ internal uint ftLastWriteTimeHigh;
+ internal int nFileSizeHigh = 0;
+ internal int nFileSizeLow = 0;
+ internal int dwReserved0 = 0;
+ internal int dwReserved1 = 0;
+
+ [MarshalAs(UnmanagedType.ByValTStr, SizeConst = 260)] internal string cFileName = null;
+
+ [MarshalAs(UnmanagedType.ByValTStr, SizeConst = 14)] internal string cAlternateFileName = null;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ [Serializable]
+ internal struct WIN32_FILE_ATTRIBUTE_DATA
+ {
+ internal int fileAttributes;
+ internal uint ftCreationTimeLow;
+ internal uint ftCreationTimeHigh;
+ internal uint ftLastAccessTimeLow;
+ internal uint ftLastAccessTimeHigh;
+ internal uint ftLastWriteTimeLow;
+ internal uint ftLastWriteTimeHigh;
+ internal int fileSizeHigh;
+ internal int fileSizeLow;
+ }
+ }
+
+ internal sealed class SafeFindHandle : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ [SecurityPermission(SecurityAction.LinkDemand, UnmanagedCode = true)]
+ internal SafeFindHandle() : base(true)
+ {
+ }
+
+ protected override bool ReleaseHandle()
+ {
+ return Win32Native.FindClose(handle);
+ }
+ }
+
+
+ internal static class Convert
+ {
+ private const long TicksPerMillisecond = 10000;
+ private const long TicksPerSecond = TicksPerMillisecond*1000;
+ private const long TicksPerMinute = TicksPerSecond*60;
+ private const long TicksPerHour = TicksPerMinute*60;
+ private const long TicksPerDay = TicksPerHour*24;
+
+ // Number of days in a non-leap year
+ private const int DaysPerYear = 365;
+ // Number of days in 4 years
+ private const int DaysPer4Years = DaysPerYear*4 + 1;
+ // Number of days in 100 years
+ private const int DaysPer100Years = DaysPer4Years*25 - 1;
+ // Number of days in 400 years
+ private const int DaysPer400Years = DaysPer100Years*4 + 1;
+
+ // Number of days from 1/1/0001 to 12/31/1600
+ private const int DaysTo1601 = DaysPer400Years*4;
+ public const long FileTimeOffset = DaysTo1601*TicksPerDay;
+
+
+ // Number of days from 1/1/0001 to 12/31/9999
+ private const int DaysTo10000 = DaysPer400Years*25 - 366;
+ private const long MinTicks = 0;
+ private const long MaxTicks = DaysTo10000*TicksPerDay - 1;
+
+
+ public static long Length(int high, int low)
+ {
+ return ((long) high << 32) | (low & 0xFFFFFFFFL);
+ }
+
+ public static long Time(uint high, uint low)
+ {
+ return ((long) high << 32) | low;
+ }
+ }
+}
\ No newline at end of file
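Note (illustration only, not part of the patch): Win32Native reports file sizes and timestamps as split 32-bit halves, and the Convert helpers above recombine them. The sketch below would have to live inside the RVIO assembly because these types are internal, and treating the recombined FILETIME as UTC is an assumption about intended use.

internal static class FileTimes
{
    internal static System.DateTime? GetLastWriteUtc(string path)
    {
        var data = new Win32Native.WIN32_FILE_ATTRIBUTE_DATA();
        if (!Win32Native.GetFileAttributesEx(NameFix.AddLongPathPrefix(path), 0, ref data))
            return null;

        // FILETIME counts 100ns ticks since 1601-01-01; FileTimeOffset shifts that onto the
        // DateTime epoch (0001-01-01).
        long ticks = Convert.Time(data.ftLastWriteTimeHigh, data.ftLastWriteTimeLow);
        return new System.DateTime(ticks + Convert.FileTimeOffset, System.DateTimeKind.Utc);
    }
}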
diff --git a/SabreTools.Library/External/SupportedFiles/ZipFile.cs b/SabreTools.Library/External/SupportedFiles/ZipFile.cs
deleted file mode 100644
index a674a8ce..00000000
--- a/SabreTools.Library/External/SupportedFiles/ZipFile.cs
+++ /dev/null
@@ -1,1035 +0,0 @@
-using System;
-using System.Collections.Generic;
-using System.Text;
-
-using SabreTools.Library.Data;
-using SabreTools.Library.Tools;
-
-#if MONO
-using System.IO;
-#else
-using Alphaleonis.Win32.Filesystem;
-
-using BinaryReader = System.IO.BinaryReader;
-using BinaryWriter = System.IO.BinaryWriter;
-using IOException = System.IO.IOException;
-using MemoryStream = System.IO.MemoryStream;
-using PathTooLongException = System.IO.PathTooLongException;
-using Stream = System.IO.Stream;
-#endif
-
-namespace ROMVault2.SupportedFiles.Zip
-{
- ///
- /// Based on work by GordonJ for RomVault
- /// https://github.com/gjefferyes/RomVault/blob/master/ROMVault2/SupportedFiles/Zip/zipFile.cs
- ///
- public class ZipFile : IDisposable
- {
- #region Private instance variables
-
- private FileInfo _zipFileInfo;
- private ulong _centerDirStart;
- private ulong _centerDirSize;
- private ulong _endOfCenterDir64;
- private byte[] _fileComment;
- private Stream _zipstream;
- private uint _entriesCount;
- private readonly List<ZipFileEntry> _entries = new List<ZipFileEntry>();
- private ZipStatus _zipStatus;
- private bool _zip64;
- private ZipOpenType _zipOpen;
- private int _readIndex;
-
- #endregion
-
- #region Public facing variables
-
- public string ZipFilename
- {
- get { return (_zipFileInfo != null ? _zipFileInfo.FullName : ""); }
- }
- public long TimeStamp
- {
- get { return (_zipFileInfo != null ? _zipFileInfo.LastWriteTime.Ticks : 0); }
- }
- public ZipOpenType ZipOpen
- {
- get { return _zipOpen; }
- set { _zipOpen = value; }
- }
- public ZipStatus ZipStatus
- {
- get { return _zipStatus; }
- }
- public List<ZipFileEntry> Entries
- {
- get { return _entries; }
- }
- public int EntriesCount
- {
- get { return _entries.Count; }
- }
- public string Filename(int i)
- {
- return _entries[i].FileName;
- }
- public ulong UncompressedSize(int i)
- {
- return _entries[i].UncompressedSize;
- }
- public ulong? LocalHeader(int i)
- {
- return ((_entries[i].GeneralPurposeBitFlag & GeneralPurposeBitFlag.LanguageEncodingFlag) == 0
- ? (ulong?)_entries[i].RelativeOffset
- : null);
- }
- public ZipReturn FileStatus(int i)
- {
- return _entries[i].FileStatus;
- }
- public byte[] CRC32(int i)
- {
- return _entries[i].CRC;
- }
- public byte[] MD5(int i)
- {
- return _entries[i].MD5;
- }
- public byte[] SHA1(int i)
- {
- return _entries[i].SHA1;
- }
- public bool Contains(string n)
- {
- return _entries.Contains(new ZipFileEntry(new MemoryStream(), n));
- }
-
- #endregion
-
- #region Destructors
-
- ~ZipFile()
- {
- Dispose();
- }
-
- public void Dispose()
- {
- if (_zipstream != null)
- {
- _zipstream.Close();
- _zipstream.Dispose();
- }
- }
-
- #endregion
-
- #region Central Directory
-
- ///
- /// Find the end of the central directory signature
- ///
- /// Status of the given stream
- private ZipReturn FindEndOfCentralDirSignature()
- {
- long fileSize = _zipstream.Length;
- long maxBackSearch = 0xffff;
-
- if (_zipstream.Length < maxBackSearch)
- {
- maxBackSearch = _zipstream.Length;
- }
-
- const long buffsize = 0x400;
- byte[] buffer = new byte[buffsize + 4];
-
- long backPosition = 4;
- while (backPosition < maxBackSearch)
- {
- backPosition += buffsize;
- if (backPosition > maxBackSearch) backPosition = maxBackSearch;
-
- long readSize = backPosition > (buffsize + 4) ? (buffsize + 4) : backPosition;
-
- _zipstream.Position = fileSize - backPosition;
-
- _zipstream.Read(buffer, 0, (int)readSize);
-
-
- for (long i = readSize - 4; i >= 0; i--)
- {
- if ((buffer[i] != 0x50) || (buffer[i + 1] != 0x4b) || (buffer[i + 2] != 0x05) || (buffer[i + 3] != 0x06))
- {
- continue;
- }
-
- _zipstream.Position = (fileSize - backPosition) + i;
- return ZipReturn.ZipGood;
- }
- }
- return ZipReturn.ZipCentralDirError;
- }
-
- ///
- /// Read the end of the central directory
- ///
- /// Status of the given stream
- private ZipReturn ReadEndOfCentralDir()
- {
- // Open the stream for reading
- BinaryReader br = new BinaryReader(_zipstream);
-
- // If the stream doesn't start with the correct signature, return
- uint thisSignature = br.ReadUInt32();
- if (thisSignature != Constants.EndOfCentralDirSignature)
- {
- return ZipReturn.ZipEndOfCentralDirectoryError;
- }
-
- // If this is part of a spanned archive, return
- ushort tushort = br.ReadUInt16(); // NumberOfThisDisk
- if (tushort != 0)
- {
- return ZipReturn.ZipEndOfCentralDirectoryError;
- }
- tushort = br.ReadUInt16(); // NumberOfThisDiskCenterDir
- if (tushort != 0)
- {
- return ZipReturn.ZipEndOfCentralDirectoryError;
- }
-
- // If the number of entries in the current disk doesn't match up with the total entries, return
- _entriesCount = br.ReadUInt16(); // TotalNumberOfEntriesDisk
- tushort = br.ReadUInt16(); // TotalNumber of entries in the central directory
- if (tushort != _entriesCount)
- {
- return ZipReturn.ZipEndOfCentralDirectoryError;
- }
-
- _centerDirSize = br.ReadUInt32(); // SizeOfCenteralDir
- _centerDirStart = br.ReadUInt32(); // Offset
-
- // Get the file comment
- ushort zipFileCommentLength = br.ReadUInt16();
- _fileComment = br.ReadBytes(zipFileCommentLength);
-
- // If there's extra data past the comment, flag that we have extra data
- if (_zipstream.Position != _zipstream.Length)
- {
- _zipStatus |= ZipStatus.ExtraData;
- }
-
- return ZipReturn.ZipGood;
- }
-
- ///
- /// Write the end of the central directory
- ///
- private void WriteEndOfCentralDir()
- {
- // Open the stream for writing
- BinaryWriter bw = new BinaryWriter(_zipstream);
-
- // Now write out all of the data
- bw.Write(Constants.EndOfCentralDirSignature);
- bw.Write((ushort)0); // NumberOfThisDisk
- bw.Write((ushort)0); // NumberOfThisDiskCenterDir
- bw.Write((ushort)(_entries.Count >= 0xffff ? 0xffff : _entries.Count)); // TotalNumberOfEnteriesDisk
- bw.Write((ushort)(_entries.Count >= 0xffff ? 0xffff : _entries.Count)); // TotalNumber of enteries in the central directory
- bw.Write((uint)(_centerDirSize >= 0xffffffff ? 0xffffffff : _centerDirSize));
- bw.Write((uint)(_centerDirStart >= 0xffffffff ? 0xffffffff : _centerDirStart));
- bw.Write((ushort)_fileComment.Length);
- bw.Write(_fileComment, 0, _fileComment.Length);
- }
-
- #endregion
-
- #region Zip64 Central Directory
-
- ///
- /// Read the end of the Zip64 central directory
- ///
- /// Status of the given stream
- private ZipReturn ReadZip64EndOfCentralDir()
- {
- // Set the type of the archive to Zip64
- _zip64 = true;
-
- // Open the stream for reading
- BinaryReader br = new BinaryReader(_zipstream);
-
- // If the signature doesn't match, then return
- uint thisSignature = br.ReadUInt32();
- if (thisSignature != Constants.Zip64EndOfCentralDirSignature)
- {
- return ZipReturn.ZipEndOfCentralDirectoryError;
- }
-
- // If the size of the central dir record isn't right, return
- ulong tulong = br.ReadUInt64(); // Size of zip64 end of central directory record
- if (tulong != 44)
- {
- return ZipReturn.Zip64EndOfCentralDirError;
- }
-
- br.ReadUInt16(); // version made by
-
- // If the version needed to extract isn't correct, return
- ushort tushort = br.ReadUInt16(); // version needed to extract
- if (tushort != (ushort)ArchiveVersion.TorrentZip64)
- {
- return ZipReturn.Zip64EndOfCentralDirError;
- }
-
- // If this is part of a spanned archive, return
- uint tuint = br.ReadUInt32(); // number of this disk
- if (tuint != 0)
- {
- return ZipReturn.Zip64EndOfCentralDirError;
- }
- tuint = br.ReadUInt32(); // number of the disk with the start of the central directory
- if (tuint != 0)
- {
- return ZipReturn.Zip64EndOfCentralDirError;
- }
-
- // If the number of entries in the current disk doesn't match up with the total entries, return
- _entriesCount = (uint)br.ReadUInt64(); // total number of entries in the central directory on this disk
- tulong = br.ReadUInt64(); // total number of entries in the central directory
- if (tulong != _entriesCount)
- {
- return ZipReturn.Zip64EndOfCentralDirError;
- }
-
- _centerDirSize = br.ReadUInt64(); // size of central directory
- _centerDirStart = br.ReadUInt64(); // offset of start of central directory with respect to the starting disk number
-
- return ZipReturn.ZipGood;
- }
-
- ///
- /// Write the end of the Zip64 central directory
- ///
- private void WriteZip64EndOfCentralDir()
- {
- // Open the stream for writing
- BinaryWriter bw = new BinaryWriter(_zipstream);
-
- // Now write out all of the data
- bw.Write(Constants.Zip64EndOfCentralDirSignature);
- bw.Write((ulong)44); // Size of zip64 end of central directory record
- bw.Write((ushort)ArchiveVersion.TorrentZip64); // version made by
- bw.Write((ushort)ArchiveVersion.TorrentZip64); // version needed to extract
- bw.Write((uint)0); // number of this disk
- bw.Write((uint)0); // number of the disk with the start of the central directroy
- bw.Write((ulong)_entries.Count); // total number of entries in the central directory on this disk
- bw.Write((ulong)_entries.Count); // total number of entries in the central directory
- bw.Write(_centerDirSize); // size of central directory
- bw.Write(_centerDirStart); // offset of start of central directory with respect to the starting disk number
- }
-
- ///
- /// Read the end of the Zip64 central directory locator
- ///
- ///
- private ZipReturn ReadZip64EndOfCentralDirectoryLocator()
- {
- // Set the current archive type to Zip64
- _zip64 = true;
-
- // Open the stream for reading
- BinaryReader br = new BinaryReader(_zipstream);
-
- // If the signature doesn't match, return
- uint thisSignature = br.ReadUInt32();
- if (thisSignature != Constants.Zip64EndOfCentralDirectoryLocator)
- {
- return ZipReturn.ZipEndOfCentralDirectoryError;
- }
-
- // If the disk isn't the first and only, then return
- uint tuint = br.ReadUInt32(); // number of the disk with the start of the zip64 end of centeral directory
- if (tuint != 0)
- {
- return ZipReturn.Zip64EndOfCentralDirectoryLocatorError;
- }
-
- _endOfCenterDir64 = br.ReadUInt64(); // relative offset of the zip64 end of central directory record
-
- tuint = br.ReadUInt32(); // total number of disks
- if (tuint != 1)
- {
- return ZipReturn.Zip64EndOfCentralDirectoryLocatorError;
- }
-
- return ZipReturn.ZipGood;
- }
-
- ///
- /// Write the end of the Zip64 central directory locator
- ///
- private void WriteZip64EndOfCentralDirectoryLocator()
- {
- // Open the stream for writing
- BinaryWriter bw = new BinaryWriter(_zipstream);
-
- // Now write the data
- bw.Write(Constants.Zip64EndOfCentralDirectoryLocator);
- bw.Write((uint)0); // number of the disk with the start of the zip64 end of centeral directory
- bw.Write(_endOfCenterDir64); // relative offset of the zip64 end of central directroy record
- bw.Write((uint)1); // total number of disks
- }
-
- #endregion
-
- #region Open, Create, Close
-
- ///
- /// Open a new file as an archive
- ///
- /// Name of the new file to open
- /// Timestamp the file should have
- /// True if file headers should be read, false otherwise
- /// Status of the underlying stream
- public ZipReturn Open(string filename, long timestamp, bool readHeaders)
- {
- // If a stream already exists, close it
- Close();
-
- // Now, reset the archive information
- _zipStatus = ZipStatus.None;
- _zip64 = false;
- _centerDirStart = 0;
- _centerDirSize = 0;
- _zipFileInfo = null;
-
- // Then, attempt to open the file and get information from it
- try
- {
- // If the input file doesn't exist, close the stream and return
- if (!File.Exists(filename))
- {
- Close();
- return ZipReturn.ZipErrorFileNotFound;
- }
-
- // Get the fileinfo object
- _zipFileInfo = new FileInfo(filename);
-
- // If the timestamps don't match, close the stream and return
- if (_zipFileInfo.LastWriteTime.Ticks != timestamp)
- {
- Close();
- return ZipReturn.ZipErrorTimeStamp;
- }
-
- // Now try to open the file for reading
- _zipstream = Utilities.TryOpenRead(filename);
- int read = _zipstream.Read(new byte[1], 0, 1);
- if (read != 1)
- {
- Close();
- return ZipReturn.ZipErrorOpeningFile;
- }
- _zipstream.Position = 0;
- }
- catch (PathTooLongException)
- {
- Close();
- return ZipReturn.ZipFileNameToLong;
- }
- catch (IOException)
- {
- Close();
- return ZipReturn.ZipErrorOpeningFile;
- }
-
- // If we succeeded, set the flag for read
- _zipOpen = ZipOpenType.OpenRead;
-
- // If we're not reading the headers, return
- if (!readHeaders)
- {
- return ZipReturn.ZipGood;
- }
-
- //Otherwise, we want to get all of the archive information
- try
- {
- // First, try to get the end of the central directory
- ZipReturn zr = FindEndOfCentralDirSignature();
- if (zr != ZipReturn.ZipGood)
- {
- Close();
- return zr;
- }
-
- // Now read the end of the central directory
- long eocd = _zipstream.Position;
- zr = ReadEndOfCentralDir();
- if (zr != ZipReturn.ZipGood)
- {
- Close();
- return zr;
- }
-
- // If we have any indicators of Zip64, check for the Zip64 EOCD
- if (_centerDirStart == 0xffffffff || _centerDirSize == 0xffffffff || _entriesCount == 0xffff)
- {
- _zip64 = true;
-
- // Check for the Zip64 EOCD locator
- _zipstream.Position = eocd - 20;
- zr = ReadZip64EndOfCentralDirectoryLocator();
- if (zr != ZipReturn.ZipGood)
- {
- Close();
- return zr;
- }
-
- // If it was found, read the Zip64 EOCD
- _zipstream.Position = (long)_endOfCenterDir64;
- zr = ReadZip64EndOfCentralDir();
- if (zr != ZipReturn.ZipGood)
- {
- Close();
- return zr;
- }
- }
-
- // Now that we have the rest of the information, check for TorrentZip
- bool torrentZip = false;
- if (_fileComment.Length == 22)
- {
- if (Encoding.ASCII.GetString(_fileComment).Substring(0, 14) == "TORRENTZIPPED-")
- {
- // First get to the right part of the stream
- OptimizedCRC ocrc = new OptimizedCRC();
- byte[] buffer = new byte[_centerDirSize];
- _zipstream.Position = (long)_centerDirStart;
-
- // Then read in the central directory and hash
- BinaryReader br = new BinaryReader(_zipstream);
- buffer = br.ReadBytes((int)_centerDirSize);
- ocrc.Update(buffer, 0, (int)_centerDirSize);
- string calculatedCrc = ocrc.Value.ToString("X8");
-
- // If the hashes match, then we have a torrentzip file
- string extractedCrc = Encoding.ASCII.GetString(_fileComment).Substring(14, 8);
- if (String.Equals(calculatedCrc, extractedCrc, StringComparison.Ordinal))
- {
- torrentZip = true;
- }
- }
- }
-
- // With potential torrentzip out of the way, read the central directory
- _zipstream.Position = (long)_centerDirStart;
-
- // Remove any entries already listed in the archive
- _entries.Clear();
- _entries.Capacity = (int)_entriesCount;
-
- // Now populate the entries from the central directory
- for (int i = 0; i < _entriesCount; i++)
- {
- ZipFileEntry zfe = new ZipFileEntry(_zipstream);
- zr = zfe.ReadCentralDirectory();
-
- // If we get any errors, close and return
- if (zr != ZipReturn.ZipGood)
- {
- Close();
- return zr;
- }
-
- // If we have a Zip64 entry, make sure the archive is
- _zip64 |= zfe.Zip64;
-
- // Now add the entry to the archive
- _entries.Add(zfe);
- }
-
- // Now that the entries are populated, verify against the actual headers
- for (int i = 0; i < _entriesCount; i++)
- {
- zr = _entries[i].ReadHeader();
-
- // If we get any errors, close and return
- if (zr != ZipReturn.ZipGood)
- {
- Close();
- return zr;
- }
-
- // If we have a torrentzipped entry, make sure the archive is
- torrentZip &= _entries[i].TorrentZip;
- }
-
- // If we have a torrentzipped file, check the file order
- if (torrentZip)
- {
- for (int i = 0; i < _entriesCount - 1; i++)
- {
- if (TorrentZipStringCompare(_entries[i].FileName, _entries[i + 1].FileName) < 0)
- {
- continue;
- }
- torrentZip = false;
- break;
- }
- }
-
- // Now check for torrentzipped directories if we still have a torrentZip file
- if (torrentZip)
- {
- for (int i = 0; i < _entriesCount - 1; i++)
- {
- // See if we found a directory
- string filename0 = _entries[i].FileName;
- if (filename0.Substring(filename0.Length - 1, 1) != "/")
- {
- continue;
- }
-
- // See if the next file is in that directory
- string filename1 = _entries[i + 1].FileName;
- if (filename1.Length <= filename0.Length)
- {
- continue;
- }
- if (TorrentZipStringCompare(filename0, filename1.Substring(0, filename0.Length)) == 0)
- {
- continue;
- }
-
- // If we found a file in the directory, then we don't need the directory entry
- torrentZip = false;
- break;
- }
- }
-
- // If we still have torrentzip, say the archive is too
- if (torrentZip)
- {
- _zipStatus |= ZipStatus.TorrentZip;
- }
-
- return ZipReturn.ZipGood;
- }
- catch
- {
- Close();
- return ZipReturn.ZipErrorReadingFile;
- }
- }
-
- ///
- /// Create a new file as an archive
- ///
- /// Name of the new file to create
- /// Status of the underlying stream
- public ZipReturn Create(string filename)
- {
- // If the file is already open, return
- if (_zipOpen != ZipOpenType.Closed)
- {
- return ZipReturn.ZipFileAlreadyOpen;
- }
-
- // Otherwise, create the directory for the file
- Directory.CreateDirectory(Path.GetDirectoryName(filename));
- _zipFileInfo = new FileInfo(filename);
-
- // Now try to open the file
- _zipstream = File.Open(filename, System.IO.FileMode.OpenOrCreate, System.IO.FileAccess.ReadWrite);
- ZipOpen = ZipOpenType.OpenWrite;
- return ZipReturn.ZipGood;
- }
-
- ///
- /// Close the file that the stream refers to
- ///
- public void Close()
- {
- // If the stream is already closed, then just return
- if (_zipOpen == ZipOpenType.Closed)
- {
- return;
- }
-
- // If the stream is opened for read, close it
- if (_zipOpen == ZipOpenType.OpenRead)
- {
- Dispose();
- _zipOpen = ZipOpenType.Closed;
- return;
- }
-
- // Now, the only other choice is open for writing so we check everything is correct
- _zip64 = false;
- bool torrentZip = true;
-
- // Check the central directory
- _centerDirStart = (ulong)_zipstream.Position;
- if (_centerDirStart >= 0xffffffff)
- {
- _zip64 = true;
- }
-
- // Now loop through and add all of the central directory entries
- foreach (ZipFileEntry zfe in _entries)
- {
- zfe.WriteCentralDirectory(_zipstream);
- _zip64 |= zfe.Zip64;
- torrentZip &= zfe.TorrentZip;
- }
-
- _centerDirSize = (ulong)_zipstream.Position - _centerDirStart;
-
- // Then get the central directory hash
- OptimizedCRC ocrc = new OptimizedCRC();
- byte[] buffer = new byte[_centerDirSize];
- long currentPosition = _zipstream.Position;
- _zipstream.Position = (long)_centerDirStart;
-
- // Then read in the central directory and hash
- BinaryReader br = new BinaryReader(_zipstream);
- buffer = br.ReadBytes((int)_centerDirSize);
- ocrc.Update(buffer, 0, (int)_centerDirSize);
- string calculatedCrc = ocrc.Value.ToString("X8");
-
- // Finally get back to the original position
- _zipstream.Position = currentPosition;
-
- // Now set more of the information
- _fileComment = (torrentZip ? Encoding.ASCII.GetBytes(("TORRENTZIPPED-" + calculatedCrc).ToCharArray()) : new byte[0]);
- _zipStatus = (torrentZip ? ZipStatus.TorrentZip : ZipStatus.None);
-
- // If we have a Zip64 archive, write the correct information
- if (_zip64)
- {
- _endOfCenterDir64 = (ulong)_zipstream.Position;
- WriteZip64EndOfCentralDir();
- WriteZip64EndOfCentralDirectoryLocator();
- }
-
- // Now write out the end of the central directory
- WriteEndOfCentralDir();
-
- // Finally, close and dispose of the stream
- _zipstream.SetLength(_zipstream.Position);
- _zipstream.Flush();
- _zipstream.Close();
- _zipstream.Dispose();
-
- // Get the new file information
- _zipFileInfo = new FileInfo(_zipFileInfo.FullName);
-
- // And set the stream to closed
- _zipOpen = ZipOpenType.Closed;
- }
-
- ///
- /// Close a failed stream
- ///
- public void CloseFailed()
- {
- // If the stream is already closed, return
- if (_zipOpen == ZipOpenType.Closed)
- {
- return;
- }
-
- // If we're open for read, close the underlying stream
- if (_zipOpen == ZipOpenType.OpenRead)
- {
- Dispose();
- _zipOpen = ZipOpenType.Closed;
- return;
- }
-
- // Otherwise, we only have an open for write left
- _zipstream.Flush();
- _zipstream.Close();
- _zipstream.Dispose();
-
- // Delete the failed file
- Utilities.TryDeleteFile(_zipFileInfo.FullName);
- _zipFileInfo = null;
- _zipOpen = ZipOpenType.Closed;
- }
-
- #endregion
-
- #region Read and Write
-
- ///
- /// Open the read file stream
- ///
- /// Index of entry to read
- /// If compression mode is deflate, use the zipstream as is, otherwise decompress
- /// Output stream representing the correctly compressed stream
- /// Size of the stream regardless of compression
- /// Compression method to compare against
- /// Status of the underlying stream
- public ZipReturn OpenReadStream(int index, bool raw, out Stream stream, out ulong streamSize, out CompressionMethod compressionMethod, out uint lastMod)
- {
- // Set all of the defaults
- streamSize = 0;
- compressionMethod = CompressionMethod.Stored;
- lastMod = 0;
- _readIndex = index;
- stream = null;
-
- // If the file isn't open for read, return
- if (_zipOpen != ZipOpenType.OpenRead)
- {
- return ZipReturn.ZipReadingFromOutputFile;
- }
-
- // Now try to read the local file header
- ZipReturn zr = _entries[index].ReadHeader();
- if (zr != ZipReturn.ZipGood)
- {
- Close();
- return zr;
- }
-
- // Now return the results of opening the local file
- return _entries[index].OpenReadStream(raw, out stream, out streamSize, out compressionMethod, out lastMod);
- }
-
- ///
- /// Open the read file stream wihtout verification, if possible
- ///
- /// Index of entry to read
- /// If compression mode is deflate, use the zipstream as is, otherwise decompress
- /// Output stream representing the correctly compressed stream
- /// Size of the stream regardless of compression
- /// Compression method to compare against
- /// Status of the underlying stream
- public ZipReturn OpenReadStreamQuick(ulong pos, bool raw, out Stream stream, out ulong streamSize, out CompressionMethod compressionMethod, out uint lastMod)
- {
- // Get the temporary entry based on the defined position
- ZipFileEntry tempEntry = new ZipFileEntry(_zipstream);
- tempEntry.RelativeOffset = pos;
-
- // Clear the local files and add this file instead
- _entries.Clear();
- _entries.Add(tempEntry);
-
- // Now try to read the header quickly
- ZipReturn zr = tempEntry.ReadHeaderQuick();
- if (zr != ZipReturn.ZipGood)
- {
- stream = null;
- streamSize = 0;
- compressionMethod = CompressionMethod.Stored;
- lastMod = 0;
- return zr;
- }
- _readIndex = 0;
-
- // Return the file stream if it worked
- return tempEntry.OpenReadStream(raw, out stream, out streamSize, out compressionMethod, out lastMod);
- }
-
- ///
- /// Close the read file stream
- ///
- ///
- public ZipReturn CloseReadStream()
- {
- return _entries[_readIndex].CloseReadStream();
- }
-
- ///
- /// Open the write file stream
- ///
- /// If compression mode is deflate, use the zipstream as is, otherwise decompress
- /// True if outputted stream should be torrentzipped, false otherwise
- /// Uncompressed size of the stream
- /// Compression method to compare against
- /// Output stream representing the correctly compressed stream
- /// Status of the underlying stream
- public ZipReturn OpenWriteStream(bool raw, bool torrentZip, string filename, ulong uncompressedSize,
- CompressionMethod compressionMethod, out Stream stream, uint lastMod = Constants.TorrentZipFileDateTime)
- {
- // Check to see if the stream is writable
- stream = null;
- if (_zipOpen != ZipOpenType.OpenWrite)
- {
- return ZipReturn.ZipWritingToInputFile;
- }
-
- // Open the entry stream based on the current position
- ZipFileEntry zfe = new ZipFileEntry(_zipstream, filename, lastMod: lastMod);
- ZipReturn zr = zfe.OpenWriteStream(raw, torrentZip, uncompressedSize, compressionMethod, out stream);
- _entries.Add(zfe);
-
- return zr;
- }
-
- ///
- /// Close the write file stream
- ///
- /// CRC to assign to the current stream
- /// Status of the underlying stream
- public ZipReturn CloseWriteStream(uint crc32)
- {
- return _entries[_entries.Count - 1].CloseWriteStream(crc32);
- }
-
- ///
- /// Remove the last added entry, if possible
- ///
- /// Status of the underlying stream
- public ZipReturn RollBack()
- {
- // If the stream isn't writable, return
- if (_zipOpen != ZipOpenType.OpenWrite)
- {
- return ZipReturn.ZipWritingToInputFile;
- }
-
- // Otherwise, make sure there are entries to roll back
- int fileCount = _entries.Count;
- if (fileCount == 0)
- {
- return ZipReturn.ZipErrorRollBackFile;
- }
-
- // Get the last added entry and remove
- ZipFileEntry zfe = _entries[fileCount - 1];
- _entries.RemoveAt(fileCount - 1);
- _zipstream.Position = (long)zfe.RelativeOffset;
- return ZipReturn.ZipGood;
- }
-
- ///
- /// Add a directory marking to a local file
- ///
- public void AddDirectory()
- {
- _entries[_entries.Count - 1].AddDirectory();
- }
-
- #endregion
-
- #region Helpers
-
- ///
- /// Scan every individual entry for validity
- ///
- public void DeepScan()
- {
- foreach (ZipFileEntry zfe in _entries)
- {
- zfe.Check();
- }
- }
-
- ///
- /// Get the text associated with a return status
- ///
- /// ZipReturn status to parse
- /// String associated with the ZipReturn
- public static string ZipErrorMessageText(ZipReturn zr)
- {
- string ret = "Unknown";
- switch (zr)
- {
- case ZipReturn.ZipGood:
- ret = "";
- break;
- case ZipReturn.ZipFileCountError:
- ret = "The number of file in the Zip does not mach the number of files in the Zips Centeral Directory";
- break;
- case ZipReturn.ZipSignatureError:
- ret = "An unknown Signature Block was found in the Zip";
- break;
- case ZipReturn.ZipExtraDataOnEndOfZip:
- ret = "Extra Data was found on the end of the Zip";
- break;
- case ZipReturn.ZipUnsupportedCompression:
- ret = "An unsupported Compression method was found in the Zip, if you recompress this zip it will be usable";
- break;
- case ZipReturn.ZipLocalFileHeaderError:
- ret = "Error reading a zipped file header information";
- break;
- case ZipReturn.ZipCentralDirError:
- ret = "There is an error in the Zip Centeral Directory";
- break;
- case ZipReturn.ZipReadingFromOutputFile:
- ret = "Trying to write to a Zip file open for output only";
- break;
- case ZipReturn.ZipWritingToInputFile:
- ret = "Tring to read from a Zip file open for input only";
- break;
- case ZipReturn.ZipErrorGettingDataStream:
- ret = "Error creating Data Stream";
- break;
- case ZipReturn.ZipCRCDecodeError:
- ret = "CRC error";
- break;
- case ZipReturn.ZipDecodeError:
- ret = "Error unzipping a file";
- break;
- }
-
- return ret;
- }
-
- ///
- /// Compare two strings in TorrentZip format
- ///
- ///
- ///
- ///
- public static int TorrentZipStringCompare(string string1, string string2)
- {
- char[] bytes1 = string1.ToCharArray();
- char[] bytes2 = string2.ToCharArray();
-
- int pos1 = 0;
- int pos2 = 0;
-
- for (;;)
- {
- if (pos1 == bytes1.Length)
- {
- return ((pos2 == bytes2.Length) ? 0 : -1);
- }
- if (pos2 == bytes2.Length)
- {
- return 1;
- }
-
- int byte1 = bytes1[pos1++];
- int byte2 = bytes2[pos2++];
-
- if (byte1 >= 65 && byte1 <= 90)
- {
- byte1 += 0x20;
- }
- if (byte2 >= 65 && byte2 <= 90)
- {
- byte2 += 0x20;
- }
-
- if (byte1 < byte2)
- {
- return -1;
- }
- if (byte1 > byte2)
- {
- return 1;
- }
- }
- }
-
- #endregion
- }
-}
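Note (illustration only, not part of the patch): the removed ZipFile detected TorrentZip archives by CRC-hashing the central directory against the "TORRENTZIPPED-XXXXXXXX" comment and by checking that entries follow TorrentZipStringCompare order. The snippet below restates that ordering rule as a standalone sketch; it is not the removed method itself.

internal static class TorrentZipOrder
{
    // Compare name characters left to right, folding only ASCII 'A'-'Z' to lower case;
    // a string that is a prefix of the other sorts first.
    internal static int Compare(string a, string b)
    {
        for (int i = 0; ; i++)
        {
            if (i == a.Length) return i == b.Length ? 0 : -1;
            if (i == b.Length) return 1;

            int ca = a[i], cb = b[i];
            if (ca >= 'A' && ca <= 'Z') ca += 0x20;
            if (cb >= 'A' && cb <= 'Z') cb += 0x20;

            if (ca != cb) return ca < cb ? -1 : 1;
        }
    }
}
// e.g. Compare("dir/A.rom", "dir/b.rom") < 0, so "dir/A.rom" sorts ahead of "dir/b.rom".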
diff --git a/SabreTools.Library/External/SupportedFiles/ZipFileEntry.cs b/SabreTools.Library/External/SupportedFiles/ZipFileEntry.cs
deleted file mode 100644
index 19d79322..00000000
--- a/SabreTools.Library/External/SupportedFiles/ZipFileEntry.cs
+++ /dev/null
@@ -1,1046 +0,0 @@
-using System;
-using System.Collections.Generic;
-using System.IO;
-using System.Security.Cryptography;
-using System.Text;
-
-using SabreTools.Library.Data;
-using SabreTools.Library.Tools;
-
-using Ionic.Zlib;
-
-namespace ROMVault2.SupportedFiles.Zip
-{
- ///
- /// Based on work by GordonJ for RomVault
- /// https://github.com/gjefferyes/RomVault/blob/master/ROMVault2/SupportedFiles/Zip/zipFile.cs
- ///
- public class ZipFileEntry : IEquatable<ZipFileEntry>
- {
- #region Private instance variables
-
- private readonly Stream _zipstream;
- private Stream _readStream;
- private Stream _writeStream;
- private string _fileName;
- private CompressionMethod _compressionMethod;
- private ArchiveVersion _versionMadeBy;
- private ArchiveVersion _versionNeeded;
- private GeneralPurposeBitFlag _generalPurposeBitFlag;
- private uint _lastMod;
- private uint _crc;
- private ulong _compressedSize;
- private ulong _uncompressedSize;
- private byte[] _extraField;
- private byte[] _comment;
- private InternalFileAttributes _internalFileAttributes;
- private uint _externalFileAttributes;
- private ulong _relativeOffset;
- private ulong _crc32Location;
- private ulong _extraLocation;
- private ulong _dataLocation;
- private bool _zip64;
- private bool _torrentZip;
- private byte[] _md5;
- private byte[] _sha1;
- private ZipReturn _fileStatus = ZipReturn.ZipUntested;
-
- #endregion
-
- #region Public facing variables
-
- public string FileName
- {
- get { return _fileName; }
- private set { _fileName = value; }
- }
- public GeneralPurposeBitFlag GeneralPurposeBitFlag
- {
- get { return _generalPurposeBitFlag; }
- private set { _generalPurposeBitFlag = value; }
- }
- public uint LastMod
- {
- get { return _lastMod; }
- set { _lastMod = value; }
- }
- public byte[] CRC
- {
- get { return BitConverter.GetBytes(_crc); }
- private set { _crc = BitConverter.ToUInt32(value, 0); }
- }
- public ulong UncompressedSize
- {
- get { return _uncompressedSize; }
- private set { _uncompressedSize = value; }
- }
- public string ExtraField
- {
- get { return Encoding.GetEncoding(858).GetString(_extraField); }
- set { _extraField = Utilities.StringToByteArray(Utilities.ConvertAsciiToHex(value)); }
- }
- public string Comment
- {
- get { return Encoding.GetEncoding(858).GetString(_comment); }
- set { _comment = Utilities.StringToByteArray(Utilities.ConvertAsciiToHex(value)); }
- }
- public ulong RelativeOffset
- {
- get { return _relativeOffset; }
- set { _relativeOffset = value; }
- }
- public bool Zip64
- {
- get { return _zip64; }
- private set { _zip64 = value; }
- }
- public bool TorrentZip
- {
- get { return _torrentZip; }
- private set { _torrentZip = value; }
- }
- public byte[] MD5
- {
- get { return _md5; }
- private set { _md5 = value; }
- }
- public byte[] SHA1
- {
- get { return _sha1; }
- private set { _sha1 = value; }
- }
- public ZipReturn FileStatus
- {
- get { return _fileStatus; }
- set { _fileStatus = value; }
- }
-
- #endregion
-
- #region Constructors
-
- ///
- /// Create a new ZipFileEntry using just a stream
- ///
- /// Stream representing the entry
- public ZipFileEntry(Stream zipstream)
- {
- _zipstream = zipstream;
- }
-
- ///
- /// Create a new ZipFileEntry from a stream and a filename
- ///
- /// Stream representing the entry
- /// Internal filename to use
- public ZipFileEntry(Stream zipstream, string filename, uint lastMod = Constants.TorrentZipFileDateTime)
- {
- _zip64 = false;
- _zipstream = zipstream;
- _generalPurposeBitFlag = GeneralPurposeBitFlag.DeflatingMaximumCompression;
- _compressionMethod = CompressionMethod.Deflated;
- _lastMod = lastMod;
-
- FileName = filename;
- }
-
- #endregion
-
- #region Central Directory
-
- ///
- /// Read the central directory entry from the input stream
- ///
- /// Status of the underlying stream
- public ZipReturn ReadCentralDirectory()
- {
- try
- {
- // Open the stream for reading
- BinaryReader br = new BinaryReader(_zipstream);
-
- // If the first bytes aren't a central directory header, log and return
- if (br.ReadUInt32() != Constants.CentralDirectoryHeaderSignature)
- {
- return ZipReturn.ZipCentralDirError;
- }
-
- // Now read in available information, skipping the unnecessary
- _versionMadeBy = (ArchiveVersion)br.ReadUInt16();
- _versionNeeded = (ArchiveVersion)br.ReadUInt16();
- _generalPurposeBitFlag = (GeneralPurposeBitFlag)br.ReadUInt16();
- _compressionMethod = (CompressionMethod)br.ReadUInt16();
-
- // If we have an unsupported compression method, log and return
- if (_compressionMethod != CompressionMethod.Stored && _compressionMethod != CompressionMethod.Deflated)
- {
- return ZipReturn.ZipCentralDirError;
- }
-
- // Keep reading available information, skipping the unnecessary
- _lastMod = br.ReadUInt32();
- _crc = br.ReadUInt32();
- _compressedSize = br.ReadUInt32();
- _uncompressedSize = br.ReadUInt32();
-
- // Now store some temp vars to find the filename, extra field, and comment
- ushort fileNameLength = br.ReadUInt16();
- ushort extraFieldLength = br.ReadUInt16();
- ushort fileCommentLength = br.ReadUInt16();
-
- // Even more reading available information, skipping the unnecessary
- br.ReadUInt16(); // Disk number start
- _internalFileAttributes = (InternalFileAttributes)br.ReadUInt16();
- _externalFileAttributes = br.ReadUInt32();
- _relativeOffset = br.ReadUInt32();
- byte[] fileNameBytes = br.ReadBytes(fileNameLength);
- _fileName = ((_generalPurposeBitFlag & GeneralPurposeBitFlag.LanguageEncodingFlag) == 0
- ? Encoding.GetEncoding(858).GetString(fileNameBytes)
- : Encoding.UTF8.GetString(fileNameBytes, 0, fileNameLength));
- _extraField = br.ReadBytes(extraFieldLength);
- _comment = br.ReadBytes(fileCommentLength);
-
- /*
- Full disclosure: this next section is in GordonJ's work but I honestly
- have no idea everything that it does. It seems to do something to figure
- out if it's Zip64, or possibly check for random things but it uses the
- extra field for this, which I do not fully understand. It's copied in
- its entirety below in the hope that it makes things better...
- */
-
- int pos = 0;
- while (extraFieldLength > pos)
- {
- ushort type = BitConverter.ToUInt16(_extraField, pos);
- pos += 2;
- ushort blockLength = BitConverter.ToUInt16(_extraField, pos);
- pos += 2;
- switch (type)
- {
- case 0x0001:
- Zip64 = true;
- if (UncompressedSize == 0xffffffff)
- {
- UncompressedSize = BitConverter.ToUInt64(_extraField, pos);
- pos += 8;
- }
- if (_compressedSize == 0xffffffff)
- {
- _compressedSize = BitConverter.ToUInt64(_extraField, pos);
- pos += 8;
- }
- if (_relativeOffset == 0xffffffff)
- {
- _relativeOffset = BitConverter.ToUInt64(_extraField, pos);
- pos += 8;
- }
- break;
- case 0x7075:
- //byte version = extraField[pos];
- pos += 1;
- uint nameCRC32 = BitConverter.ToUInt32(_extraField, pos);
- pos += 4;
-
- CRC32 crcTest = new CRC32();
- crcTest.SlurpBlock(fileNameBytes, 0, fileNameLength);
- uint fCRC = (uint)crcTest.Crc32Result;
-
- if (nameCRC32 != fCRC)
- {
- return ZipReturn.ZipCentralDirError;
- }
-
- int charLen = blockLength - 5;
-
- _fileName = Encoding.UTF8.GetString(_extraField, pos, charLen);
- pos += charLen;
-
- break;
- default:
- pos += blockLength;
- break;
- }
- }
- }
- catch
- {
- return ZipReturn.ZipCentralDirError;
- }
-
- return ZipReturn.ZipGood;
- }
-
- ///
- /// Write the central directory entry from the included stream
- ///
- /// Write out the data from the internal stream to the output stream
- public void WriteCentralDirectory(Stream output)
- {
- // Open the output stream for writing
- BinaryWriter bw = new BinaryWriter(output);
-
- // Create an empty extra field to start out with
- List<byte> extraField = new List<byte>();
-
- // Now get the uncompressed size (for Zip64 compatibility)
- uint uncompressedSize32;
- if (_uncompressedSize >= 0xffffffff)
- {
- _zip64 = true;
- uncompressedSize32 = 0xffffffff;
- extraField.AddRange(BitConverter.GetBytes(_uncompressedSize));
- }
- else
- {
- uncompressedSize32 = (uint)_uncompressedSize;
- }
-
- // Now get the compressed size (for Zip64 compatibility)
- uint compressedSize32;
- if (_compressedSize >= 0xffffffff)
- {
- _zip64 = true;
- compressedSize32 = 0xffffffff;
- extraField.AddRange(BitConverter.GetBytes(_compressedSize));
- }
- else
- {
- compressedSize32 = (uint)_compressedSize;
- }
-
- // Now get the relative offset (for Zip64 compatibility)
- uint relativeOffset32;
- if (_relativeOffset >= 0xffffffff)
- {
- _zip64 = true;
- relativeOffset32 = 0xffffffff;
- extraField.AddRange(BitConverter.GetBytes(_relativeOffset));
- }
- else
- {
- relativeOffset32 = (uint)_relativeOffset;
- }
-
- // If we wrote anything to the extra field, set the flag and size
- if (extraField.Count > 0)
- {
- ushort extraFieldLengthInternal = (ushort)extraField.Count;
- extraField.InsertRange(0, BitConverter.GetBytes((ushort)0x0001)); // id
- extraField.InsertRange(2, BitConverter.GetBytes(extraFieldLengthInternal)); // data length
- }
- ushort extraFieldLength = (ushort)extraField.Count;
-
- // Now check for a unicode filename and set the flag accordingly
- byte[] fileNameBytes;
- if (Utilities.IsUnicode(_fileName))
- {
- _generalPurposeBitFlag |= GeneralPurposeBitFlag.LanguageEncodingFlag;
- fileNameBytes = Encoding.UTF8.GetBytes(_fileName);
- }
- else
- {
- fileNameBytes = Encoding.GetEncoding(858).GetBytes(_fileName);
- }
- ushort fileNameLength = (ushort)fileNameBytes.Length;
-
- // Set the version needed to extract according to if it's Zip64
- ushort versionNeededToExtract = (ushort)(_zip64 ? ArchiveVersion.TorrentZip64 : ArchiveVersion.TorrentZip);
-
- // Now, write all of the data to the stream
- bw.Write(Constants.CentralDirectoryHeaderSignature);
- bw.Write((ushort)ArchiveVersion.MSDOSandOS2);
- bw.Write(versionNeededToExtract);
- bw.Write((ushort)_generalPurposeBitFlag);
- bw.Write((ushort)_compressionMethod);
- bw.Write(_lastMod);
- bw.Write(_crc);
- bw.Write(compressedSize32);
- bw.Write(uncompressedSize32);
- bw.Write(fileNameLength);
- bw.Write(extraFieldLength);
- bw.Write((ushort)0); // File comment length
- bw.Write((ushort)0); // Disk number start
- bw.Write((ushort)0); // Internal file attributes
- bw.Write((uint)0); // External file attributes
- bw.Write(relativeOffset32);
- bw.Write(fileNameBytes, 0, fileNameLength); // Only write first bytes if longer than allowed
- bw.Write(extraField.ToArray(), 0, extraFieldLength); // Only write the first bytes if longer than allowed
- // We have no file comment, so we don't have to write more
- }
-
- #endregion
-
- #region Header
-
- ///
- /// Read the local file header from the input stream
- ///
- /// Status of the underlying stream
- public ZipReturn ReadHeader()
- {
- try
- {
- // We assume that the file is torrentzip until proven otherwise
- _torrentZip = true;
-
- // Open the stream for reading
- BinaryReader br = new BinaryReader(_zipstream);
-
- // Set the position of the writer based on the entry information
- br.BaseStream.Seek((long)_relativeOffset, SeekOrigin.Begin);
-
- // If the first bytes aren't a local file header, log and return
- if (br.ReadUInt32() != Constants.LocalFileHeaderSignature)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
-
- // Now read in available information, comparing to the known data
- if (br.ReadUInt16() != (ushort)_versionNeeded)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- if (br.ReadUInt16() != (ushort)_generalPurposeBitFlag)
- {
- _torrentZip = false;
- }
- if (br.ReadUInt16() != (ushort)_compressionMethod)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- if (br.ReadUInt32() != _lastMod)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- if ((_generalPurposeBitFlag & GeneralPurposeBitFlag.ZeroedCRCAndSize) == 0 && br.ReadUInt32() != _crc)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
-
- uint readCompressedSize = br.ReadUInt32();
- // If we have Zip64, the compressed size should be 0xffffffff
- if (_zip64 && readCompressedSize != 0xffffffff && readCompressedSize != _compressedSize)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- // If we have the zeroed flag set, then no size should be included
- if ((_generalPurposeBitFlag & GeneralPurposeBitFlag.ZeroedCRCAndSize) == GeneralPurposeBitFlag.ZeroedCRCAndSize && readCompressedSize != 0)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- // If we don't have the zeroed flag set, then the size should match
- if (!_zip64 && (_generalPurposeBitFlag & GeneralPurposeBitFlag.ZeroedCRCAndSize) == 0 && readCompressedSize != _compressedSize)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
-
- uint readUncompressedSize = br.ReadUInt32();
- // If we have Zip64, the uncompressed size should be 0xffffffff
- if (_zip64 && readUncompressedSize != 0xffffffff && readUncompressedSize != _compressedSize)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- // If we have the zeroed flag set, then no size should be included
- if ((_generalPurposeBitFlag & GeneralPurposeBitFlag.ZeroedCRCAndSize) == GeneralPurposeBitFlag.ZeroedCRCAndSize && readUncompressedSize != 0)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- // If we don't have the zeroed flag set, then the size should match
- if (!_zip64 && (_generalPurposeBitFlag & GeneralPurposeBitFlag.ZeroedCRCAndSize) == 0 && readUncompressedSize != _uncompressedSize)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
-
- ushort fileNameLength = br.ReadUInt16();
- ushort extraFieldLength = br.ReadUInt16();
-
- byte[] fileNameBytes = br.ReadBytes(fileNameLength);
- string tempFileName = ((_generalPurposeBitFlag & GeneralPurposeBitFlag.LanguageEncodingFlag) == 0
- ? Encoding.GetEncoding(858).GetString(fileNameBytes)
- : Encoding.UTF8.GetString(fileNameBytes, 0, fileNameLength));
-
- byte[] extraField = br.ReadBytes(extraFieldLength);
-
- /*
- Full disclosure: this next section comes from GordonJ's work, and I don't
- fully understand everything it does. It walks the extra field to handle
- Zip64 size information and the Info-ZIP Unicode filename block. It's
- copied in its entirety below in the hope that it keeps things correct...
- */
-
- _zip64 = false;
- int pos = 0;
- while (extraFieldLength > pos)
- {
- ushort type = BitConverter.ToUInt16(extraField, pos);
- pos += 2;
- ushort blockLength = BitConverter.ToUInt16(extraField, pos);
- pos += 2;
- switch (type)
- {
- case 0x0001:
- Zip64 = true;
- if (readUncompressedSize == 0xffffffff)
- {
- ulong tLong = BitConverter.ToUInt64(extraField, pos);
- if (tLong != UncompressedSize)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- pos += 8;
- }
- if (readCompressedSize == 0xffffffff)
- {
- ulong tLong = BitConverter.ToUInt64(extraField, pos);
- if (tLong != _compressedSize)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- pos += 8;
- }
- break;
- case 0x7075:
- //byte version = extraField[pos];
- pos += 1;
- uint nameCRC32 = BitConverter.ToUInt32(extraField, pos);
- pos += 4;
-
- CRC32 crcTest = new CRC32();
- crcTest.SlurpBlock(fileNameBytes, 0, fileNameLength);
- uint fCRC = (uint)crcTest.Crc32Result;
-
- if (nameCRC32 != fCRC)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
-
- int charLen = blockLength - 5;
-
- tempFileName = Encoding.UTF8.GetString(extraField, pos, charLen);
- pos += charLen;
-
- break;
- default:
- pos += blockLength;
- break;
- }
- }
-
- // Back to code I understand
- if (!String.Equals(_fileName, tempFileName, StringComparison.InvariantCulture))
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
-
- // Set the position of the data
- _dataLocation = (ulong)_zipstream.Position;
-
- // If no data descriptor follows the compressed data, we're done
- if ((_generalPurposeBitFlag & GeneralPurposeBitFlag.ZeroedCRCAndSize) == 0)
- {
- return ZipReturn.ZipGood;
- }
-
- // Otherwise, compare the data after the file too
- _zipstream.Position += (long)_compressedSize;
-
- // The data descriptor's signature is optional; if it is present, the CRC follows it
- uint tempCrc = br.ReadUInt32();
- if (tempCrc == Constants.EndOfLocalFileHeaderSignature)
- {
- tempCrc = br.ReadUInt32();
- }
-
- if (tempCrc != _crc)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- if (br.ReadUInt32() != _compressedSize)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- if (br.ReadUInt32() != _uncompressedSize)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
- }
- catch
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
-
- return ZipReturn.ZipGood;
- }
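// The extra-field loop above recognizes two block types. As a hedged,
// self-contained sketch of that layout (the helper name is illustrative and
// not part of the original class; assumes the file's existing using
// directives for System and System.Text):
//
// Each block is a little-endian header ID (2 bytes), a block length (2 bytes),
// then payload. ID 0x0001 (Zip64 extended information) carries 8-byte sizes
// for the 32-bit header fields that were 0xffffffff; ID 0x7075 (Info-ZIP
// Unicode Path) carries a version byte, a CRC-32 of the header-encoded name,
// and a UTF-8 name.
private static void ParseExtraFieldSketch(byte[] extraField, ref ulong uncompressedSize, ref ulong compressedSize, ref string fileName)
{
    int pos = 0;
    while (pos + 4 <= extraField.Length)
    {
        ushort type = BitConverter.ToUInt16(extraField, pos);
        ushort blockLength = BitConverter.ToUInt16(extraField, pos + 2);
        int dataPos = pos + 4;

        switch (type)
        {
            case 0x0001:
                // Simplification: assume both 8-byte sizes are present, in this order
                if (blockLength >= 8)
                    uncompressedSize = BitConverter.ToUInt64(extraField, dataPos);
                if (blockLength >= 16)
                    compressedSize = BitConverter.ToUInt64(extraField, dataPos + 8);
                break;
            case 0x7075:
                // Skip the version byte and the 4-byte name CRC, keep the UTF-8 name
                fileName = Encoding.UTF8.GetString(extraField, dataPos + 5, blockLength - 5);
                break;
        }

        pos = dataPos + blockLength;
    }
}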
-
- ///
- /// Read the local file header from the input stream, assuming correctness
- ///
- /// Status of the underlying stream
- public ZipReturn ReadHeaderQuick()
- {
- try
- {
- // We assume that the file is torrentzip until proven otherwise
- _torrentZip = true;
-
- // Open the stream for reading
- BinaryReader br = new BinaryReader(_zipstream);
-
- // Set the position of the writer based on the entry information
- br.BaseStream.Seek((long)_relativeOffset, SeekOrigin.Begin);
-
- // If the first bytes aren't a local file header, log and return
- if (br.ReadUInt32() != Constants.LocalFileHeaderSignature)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
-
- // Now read in available information, ignoring unneeded
- _versionNeeded = (ArchiveVersion)br.ReadUInt16();
- _generalPurposeBitFlag = (GeneralPurposeBitFlag)br.ReadUInt16();
-
- // If the flag says there's no hash data, then we can't use quick mode
- if ((_generalPurposeBitFlag & GeneralPurposeBitFlag.ZeroedCRCAndSize) == GeneralPurposeBitFlag.ZeroedCRCAndSize)
- {
- return ZipReturn.ZipCannotFastOpen;
- }
-
- _compressionMethod = (CompressionMethod)br.ReadUInt16();
- _lastMod = br.ReadUInt32();
- _crc = br.ReadUInt32();
- _compressedSize = br.ReadUInt32();
- _uncompressedSize = br.ReadUInt32();
-
- ushort fileNameLength = br.ReadUInt16();
- ushort extraFieldLength = br.ReadUInt16();
-
- byte[] fileNameBytes = br.ReadBytes(fileNameLength);
- _fileName = ((_generalPurposeBitFlag & GeneralPurposeBitFlag.LanguageEncodingFlag) == 0
- ? Encoding.GetEncoding(858).GetString(fileNameBytes)
- : Encoding.UTF8.GetString(fileNameBytes, 0, fileNameLength));
-
- byte[] extraField = br.ReadBytes(extraFieldLength);
-
- /*
- Full disclosure: this next section comes from GordonJ's work, and I don't
- fully understand everything it does. It walks the extra field to handle
- Zip64 size information and the Info-ZIP Unicode filename block. It's
- copied in its entirety below in the hope that it keeps things correct...
- */
-
- _zip64 = false;
- int pos = 0;
- while (extraFieldLength > pos)
- {
- ushort type = BitConverter.ToUInt16(extraField, pos);
- pos += 2;
- ushort blockLength = BitConverter.ToUInt16(extraField, pos);
- pos += 2;
- switch (type)
- {
- case 0x0001:
- Zip64 = true;
- if (_uncompressedSize == 0xffffffff)
- {
- _uncompressedSize = BitConverter.ToUInt64(extraField, pos);
- pos += 8;
- }
- if (_compressedSize == 0xffffffff)
- {
- _compressedSize = BitConverter.ToUInt64(extraField, pos);
- pos += 8;
- }
- break;
- case 0x7075:
- pos += 1;
- uint nameCRC32 = BitConverter.ToUInt32(extraField, pos);
- pos += 4;
-
- CRC32 crcTest = new CRC32();
- crcTest.SlurpBlock(fileNameBytes, 0, fileNameLength);
- uint fCRC = (uint)crcTest.Crc32Result;
-
- if (nameCRC32 != fCRC)
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
-
- int charLen = blockLength - 5;
-
- FileName = Encoding.UTF8.GetString(extraField, pos, charLen);
-
- pos += charLen;
-
- break;
- default:
- pos += blockLength;
- break;
- }
- }
-
- // Set the position of the data
- _dataLocation = (ulong)_zipstream.Position;
- }
- catch
- {
- return ZipReturn.ZipLocalFileHeaderError;
- }
-
- return ZipReturn.ZipGood;
- }
-
- ///
- /// Write the local file header entry to the included stream
- ///
- public void WriteHeader()
- {
- // Open the stream for writing
- BinaryWriter bw = new BinaryWriter(_zipstream);
-
- // Create an empty extra field to start out with
- List<byte> extraField = new List<byte>();
-
- // Figure out if we're in Zip64 based on the size
- _zip64 = _uncompressedSize >= 0xffffffff;
-
- // Now check for a unicode filename and set the flag accordingly
- byte[] fileNameBytes;
- if (Utilities.IsUnicode(_fileName))
- {
- _generalPurposeBitFlag |= GeneralPurposeBitFlag.LanguageEncodingFlag;
- fileNameBytes = Encoding.UTF8.GetBytes(_fileName);
- }
- else
- {
- fileNameBytes = Encoding.GetEncoding(858).GetBytes(_fileName);
- }
-
- // Set the version needed to extract according to if it's Zip64
- ushort versionNeededToExtract = (ushort)(_zip64 ? ArchiveVersion.TorrentZip64 : ArchiveVersion.TorrentZip);
-
- // Now save the relative offset and write
- _relativeOffset = (ulong)_zipstream.Position;
- bw.Write(Constants.LocalFileHeaderSignature);
- bw.Write(versionNeededToExtract);
- bw.Write((ushort)_generalPurposeBitFlag);
- bw.Write((ushort)_compressionMethod);
- bw.Write(_lastMod);
-
- _crc32Location = (ulong)_zipstream.Position;
-
- // Now, write dummy bytes for crc, compressed size, and uncompressed size
- bw.Write(0xffffffff);
- bw.Write(0xffffffff);
- bw.Write(0xffffffff);
-
- // If we have Zip64, add the right things to the extra field
- if (_zip64)
- {
- for (int i = 0; i < 20; i++)
- {
- extraField.Add(0);
- }
- }
-
- // Write out the lengths and their associated fields
- ushort fileNameLength = (ushort)fileNameBytes.Length;
- bw.Write(fileNameLength);
-
- ushort extraFieldLength = (ushort)extraField.Count;
- bw.Write(extraFieldLength);
-
- bw.Write(fileNameBytes, 0, fileNameLength);
-
- _extraLocation = (ulong)_zipstream.Position;
- bw.Write(extraField.ToArray(), 0, extraFieldLength);
- }
-
- #endregion
-
- #region Read and Write
-
- ///
- /// Open the read file stream
- ///
- /// If compression mode is deflate, use the zipstream as is, otherwise decompress
- /// Output stream representing the correctly compressed stream
- /// Size of the stream regardless of compression
- /// Compression method to compare against
- /// Status of the underlying stream
- public ZipReturn OpenReadStream(bool raw, out Stream stream, out ulong streamSize, out CompressionMethod compressionMethod, out uint lastMod)
- {
- streamSize = 0;
- compressionMethod = _compressionMethod;
- lastMod = _lastMod;
-
- _readStream = null;
- _zipstream.Seek((long)_dataLocation, SeekOrigin.Begin);
-
- switch (_compressionMethod)
- {
- case CompressionMethod.Deflated:
- if (raw)
- {
- _readStream = _zipstream;
- streamSize = _compressedSize;
- }
- else
- {
- _readStream = new DeflateStream(_zipstream, CompressionMode.Decompress, true);
- streamSize = _uncompressedSize;
- }
- break;
- case CompressionMethod.Stored:
- _readStream = _zipstream;
- streamSize = _compressedSize;
- break;
- }
- stream = _readStream;
- return (stream == null ? ZipReturn.ZipErrorGettingDataStream : ZipReturn.ZipGood);
- }
-
- ///
- /// Close the read file stream
- ///
- /// Status of the underlying stream
- public ZipReturn CloseReadStream()
- {
- DeflateStream dfStream = _readStream as DeflateStream;
- if (dfStream != null)
- {
- dfStream.Close();
- dfStream.Dispose();
- }
- else
- {
- FileStream fsStream = _readStream as FileStream;
- if (fsStream != null)
- {
- fsStream.Close();
- fsStream.Dispose();
- }
- }
- return ZipReturn.ZipGood;
- }
-
- ///
- /// Open the write file stream
- ///
- /// If true, the incoming data is already compressed and is written as-is; otherwise it is compressed
- /// True if the output stream should be torrentzipped, false otherwise
- /// Uncompressed size of the stream
- /// Compression method to compare against
- /// Output stream representing the correctly compressed stream
- /// Status of the underlying stream
- public ZipReturn OpenWriteStream(bool raw, bool torrentZip, ulong uncompressedSize, CompressionMethod compressionMethod, out Stream stream)
- {
- _uncompressedSize = uncompressedSize;
- _compressionMethod = compressionMethod;
-
- WriteHeader();
- _dataLocation = (ulong)_zipstream.Position;
-
- if (raw)
- {
- _writeStream = _zipstream;
- _torrentZip = torrentZip;
- }
- else
- {
- if (compressionMethod == CompressionMethod.Stored)
- {
- _writeStream = _zipstream;
- _torrentZip = false;
- }
- else
- {
- _writeStream = new DeflateStream(_zipstream, CompressionMode.Compress, CompressionLevel.BestCompression, true);
- _torrentZip = true;
- }
- }
-
- stream = _writeStream;
- return (stream == null ? ZipReturn.ZipErrorGettingDataStream : ZipReturn.ZipGood);
- }
-
- ///
- /// Close the write file stream
- ///
- /// CRC to assign to the current stream
- /// Status of the underlying stream
- public ZipReturn CloseWriteStream(uint crc32)
- {
- DeflateStream dfStream = _writeStream as DeflateStream;
- if (dfStream != null)
- {
- dfStream.Flush();
- dfStream.Close();
- dfStream.Dispose();
- }
-
- _compressedSize = (ulong)_zipstream.Position - _dataLocation;
-
- if (_compressedSize == 0 && _uncompressedSize == 0)
- {
- AddDirectory();
- _compressedSize = (ulong)_zipstream.Position - _dataLocation;
- }
-
- _crc = crc32;
- WriteCompressedSize();
-
- return ZipReturn.ZipGood;
- }
-
- ///
- /// Write out the compressed size of the stream
- ///
- private void WriteCompressedSize()
- {
- // Save the current position before seeking
- long posNow = _zipstream.Position;
- _zipstream.Seek((long)_crc32Location, SeekOrigin.Begin);
-
- // Open the stream for writing
- BinaryWriter bw = new BinaryWriter(_zipstream);
-
- // Get the 32-bit compatible sizes
- uint compressedSize32;
- uint uncompressedSize32;
- if (_zip64)
- {
- compressedSize32 = 0xffffffff;
- uncompressedSize32 = 0xffffffff;
- }
- else
- {
- compressedSize32 = (uint)_compressedSize;
- uncompressedSize32 = (uint)_uncompressedSize;
- }
-
- // Now write the data
- bw.Write(_crc);
- bw.Write(compressedSize32);
- bw.Write(uncompressedSize32);
-
- // If we have Zip64, write additional data
- if (_zip64)
- {
- _zipstream.Seek((long)_extraLocation, SeekOrigin.Begin);
- bw.Write((ushort)0x0001); // id
- bw.Write((ushort)16); // data length
- bw.Write(_uncompressedSize);
- bw.Write(_compressedSize);
- }
-
- // Now seek back to the original position
- _zipstream.Seek(posNow, SeekOrigin.Begin);
- }
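// The pair WriteHeader/WriteCompressedSize above uses a common pattern:
// write 0xffffffff placeholders where the CRC and sizes belong, remember that
// offset, and patch it once the data has been written and measured. A minimal
// sketch of the same pattern on a bare stream (method and variable names are
// illustrative; assumes using System.IO):
private static void WritePatchedRecord(Stream output, byte[] payload, uint crc)
{
    BinaryWriter bw = new BinaryWriter(output);

    long patchLocation = output.Position;   // where the placeholders live
    bw.Write(0xffffffff);                   // CRC placeholder
    bw.Write(0xffffffff);                   // compressed-size placeholder

    long dataLocation = output.Position;
    bw.Write(payload, 0, payload.Length);
    uint compressedSize = (uint)(output.Position - dataLocation);

    long posNow = output.Position;          // patch, then restore the position
    output.Seek(patchLocation, SeekOrigin.Begin);
    bw.Write(crc);
    bw.Write(compressedSize);
    output.Seek(posNow, SeekOrigin.Begin);
}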
-
- #endregion
-
- #region Helpers
-
- ///
- /// Get the data from the current file, if not already checked
- ///
- public void Check()
- {
- // If the file has been tested or has an error, return
- if (_fileStatus != ZipReturn.ZipUntested)
- {
- return;
- }
-
- try
- {
- Stream stream = null;
- _zipstream.Seek((long)_dataLocation, SeekOrigin.Begin);
-
- switch (_compressionMethod)
- {
- case CompressionMethod.Deflated:
- stream = new DeflateStream(_zipstream, CompressionMode.Decompress, true);
- break;
- case CompressionMethod.Stored:
- stream = _zipstream;
- break;
- }
-
- if (stream == null)
- {
- _fileStatus = ZipReturn.ZipErrorGettingDataStream;
- return;
- }
-
- // Create the hashers
- uint tempCrc;
- OptimizedCRC crc = new OptimizedCRC();
- MD5 md5 = System.Security.Cryptography.MD5.Create();
- SHA1 sha1 = System.Security.Cryptography.SHA1.Create();
-
- // Now get the hash of the stream
- BinaryReader fs = new BinaryReader(stream);
-
- byte[] buffer = new byte[1024];
- int read;
- while ((read = fs.Read(buffer, 0, buffer.Length)) > 0)
- {
- crc.Update(buffer, 0, read);
- md5.TransformBlock(buffer, 0, read, buffer, 0);
- sha1.TransformBlock(buffer, 0, read, buffer, 0);
- }
-
- crc.Update(buffer, 0, 0);
- md5.TransformFinalBlock(buffer, 0, 0);
- sha1.TransformFinalBlock(buffer, 0, 0);
-
- tempCrc = crc.UnsignedValue;
- _md5 = md5.Hash;
- _sha1 = sha1.Hash;
-
- // Dispose of the hashers
- crc.Dispose();
- md5.Dispose();
- sha1.Dispose();
-
- if (_compressionMethod == CompressionMethod.Deflated)
- {
- stream.Close();
- stream.Dispose();
- }
-
- _fileStatus = (_crc == tempCrc ? ZipReturn.ZipGood : ZipReturn.ZipCRCDecodeError);
- }
- catch
- {
- _fileStatus = ZipReturn.ZipDecodeError;
- }
- }
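// Check() drives three hashers over a single pass of the data. The incremental
// protocol it relies on is the standard HashAlgorithm one: TransformBlock per
// chunk, then a zero-length TransformFinalBlock before reading .Hash. A small
// stand-alone sketch (assumes using System, System.Security.Cryptography and
// System.Text):
using (MD5 md5 = MD5.Create())
{
    byte[] part1 = Encoding.ASCII.GetBytes("1234");
    byte[] part2 = Encoding.ASCII.GetBytes("56789");

    md5.TransformBlock(part1, 0, part1.Length, part1, 0);
    md5.TransformBlock(part2, 0, part2.Length, part2, 0);
    md5.TransformFinalBlock(new byte[0], 0, 0);

    Console.WriteLine(BitConverter.ToString(md5.Hash));  // MD5 of "123456789"
}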
-
- ///
- /// Add a directory marking to a local file
- ///
- public void AddDirectory()
- {
- // Write the two-byte deflate encoding of an empty final static block
- // (0x03 0x00), which serves as the payload for a directory/empty entry
- Stream ds = _zipstream;
- ds.WriteByte(03);
- ds.WriteByte(00);
- }
-
- ///
- /// Check if an entry equals another (use only name for now)
- ///
- ///
- ///
- public bool Equals(ZipFileEntry zfe)
- {
- return (String.Equals(_fileName, zfe.FileName, StringComparison.InvariantCultureIgnoreCase));
- }
-
- #endregion
- }
-}
diff --git a/SabreTools.Library/External/Zlib/CRC32.cs b/SabreTools.Library/External/Zlib/CRC32.cs
deleted file mode 100644
index 55e3c88d..00000000
--- a/SabreTools.Library/External/Zlib/CRC32.cs
+++ /dev/null
@@ -1,878 +0,0 @@
-// CRC32.cs
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2011 Dino Chiesa.
-// All rights reserved.
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-//
-// Last Saved: <2011-August-02 18:25:54>
-//
-// ------------------------------------------------------------------
-//
-// This module defines the CRC32 class, which can do the CRC32 algorithm, using
-// arbitrary starting polynomials, and bit reversal. The bit reversal is what
-// distinguishes this CRC-32 used in BZip2 from the CRC-32 that is used in PKZIP
-// files, or GZIP files. This class does both.
-//
-// ------------------------------------------------------------------
-
-using System;
-using System.Security.Cryptography;
-using System.Runtime.InteropServices;
-
-namespace Ionic.Zlib
-{
- ///
- /// Computes a CRC-32. The CRC-32 algorithm is parameterized - you
- /// can set the polynomial and enable or disable bit
- /// reversal. This can be used for GZIP, BZip2, or ZIP.
- ///
- ///
- /// This type is used internally by DotNetZip; it is generally not used
- /// directly by applications wishing to create, read, or manipulate zip
- /// archive files.
- ///
-
- [Guid("ebc25cf6-9120-4283-b972-0e5520d0000C")]
- [System.Runtime.InteropServices.ComVisible(true)]
-#if !NETCF
- [System.Runtime.InteropServices.ClassInterface(System.Runtime.InteropServices.ClassInterfaceType.AutoDispatch)]
-#endif
- public class CRC32
- {
- ///
- /// Indicates the total number of bytes applied to the CRC.
- ///
- public Int64 TotalBytesRead
- {
- get
- {
- return _TotalBytesRead;
- }
- }
-
- ///
- /// Indicates the current CRC for all blocks slurped in.
- ///
- public Int32 Crc32Result
- {
- get
- {
- return unchecked((Int32)(~_register));
- }
- }
- public uint Crc32ResultU
- {
- get
- {
- return ~_register;
- }
- }
-
-
- ///
- /// Returns the CRC32 for the specified stream.
- ///
- /// The stream over which to calculate the CRC32
- /// the CRC32 calculation
- public Int32 GetCrc32(System.IO.Stream input)
- {
- return GetCrc32AndCopy(input, null);
- }
-
- ///
- /// Returns the CRC32 for the specified stream, and writes the input into the
- /// output stream.
- ///
- /// The stream over which to calculate the CRC32
- /// The stream into which to deflate the input
- /// the CRC32 calculation
- public Int32 GetCrc32AndCopy(System.IO.Stream input, System.IO.Stream output)
- {
- if (input == null)
- {
- throw new Exception("The input stream must not be null.");
- }
-
- unchecked
- {
- byte[] buffer = new byte[BUFFER_SIZE];
- int readSize = BUFFER_SIZE;
-
- _TotalBytesRead = 0;
- int count = input.Read(buffer, 0, readSize);
- if (output != null)
- {
- output.Write(buffer, 0, count);
- }
- _TotalBytesRead += count;
- while (count > 0)
- {
- SlurpBlock(buffer, 0, count);
- count = input.Read(buffer, 0, readSize);
- if (output != null) output.Write(buffer, 0, count);
- _TotalBytesRead += count;
- }
-
- return (Int32)(~_register);
- }
- }
-
- ///
- /// Get the CRC32 for the given (word,byte) combo. This is a
- /// computation defined by PKzip for PKZIP 2.0 (weak) encryption.
- ///
- /// The word to start with.
- /// The byte to combine it with.
- /// The CRC-ized result.
- public Int32 ComputeCrc32(Int32 W, byte B)
- {
- return _InternalComputeCrc32((UInt32)W, B);
- }
-
- internal Int32 _InternalComputeCrc32(UInt32 W, byte B)
- {
- return (Int32)(crc32Table[(W ^ B) & 0xFF] ^ (W >> 8));
- }
-
- ///
- /// Update the value for the running CRC32 using the given block of bytes.
- /// This is useful when using the CRC32() class in a Stream.
- ///
- /// block of bytes to slurp
- /// starting point in the block
- /// how many bytes within the block to slurp
- public void SlurpBlock(byte[] block, int offset, int count)
- {
- if (block == null)
- {
- throw new Exception("The data buffer must not be null.");
- }
-
- // bzip algorithm
- for (int i = 0; i < count; i++)
- {
- int x = offset + i;
- byte b = block[x];
- if (this.reverseBits)
- {
- UInt32 temp = (_register >> 24) ^ b;
- _register = (_register << 8) ^ crc32Table[temp];
- }
- else
- {
- UInt32 temp = (_register & 0x000000FF) ^ b;
- _register = (_register >> 8) ^ crc32Table[temp];
- }
- }
- _TotalBytesRead += count;
- }
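// Usage sketch for the zip/gzip-style configuration of this class (no bit
// reversal, polynomial 0xEDB88320): slurp a buffer, then read Crc32Result.
// 0xCBF43926 is the standard CRC-32 check value for the ASCII bytes of
// "123456789". (Assumes using System and System.Text.)
CRC32 crc = new CRC32();
byte[] check = Encoding.ASCII.GetBytes("123456789");
crc.SlurpBlock(check, 0, check.Length);
Console.WriteLine("0x{0:X8}", (uint)crc.Crc32Result);   // 0xCBF43926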
-
-
- ///
- /// Process one byte in the CRC.
- ///
- /// the byte to include into the CRC .
- public void UpdateCRC(byte b)
- {
- if (this.reverseBits)
- {
- UInt32 temp = (_register >> 24) ^ b;
- _register = (_register << 8) ^ crc32Table[temp];
- }
- else
- {
- UInt32 temp = (_register & 0x000000FF) ^ b;
- _register = (_register >> 8) ^ crc32Table[temp];
- }
- }
-
- ///
- /// Process a run of N identical bytes into the CRC.
- ///
- ///
- ///
- /// This method serves as an optimization for updating the CRC when a
- /// run of identical bytes is found. Rather than passing in a buffer of
- /// length n, containing all identical bytes b, this method accepts the
- /// byte value and the length of the (virtual) buffer - the length of
- /// the run.
- ///
- ///
- /// the byte to include into the CRC.
- /// the number of times that byte should be repeated.
- public void UpdateCRC(byte b, int n)
- {
- while (n-- > 0)
- {
- if (this.reverseBits)
- {
- uint temp = (_register >> 24) ^ b;
- _register = (_register << 8) ^ crc32Table[(temp >= 0)
- ? temp
- : (temp + 256)];
- }
- else
- {
- UInt32 temp = (_register & 0x000000FF) ^ b;
- _register = (_register >> 8) ^ crc32Table[(temp >= 0)
- ? temp
- : (temp + 256)];
-
- }
- }
- }
-
- private static uint ReverseBits(uint data)
- {
- unchecked
- {
- uint ret = data;
- ret = (ret & 0x55555555) << 1 | (ret >> 1) & 0x55555555;
- ret = (ret & 0x33333333) << 2 | (ret >> 2) & 0x33333333;
- ret = (ret & 0x0F0F0F0F) << 4 | (ret >> 4) & 0x0F0F0F0F;
- ret = (ret << 24) | ((ret & 0xFF00) << 8) | ((ret >> 8) & 0xFF00) | (ret >> 24);
- return ret;
- }
- }
-
- private static byte ReverseBits(byte data)
- {
- unchecked
- {
- uint u = (uint)data * 0x00020202;
- uint m = 0x01044010;
- uint s = u & m;
- uint t = (u << 2) & (m << 1);
- return (byte)((0x01001001 * (s + t)) >> 24);
- }
- }
-
- private void GenerateLookupTable()
- {
- crc32Table = new UInt32[256];
- unchecked
- {
- UInt32 dwCrc;
- byte i = 0;
- do
- {
- dwCrc = i;
- for (byte j = 8; j > 0; j--)
- {
- if ((dwCrc & 1) == 1)
- {
- dwCrc = (dwCrc >> 1) ^ dwPolynomial;
- }
- else
- {
- dwCrc >>= 1;
- }
- }
- if (reverseBits)
- {
- crc32Table[ReverseBits(i)] = ReverseBits(dwCrc);
- }
- else
- {
- crc32Table[i] = dwCrc;
- }
- i++;
- } while (i != 0);
- }
-
-#if VERBOSE
- Console.WriteLine();
- Console.WriteLine("private static readonly UInt32[] crc32Table = {");
- for (int i = 0; i < crc32Table.Length; i+=4)
- {
- Console.Write(" ");
- for (int j=0; j < 4; j++)
- {
- Console.Write(" 0x{0:X8}U,", crc32Table[i+j]);
- }
- Console.WriteLine();
- }
- Console.WriteLine("};");
- Console.WriteLine();
-#endif
- }
-
- private uint gf2_matrix_times(uint[] matrix, uint vec)
- {
- uint sum = 0;
- int i = 0;
- while (vec != 0)
- {
- if ((vec & 0x01) == 0x01)
- sum ^= matrix[i];
- vec >>= 1;
- i++;
- }
- return sum;
- }
-
- private void gf2_matrix_square(uint[] square, uint[] mat)
- {
- for (int i = 0; i < 32; i++)
- square[i] = gf2_matrix_times(mat, mat[i]);
- }
-
- ///
- /// Combines the given CRC32 value with the current running total.
- ///
- ///
- /// This is useful when using a divide-and-conquer approach to
- /// calculating a CRC. Multiple threads can each calculate a
- /// CRC32 on a segment of the data, and then combine the
- /// individual CRC32 values at the end.
- ///
- /// the crc value to be combined with this one
- /// the length of data the CRC value was calculated on
- public void Combine(int crc, int length)
- {
- uint[] even = new uint[32]; // even-power-of-two zeros operator
- uint[] odd = new uint[32]; // odd-power-of-two zeros operator
-
- if (length == 0)
- {
- return;
- }
-
- uint crc1 = ~_register;
- uint crc2 = (uint)crc;
-
- // put operator for one zero bit in odd
- odd[0] = this.dwPolynomial; // the CRC-32 polynomial
- uint row = 1;
- for (int i = 1; i < 32; i++)
- {
- odd[i] = row;
- row <<= 1;
- }
-
- // put operator for two zero bits in even
- gf2_matrix_square(even, odd);
-
- // put operator for four zero bits in odd
- gf2_matrix_square(odd, even);
-
- uint len2 = (uint)length;
-
- // apply len2 zeros to crc1 (first square will put the operator for one
- // zero byte, eight zero bits, in even)
- do
- {
- // apply zeros operator for this bit of len2
- gf2_matrix_square(even, odd);
-
- if ((len2 & 1) == 1)
- {
- crc1 = gf2_matrix_times(even, crc1);
- }
- len2 >>= 1;
-
- if (len2 == 0)
- {
- break;
- }
-
- // another iteration of the loop with odd and even swapped
- gf2_matrix_square(odd, even);
- if ((len2 & 1) == 1)
- {
- crc1 = gf2_matrix_times(odd, crc1);
- }
- len2 >>= 1;
- } while (len2 != 0);
-
- crc1 ^= crc2;
-
- _register = ~crc1;
-
- //return (int) crc1;
- return;
- }
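// Sketch of the divide-and-conquer use described above: CRC each half of a
// buffer independently, then fold the second CRC (and its length) into the
// first. The combined value should match the CRC of the whole buffer.
// (Assumes using System and System.Text.)
byte[] whole = Encoding.ASCII.GetBytes("123456789");

CRC32 left = new CRC32();
left.SlurpBlock(whole, 0, 4);                 // "1234"

CRC32 right = new CRC32();
right.SlurpBlock(whole, 4, 5);                // "56789"

left.Combine(right.Crc32Result, 5);           // fold in the right half

CRC32 reference = new CRC32();
reference.SlurpBlock(whole, 0, whole.Length);
Console.WriteLine(left.Crc32Result == reference.Crc32Result);   // True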
-
- ///
- /// Create an instance of the CRC32 class using the default settings: no
- /// bit reversal, and a polynomial of 0xEDB88320.
- ///
- public CRC32()
- : this(false)
- {
- }
-
- ///
- /// Create an instance of the CRC32 class, specifying whether to reverse
- /// data bits or not.
- ///
- ///
- /// specify true if the instance should reverse data bits.
- ///
- ///
- ///
- /// In the CRC-32 used by BZip2, the bits are reversed. Therefore if you
- /// want a CRC32 with compatibility with BZip2, you should pass true
- /// here. In the CRC-32 used by GZIP and PKZIP, the bits are not
- /// reversed; Therefore if you want a CRC32 with compatibility with
- /// those, you should pass false.
- ///
- ///
- public CRC32(bool reverseBits) :
- this(unchecked((int)0xEDB88320), reverseBits)
- {
- }
-
- ///
- /// Create an instance of the CRC32 class, specifying the polynomial and
- /// whether to reverse data bits or not.
- ///
- ///
- /// The polynomial to use for the CRC, expressed in the reversed (LSB)
- /// format: the highest ordered bit in the polynomial value is the
- /// coefficient of the 0th power; the second-highest order bit is the
- /// coefficient of the 1st power, and so on. Expressed this way, the
- /// polynomial for the CRC-32 used in IEEE 802.3 is 0xEDB88320.
- ///
- ///
- /// specify true if the instance should reverse data bits.
- ///
- ///
- ///
- ///
- /// In the CRC-32 used by BZip2, the bits are reversed. Therefore if you
- /// want a CRC32 with compatibility with BZip2, you should pass true
- /// here for the reverseBits parameter. In the CRC-32 used by
- /// GZIP and PKZIP, the bits are not reversed; Therefore if you want a
- /// CRC32 with compatibility with those, you should pass false for the
- /// reverseBits parameter.
- ///
- ///
- public CRC32(int polynomial, bool reverseBits)
- {
- this.reverseBits = reverseBits;
- this.dwPolynomial = (uint)polynomial;
- this.GenerateLookupTable();
- }
-
- ///
- /// Reset the CRC-32 class - clear the CRC "remainder register."
- ///
- ///
- ///
- /// Use this when employing a single instance of this class to compute
- /// multiple, distinct CRCs on multiple, distinct data blocks.
- ///
- ///
- public void Reset()
- {
- _register = 0xFFFFFFFFU;
- }
-
- // private member vars
- private UInt32 dwPolynomial;
- private Int64 _TotalBytesRead;
- private bool reverseBits;
- private UInt32[] crc32Table;
- private const int BUFFER_SIZE = 8192;
- private UInt32 _register = 0xFFFFFFFFU;
- }
-
- ///
- /// A Stream that calculates a CRC32 (a checksum) on all bytes read,
- /// or on all bytes written.
- ///
- ///
- ///
- ///
- /// This class can be used to verify the CRC of a ZipEntry when
- /// reading from a stream, or to calculate a CRC when writing to a
- /// stream. The stream should be used to either read, or write, but
- /// not both. If you intermix reads and writes, the results are not
- /// defined.
- ///
- ///
- ///
- /// This class is intended primarily for use internally by the
- /// DotNetZip library.
- ///
- ///
- public class CrcCalculatorStream : System.IO.Stream, System.IDisposable
- {
- private static readonly Int64 UnsetLengthLimit = -99;
-
- internal System.IO.Stream _innerStream;
- private CRC32 _Crc32;
- private Int64 _lengthLimit = -99;
- private bool _leaveOpen;
-
- ///
- /// The default constructor.
- ///
- ///
- ///
- /// Instances returned from this constructor will leave the underlying
- /// stream open upon Close(). The stream uses the default CRC32
- /// algorithm, which implies a polynomial of 0xEDB88320.
- ///
- ///
- /// The underlying stream
- public CrcCalculatorStream(System.IO.Stream stream)
- : this(true, CrcCalculatorStream.UnsetLengthLimit, stream, null)
- {
- }
-
- ///
- /// The constructor allows the caller to specify how to handle the
- /// underlying stream at close.
- ///
- ///
- ///
- /// The stream uses the default CRC32 algorithm, which implies a
- /// polynomial of 0xEDB88320.
- ///
- ///
- /// The underlying stream
- /// true to leave the underlying stream
- /// open upon close of the CrcCalculatorStream; false otherwise.
- public CrcCalculatorStream(System.IO.Stream stream, bool leaveOpen)
- : this(leaveOpen, CrcCalculatorStream.UnsetLengthLimit, stream, null)
- {
- }
-
- ///
- /// A constructor allowing the specification of the length of the stream
- /// to read.
- ///
- ///
- ///
- /// The stream uses the default CRC32 algorithm, which implies a
- /// polynomial of 0xEDB88320.
- ///
- ///
- /// Instances returned from this constructor will leave the underlying
- /// stream open upon Close().
- ///
- ///
- /// The underlying stream
- /// The length of the stream to slurp
- public CrcCalculatorStream(System.IO.Stream stream, Int64 length)
- : this(true, length, stream, null)
- {
- if (length < 0)
- {
- throw new ArgumentException("length");
- }
- }
-
- ///
- /// A constructor allowing the specification of the length of the stream
- /// to read, as well as whether to keep the underlying stream open upon
- /// Close().
- ///
- ///
- ///
- /// The stream uses the default CRC32 algorithm, which implies a
- /// polynomial of 0xEDB88320.
- ///
- ///
- /// The underlying stream
- /// The length of the stream to slurp
- /// true to leave the underlying stream
- /// open upon close of the CrcCalculatorStream; false otherwise.
- public CrcCalculatorStream(System.IO.Stream stream, Int64 length, bool leaveOpen)
- : this(leaveOpen, length, stream, null)
- {
- if (length < 0)
- {
- throw new ArgumentException("length");
- }
- }
-
- ///
- /// A constructor allowing the specification of the length of the stream
- /// to read, as well as whether to keep the underlying stream open upon
- /// Close(), and the CRC32 instance to use.
- ///
- ///
- ///
- /// The stream uses the specified CRC32 instance, which allows the
- /// application to specify how the CRC gets calculated.
- ///
- ///
- /// The underlying stream
- /// The length of the stream to slurp
- /// true to leave the underlying stream
- /// open upon close of the CrcCalculatorStream; false otherwise.
- /// the CRC32 instance to use to calculate the CRC32
- public CrcCalculatorStream(System.IO.Stream stream, Int64 length, bool leaveOpen,
- CRC32 crc32)
- : this(leaveOpen, length, stream, crc32)
- {
- if (length < 0)
- {
- throw new ArgumentException("length");
- }
- }
-
- // This ctor is private - no validation is done here. This is to allow the use
- // of a (specific) negative value for the _lengthLimit, to indicate that there
- // is no length set. So we validate the length limit in those ctors that use an
- // explicit param, otherwise we don't validate, because it could be our special
- // value.
- private CrcCalculatorStream
- (bool leaveOpen, Int64 length, System.IO.Stream stream, CRC32 crc32)
- : base()
- {
- _innerStream = stream;
- _Crc32 = crc32 ?? new CRC32();
- _lengthLimit = length;
- _leaveOpen = leaveOpen;
- }
-
- ///
- /// Gets the total number of bytes run through the CRC32 calculator.
- ///
- ///
- ///
- /// This is either the total number of bytes read, or the total number of
- /// bytes written, depending on the direction of this stream.
- ///
- public Int64 TotalBytesSlurped
- {
- get { return _Crc32.TotalBytesRead; }
- }
-
- ///
- /// Provides the current CRC for all blocks slurped in.
- ///
- ///
- ///
- /// The running total of the CRC is kept as data is written or read
- /// through the stream. read this property after all reads or writes to
- /// get an accurate CRC for the entire stream.
- ///
- ///
- public Int32 Crc
- {
- get { return _Crc32.Crc32Result; }
- }
-
- ///
- /// Indicates whether the underlying stream will be left open when the
- /// CrcCalculatorStream is Closed.
- ///
- ///
- ///
- /// Set this at any point before calling Close().
- ///
- ///
- public bool LeaveOpen
- {
- get { return _leaveOpen; }
- set { _leaveOpen = value; }
- }
-
- ///
- /// Read from the stream
- ///
- /// the buffer to read
- /// the offset at which to start
- /// the number of bytes to read
- /// the number of bytes actually read
- public override int Read(byte[] buffer, int offset, int count)
- {
- int bytesToRead = count;
-
- // Need to limit the # of bytes returned, if the stream is intended to have
- // a definite length. This is especially useful when returning a stream for
- // the uncompressed data directly to the application. The app won't
- // necessarily read only the UncompressedSize number of bytes. For example,
- // wrapping the stream returned from OpenReader() in a StreamReader and
- // calling ReadToEnd() on it can "over-read" the zip data and produce a
- // corrupt string. The length limit prevents that problem.
-
- if (_lengthLimit != CrcCalculatorStream.UnsetLengthLimit)
- {
- if (_Crc32.TotalBytesRead >= _lengthLimit)
- {
- return 0; // EOF
- }
- Int64 bytesRemaining = _lengthLimit - _Crc32.TotalBytesRead;
- if (bytesRemaining < count)
- {
- bytesToRead = (int)bytesRemaining;
- }
- }
- int n = _innerStream.Read(buffer, offset, bytesToRead);
- if (n > 0)
- {
- _Crc32.SlurpBlock(buffer, offset, n);
- }
- return n;
- }
-
- ///
- /// Write to the stream.
- ///
- /// the buffer from which to write
- /// the offset at which to start writing
- /// the number of bytes to write
- public override void Write(byte[] buffer, int offset, int count)
- {
- if (count > 0)
- {
- _Crc32.SlurpBlock(buffer, offset, count);
- }
- _innerStream.Write(buffer, offset, count);
- }
-
- ///
- /// Indicates whether the stream supports reading.
- ///
- public override bool CanRead
- {
- get { return _innerStream.CanRead; }
- }
-
- ///
- /// Indicates whether the stream supports seeking.
- ///
- ///
- ///
- /// Always returns false.
- ///
- ///
- public override bool CanSeek
- {
- get { return false; }
- }
-
- ///
- /// Indicates whether the stream supports writing.
- ///
- public override bool CanWrite
- {
- get { return _innerStream.CanWrite; }
- }
-
- ///
- /// Flush the stream.
- ///
- public override void Flush()
- {
- _innerStream.Flush();
- }
-
- ///
- /// Returns the length of the underlying stream.
- ///
- public override long Length
- {
- get
- {
- if (_lengthLimit == CrcCalculatorStream.UnsetLengthLimit)
- {
- return _innerStream.Length;
- }
- else
- {
- return _lengthLimit;
- }
- }
- }
-
- ///
- /// The getter for this property returns the total bytes read.
- /// If you use the setter, it will throw a NotSupportedException.
- ///
- public override long Position
- {
- get { return _Crc32.TotalBytesRead; }
- set { throw new NotSupportedException(); }
- }
-
- ///
- /// Seeking is not supported on this stream. This method always throws a NotSupportedException.
- ///
- ///
- /// N/A
- /// N/A
- /// N/A
- public override long Seek(long offset, System.IO.SeekOrigin origin)
- {
- throw new NotSupportedException();
- }
-
- ///
- /// This method always throws a NotSupportedException.
- ///
- ///
- /// N/A
- public override void SetLength(long value)
- {
- throw new NotSupportedException();
- }
-
- void IDisposable.Dispose()
- {
- Close();
- }
-
- ///
- /// Closes the stream.
- ///
- public override void Close()
- {
- base.Close();
- if (!_leaveOpen)
- {
- _innerStream.Close();
- }
- }
- }
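// Usage sketch for CrcCalculatorStream: wrap a source stream, read it through
// the wrapper, then pick up the CRC and byte count. The length argument caps
// the read at the expected uncompressed size, as described in Read() above.
// (Assumes using System, System.IO and System.Text.)
byte[] payload = Encoding.ASCII.GetBytes("123456789");
using (MemoryStream source = new MemoryStream(payload))
using (CrcCalculatorStream crcStream = new CrcCalculatorStream(source, payload.Length, true))
{
    byte[] buffer = new byte[4096];
    while (crcStream.Read(buffer, 0, buffer.Length) > 0) { }

    Console.WriteLine(crcStream.TotalBytesSlurped);        // 9
    Console.WriteLine("0x{0:X8}", (uint)crcStream.Crc);    // 0xCBF43926
}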
-
- public class CRC32Hash : HashAlgorithm
- {
- private CRC32 _Crc32 = new CRC32();
-
- public override void Initialize()
- {
- _Crc32.Reset();
- }
-
- protected override void HashCore(byte[] buffer, int start, int length)
- {
- _Crc32.SlurpBlock(buffer, start, length);
- }
-
- protected override byte[] HashFinal()
- {
- uint crcValue = (uint)_Crc32.Crc32Result;
- HashValue = new[]
- {
- (byte) ((crcValue >> 24) & 0xff),
- (byte) ((crcValue >> 16) & 0xff),
- (byte) ((crcValue >> 8) & 0xff),
- (byte) (crcValue & 0xff)
- };
- return HashValue;
- }
-
- public override int HashSize
- {
- get { return 32; }
- }
- }
-}
\ No newline at end of file
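// Usage sketch for the CRC32Hash class deleted above: the standard
// HashAlgorithm entry points drive SlurpBlock, and HashFinal returns the value
// as four big-endian bytes (CB-F4-39-26 for the ASCII bytes of "123456789").
// (Assumes using System and System.Text.)
using (CRC32Hash hasher = new CRC32Hash())
{
    byte[] hash = hasher.ComputeHash(Encoding.ASCII.GetBytes("123456789"));
    Console.WriteLine(BitConverter.ToString(hash));   // CB-F4-39-26
}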
diff --git a/SabreTools.Library/External/Zlib/Deflate.cs b/SabreTools.Library/External/Zlib/Deflate.cs
deleted file mode 100644
index 87e68dfc..00000000
--- a/SabreTools.Library/External/Zlib/Deflate.cs
+++ /dev/null
@@ -1,1936 +0,0 @@
-// Deflate.cs
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
-// All rights reserved.
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-//
-// last saved (in emacs):
-// Time-stamp: <2011-August-03 19:52:15>
-//
-// ------------------------------------------------------------------
-//
-// This module defines logic for handling the Deflate or compression.
-//
-// This code is based on multiple sources:
-// - the original zlib v1.2.3 source, which is Copyright (C) 1995-2005 Jean-loup Gailly.
-// - the original jzlib, which is Copyright (c) 2000-2003 ymnk, JCraft,Inc.
-//
-// However, this code is significantly different from both.
-// The object model is not the same, and many of the behaviors are different.
-//
-// In keeping with the license for these other works, the copyrights for
-// jzlib and zlib are here.
-//
-// -----------------------------------------------------------------------
-// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in
-// the documentation and/or other materials provided with the distribution.
-//
-// 3. The names of the authors may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
-// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
-// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
-// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// -----------------------------------------------------------------------
-//
-// This program is based on zlib-1.1.3; credit to authors
-// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
-// and contributors of zlib.
-//
-// -----------------------------------------------------------------------
-
-using System;
-
-namespace Ionic.Zlib
-{
- internal enum BlockState
- {
- NeedMore = 0, // block not completed, need more input or more output
- BlockDone, // block flush performed
- FinishStarted, // finish started, need only more output at next deflate
- FinishDone // finish done, accept no more input or output
- }
-
- internal enum DeflateFlavor
- {
- Store,
- Fast,
- Slow
- }
-
- internal sealed class DeflateManager
- {
- private static readonly int MEM_LEVEL_MAX = 9;
- private static readonly int MEM_LEVEL_DEFAULT = 8;
-
- internal delegate BlockState CompressFunc(FlushType flush);
-
- internal class Config
- {
- // Use a faster search when the previous match is longer than this
- internal int GoodLength; // reduce lazy search above this match length
-
- // Attempt to find a better match only when the current match is
- // strictly smaller than this value. This mechanism is used only for
- // compression levels >= 4. For levels 1,2,3: MaxLazy is actually
- // MaxInsertLength. (See DeflateFast)
-
- internal int MaxLazy; // do not perform lazy search above this match length
-
- internal int NiceLength; // quit search above this match length
-
- // To speed up deflation, hash chains are never searched beyond this
- // length. A higher limit improves compression ratio but degrades the speed.
-
- internal int MaxChainLength;
-
- internal DeflateFlavor Flavor;
-
- private Config(int goodLength, int maxLazy, int niceLength, int maxChainLength, DeflateFlavor flavor)
- {
- this.GoodLength = goodLength;
- this.MaxLazy = maxLazy;
- this.NiceLength = niceLength;
- this.MaxChainLength = maxChainLength;
- this.Flavor = flavor;
- }
-
- public static Config Lookup(CompressionLevel level)
- {
- return Table[(int)level];
- }
-
- static Config()
- {
- Table = new Config[] {
- new Config(0, 0, 0, 0, DeflateFlavor.Store),
- new Config(4, 4, 8, 4, DeflateFlavor.Fast),
- new Config(4, 5, 16, 8, DeflateFlavor.Fast),
- new Config(4, 6, 32, 32, DeflateFlavor.Fast),
-
- new Config(4, 4, 16, 16, DeflateFlavor.Slow),
- new Config(8, 16, 32, 32, DeflateFlavor.Slow),
- new Config(8, 16, 128, 128, DeflateFlavor.Slow),
- new Config(8, 32, 128, 256, DeflateFlavor.Slow),
- new Config(32, 128, 258, 1024, DeflateFlavor.Slow),
- new Config(32, 258, 258, 4096, DeflateFlavor.Slow),
- };
- }
-
- private static readonly Config[] Table;
- }
-
- private CompressFunc DeflateFunction;
-
- private static readonly System.String[] _ErrorMessage = new System.String[]
- {
- "need dictionary",
- "stream end",
- "",
- "file error",
- "stream error",
- "data error",
- "insufficient memory",
- "buffer error",
- "incompatible version",
- ""
- };
-
- // preset dictionary flag in zlib header
- private static readonly int PRESET_DICT = 0x20;
-
- private static readonly int INIT_STATE = 42;
- private static readonly int BUSY_STATE = 113;
- private static readonly int FINISH_STATE = 666;
-
- // The deflate compression method
- private static readonly int Z_DEFLATED = 8;
-
- private static readonly int STORED_BLOCK = 0;
- private static readonly int STATIC_TREES = 1;
- private static readonly int DYN_TREES = 2;
-
- // The three kinds of block type
- private static readonly int Z_BINARY = 0;
- private static readonly int Z_ASCII = 1;
- private static readonly int Z_UNKNOWN = 2;
-
- private static readonly int Buf_size = 8 * 2;
-
- private static readonly int MIN_MATCH = 3;
- private static readonly int MAX_MATCH = 258;
-
- private static readonly int MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
-
- private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1);
-
- private static readonly int END_BLOCK = 256;
-
- internal ZlibCodec _codec; // the zlib encoder/decoder
- internal int status; // as the name implies
- internal byte[] pending; // output still pending - waiting to be compressed
- internal int nextPending; // index of next pending byte to output to the stream
- internal int pendingCount; // number of bytes in the pending buffer
-
- internal sbyte data_type; // UNKNOWN, BINARY or ASCII
- internal int last_flush; // value of flush param for previous deflate call
-
- internal int w_size; // LZ77 window size (32K by default)
- internal int w_bits; // log2(w_size) (8..16)
- internal int w_mask; // w_size - 1
-
- //internal byte[] dictionary;
- internal byte[] window;
-
- // Sliding window. Input bytes are read into the second half of the window,
- // and move to the first half later to keep a dictionary of at least wSize
- // bytes. With this organization, matches are limited to a distance of
- // wSize-MAX_MATCH bytes, but this ensures that IO is always
- // performed with a length multiple of the block size.
- //
- // To do: use the user input buffer as sliding window.
-
- internal int window_size;
- // Actual size of window: 2*wSize, except when the user input buffer
- // is directly used as sliding window.
-
- internal short[] prev;
- // Link to older string with same hash index. To limit the size of this
- // array to 64K, this link is maintained only for the last 32K strings.
- // An index in this array is thus a window index modulo 32K.
-
- internal short[] head; // Heads of the hash chains or NIL.
-
- internal int ins_h; // hash index of string to be inserted
- internal int hash_size; // number of elements in hash table
- internal int hash_bits; // log2(hash_size)
- internal int hash_mask; // hash_size-1
-
- // Number of bits by which ins_h must be shifted at each input
- // step. It must be such that after MIN_MATCH steps, the oldest
- // byte no longer takes part in the hash key, that is:
- // hash_shift * MIN_MATCH >= hash_bits
- internal int hash_shift;
-
- // Window position at the beginning of the current output block. Gets
- // negative when the window is moved backwards.
-
- internal int block_start;
-
- Config config;
- internal int match_length; // length of best match
- internal int prev_match; // previous match
- internal int match_available; // set if previous match exists
- internal int strstart; // start of string to insert into.....????
- internal int match_start; // start of matching string
- internal int lookahead; // number of valid bytes ahead in window
-
- // Length of the best match at previous step. Matches not greater than this
- // are discarded. This is used in the lazy match evaluation.
- internal int prev_length;
-
- // Insert new strings in the hash table only if the match length is not
- // greater than this length. This saves time but degrades compression.
- // max_insert_length is used only for compression levels <= 3.
-
- internal CompressionLevel compressionLevel; // compression level (1..9)
- internal CompressionStrategy compressionStrategy; // favor or force Huffman coding
-
- internal short[] dyn_ltree; // literal and length tree
- internal short[] dyn_dtree; // distance tree
- internal short[] bl_tree; // Huffman tree for bit lengths
-
- internal Tree treeLiterals = new Tree(); // desc for literal tree
- internal Tree treeDistances = new Tree(); // desc for distance tree
- internal Tree treeBitLengths = new Tree(); // desc for bit length tree
-
- // number of codes at each bit length for an optimal tree
- internal short[] bl_count = new short[InternalConstants.MAX_BITS + 1];
-
- // heap used to build the Huffman trees
- internal int[] heap = new int[2 * InternalConstants.L_CODES + 1];
-
- internal int heap_len; // number of elements in the heap
- internal int heap_max; // element of largest frequency
-
- // The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
- // The same heap array is used to build all trees.
-
- // Depth of each subtree used as tie breaker for trees of equal frequency
- internal sbyte[] depth = new sbyte[2 * InternalConstants.L_CODES + 1];
-
- internal int _lengthOffset; // index for literals or lengths
-
- // Size of match buffer for literals/lengths. There are 4 reasons for
- // limiting lit_bufsize to 64K:
- // - frequencies can be kept in 16 bit counters
- // - if compression is not successful for the first block, all input
- // data is still in the window so we can still emit a stored block even
- // when input comes from standard input. (This can also be done for
- // all blocks if lit_bufsize is not greater than 32K.)
- // - if compression is not successful for a file smaller than 64K, we can
- // even emit a stored file instead of a stored block (saving 5 bytes).
- // This is applicable only for zip (not gzip or zlib).
- // - creating new Huffman trees less frequently may not provide fast
- // adaptation to changes in the input data statistics. (Take for
- // example a binary file with poorly compressible code followed by
- // a highly compressible string table.) Smaller buffer sizes give
- // fast adaptation but have of course the overhead of transmitting
- // trees more frequently.
-
- internal int lit_bufsize;
-
- internal int last_lit; // running index in l_buf
-
- // Buffer for distances. To simplify the code, d_buf and l_buf have
- // the same number of elements. To use different lengths, an extra flag
- // array would be necessary.
-
- internal int _distanceOffset; // index into pending; points to distance data??
-
- internal int opt_len; // bit length of current block with optimal trees
- internal int static_len; // bit length of current block with static trees
- internal int matches; // number of string matches in current block
- internal int last_eob_len; // bit length of EOB code for last block
-
- // Output buffer. bits are inserted starting at the bottom (least
- // significant bits).
- internal short bi_buf;
-
- // Number of valid bits in bi_buf. All bits above the last valid bit
- // are always zero.
- internal int bi_valid;
-
- internal DeflateManager()
- {
- dyn_ltree = new short[HEAP_SIZE * 2];
- dyn_dtree = new short[(2 * InternalConstants.D_CODES + 1) * 2]; // distance tree
- bl_tree = new short[(2 * InternalConstants.BL_CODES + 1) * 2]; // Huffman tree for bit lengths
- }
-
- // lm_init
- private void _InitializeLazyMatch()
- {
- window_size = 2 * w_size;
-
- // clear the hash - workitem 9063
- Array.Clear(head, 0, hash_size);
- //for (int i = 0; i < hash_size; i++) head[i] = 0;
-
- config = Config.Lookup(compressionLevel);
- SetDeflater();
-
- strstart = 0;
- block_start = 0;
- lookahead = 0;
- match_length = prev_length = MIN_MATCH - 1;
- match_available = 0;
- ins_h = 0;
- }
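// The hash_shift/hash_bits relationship documented on the fields above comes
// from the conventional zlib-style rolling hash, sketched here with
// illustrative names (the actual insert-string helper is not part of this
// hunk):
private static int UpdateHashSketch(int insH, byte nextByte, int hashShift, int hashMask)
{
    // With hashShift = (hashBits + MIN_MATCH - 1) / MIN_MATCH and MIN_MATCH = 3,
    // three updates shift the oldest byte past hashBits, so the mask drops it
    // from the key entirely.
    return ((insH << hashShift) ^ nextByte) & hashMask;
}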
-
- // Initialize the tree data structures for a new zlib stream.
- private void _InitializeTreeData()
- {
- treeLiterals.dyn_tree = dyn_ltree;
- treeLiterals.staticTree = StaticTree.Literals;
-
- treeDistances.dyn_tree = dyn_dtree;
- treeDistances.staticTree = StaticTree.Distances;
-
- treeBitLengths.dyn_tree = bl_tree;
- treeBitLengths.staticTree = StaticTree.BitLengths;
-
- bi_buf = 0;
- bi_valid = 0;
- last_eob_len = 8; // enough lookahead for inflate
-
- // Initialize the first block of the first file:
- _InitializeBlocks();
- }
-
- internal void _InitializeBlocks()
- {
- // Initialize the trees.
- for (int i = 0; i < InternalConstants.L_CODES; i++)
- {
- dyn_ltree[i * 2] = 0;
- }
- for (int i = 0; i < InternalConstants.D_CODES; i++)
- {
- dyn_dtree[i * 2] = 0;
- }
- for (int i = 0; i < InternalConstants.BL_CODES; i++)
- {
- bl_tree[i * 2] = 0;
- }
-
- dyn_ltree[END_BLOCK * 2] = 1;
- opt_len = static_len = 0;
- last_lit = matches = 0;
- }
-
- // Restore the heap property by moving down the tree starting at node k,
- // exchanging a node with the smallest of its two sons if necessary, stopping
- // when the heap property is re-established (each father smaller than its
- // two sons).
- internal void pqdownheap(short[] tree, int k)
- {
- int v = heap[k];
- int j = k << 1; // left son of k
- while (j <= heap_len)
- {
- // Set j to the smallest of the two sons:
- if (j < heap_len && _IsSmaller(tree, heap[j + 1], heap[j], depth))
- {
- j++;
- }
- // Exit if v is smaller than both sons
- if (_IsSmaller(tree, v, heap[j], depth))
- {
- break;
- }
-
- // Exchange v with the smallest son
- heap[k] = heap[j]; k = j;
- // And continue down the tree, setting j to the left son of k
- j <<= 1;
- }
- heap[k] = v;
- }
-
- internal static bool _IsSmaller(short[] tree, int n, int m, sbyte[] depth)
- {
- short tn2 = tree[n * 2];
- short tm2 = tree[m * 2];
- return (tn2 < tm2 || (tn2 == tm2 && depth[n] <= depth[m]));
- }
-
- // Scan a literal or distance tree to determine the frequencies of the codes
- // in the bit length tree.
- internal void scan_tree(short[] tree, int max_code)
- {
- int n; // iterates over all tree elements
- int prevlen = -1; // last emitted length
- int curlen; // length of current code
- int nextlen = (int)tree[0 * 2 + 1]; // length of next code
- int count = 0; // repeat count of the current code
- int max_count = 7; // max repeat count
- int min_count = 4; // min repeat count
-
- if (nextlen == 0)
- {
- max_count = 138; min_count = 3;
- }
- tree[(max_code + 1) * 2 + 1] = (short)0x7fff; // guard //??
-
- for (n = 0; n <= max_code; n++)
- {
- curlen = nextlen; nextlen = (int)tree[(n + 1) * 2 + 1];
- if (++count < max_count && curlen == nextlen)
- {
- continue;
- }
- else if (count < min_count)
- {
- bl_tree[curlen * 2] = (short)(bl_tree[curlen * 2] + count);
- }
- else if (curlen != 0)
- {
- if (curlen != prevlen)
- {
- bl_tree[curlen * 2]++;
- }
- bl_tree[InternalConstants.REP_3_6 * 2]++;
- }
- else if (count <= 10)
- {
- bl_tree[InternalConstants.REPZ_3_10 * 2]++;
- }
- else
- {
- bl_tree[InternalConstants.REPZ_11_138 * 2]++;
- }
- count = 0; prevlen = curlen;
- if (nextlen == 0)
- {
- max_count = 138; min_count = 3;
- }
- else if (curlen == nextlen)
- {
- max_count = 6; min_count = 3;
- }
- else
- {
- max_count = 7; min_count = 4;
- }
- }
- }
-
- // Construct the Huffman tree for the bit lengths and return the index in
- // bl_order of the last bit length code to send.
- internal int build_bl_tree()
- {
- int max_blindex; // index of last bit length code of non zero freq
-
- // Determine the bit length frequencies for literal and distance trees
- scan_tree(dyn_ltree, treeLiterals.max_code);
- scan_tree(dyn_dtree, treeDistances.max_code);
-
- // Build the bit length tree:
- treeBitLengths.build_tree(this);
- // opt_len now includes the length of the tree representations, except
- // the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
-
- // Determine the number of bit length codes to send. The pkzip format
- // requires that at least 4 bit length codes be sent. (appnote.txt says
- // 3 but the actual value used is 4.)
- for (max_blindex = InternalConstants.BL_CODES - 1; max_blindex >= 3; max_blindex--)
- {
- if (bl_tree[Tree.bl_order[max_blindex] * 2 + 1] != 0)
- {
- break;
- }
- }
- // Update opt_len to include the bit length tree and counts
- opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
-
- return max_blindex;
- }
-
-
- // Send the header for a block using dynamic Huffman trees: the counts, the
- // lengths of the bit length codes, the literal tree and the distance tree.
- // IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
- internal void send_all_trees(int lcodes, int dcodes, int blcodes)
- {
- int rank; // index in bl_order
-
- send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt
- send_bits(dcodes - 1, 5);
- send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt
- for (rank = 0; rank < blcodes; rank++)
- {
- send_bits(bl_tree[Tree.bl_order[rank] * 2 + 1], 3);
- }
- send_tree(dyn_ltree, lcodes - 1); // literal tree
- send_tree(dyn_dtree, dcodes - 1); // distance tree
- }
-
- // Send a literal or distance tree in compressed form, using the codes in
- // bl_tree.
- internal void send_tree(short[] tree, int max_code)
- {
- int n; // iterates over all tree elements
- int prevlen = -1; // last emitted length
- int curlen; // length of current code
- int nextlen = tree[0 * 2 + 1]; // length of next code
- int count = 0; // repeat count of the current code
- int max_count = 7; // max repeat count
- int min_count = 4; // min repeat count
-
- if (nextlen == 0)
- {
- max_count = 138; min_count = 3;
- }
-
- for (n = 0; n <= max_code; n++)
- {
- curlen = nextlen; nextlen = tree[(n + 1) * 2 + 1];
- if (++count < max_count && curlen == nextlen)
- {
- continue;
- }
- else if (count < min_count)
- {
- do
- {
- send_code(curlen, bl_tree);
- }
- while (--count != 0);
- }
- else if (curlen != 0)
- {
- if (curlen != prevlen)
- {
- send_code(curlen, bl_tree); count--;
- }
- send_code(InternalConstants.REP_3_6, bl_tree);
- send_bits(count - 3, 2);
- }
- else if (count <= 10)
- {
- send_code(InternalConstants.REPZ_3_10, bl_tree);
- send_bits(count - 3, 3);
- }
- else
- {
- send_code(InternalConstants.REPZ_11_138, bl_tree);
- send_bits(count - 11, 7);
- }
- count = 0; prevlen = curlen;
- if (nextlen == 0)
- {
- max_count = 138; min_count = 3;
- }
- else if (curlen == nextlen)
- {
- max_count = 6; min_count = 3;
- }
- else
- {
- max_count = 7; min_count = 4;
- }
- }
- }
-
- // Output a block of bytes on the stream.
- // IN assertion: there is enough room in pending_buf.
- private void put_bytes(byte[] p, int start, int len)
- {
- Array.Copy(p, start, pending, pendingCount, len);
- pendingCount += len;
- }
-
-#if NOTNEEDED
- private void put_byte(byte c)
- {
- pending[pendingCount++] = c;
- }
- internal void put_short(int b)
- {
- unchecked
- {
- pending[pendingCount++] = (byte)b;
- pending[pendingCount++] = (byte)(b >> 8);
- }
- }
- internal void putShortMSB(int b)
- {
- unchecked
- {
- pending[pendingCount++] = (byte)(b >> 8);
- pending[pendingCount++] = (byte)b;
- }
- }
-#endif
-
- internal void send_code(int c, short[] tree)
- {
- int c2 = c * 2;
- send_bits((tree[c2] & 0xffff), (tree[c2 + 1] & 0xffff));
- }
-
- internal void send_bits(int value, int length)
- {
- int len = length;
- unchecked
- {
- if (bi_valid > (int)Buf_size - len)
- {
- //int val = value;
- // bi_buf |= (val << bi_valid);
-
- bi_buf |= (short)((value << bi_valid) & 0xffff);
- //put_short(bi_buf);
- pending[pendingCount++] = (byte)bi_buf;
- pending[pendingCount++] = (byte)(bi_buf >> 8);
-
-
- bi_buf = (short)((uint)value >> (Buf_size - bi_valid));
- bi_valid += len - Buf_size;
- }
- else
- {
- // bi_buf |= (value) << bi_valid;
- bi_buf |= (short)((value << bi_valid) & 0xffff);
- bi_valid += len;
- }
- }
- }
-
- // Send one empty static block to give enough lookahead for inflate.
- // This takes 10 bits, of which 7 may remain in the bit buffer.
- // The current inflate code requires 9 bits of lookahead. If the
- // last two codes for the previous block (real code plus EOB) were coded
- // on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
- // the last real code. In this case we send two empty static blocks instead
- // of one. (There are no problems if the previous block is stored or fixed.)
- // To simplify the code, we assume the worst case of last real code encoded
- // on one bit only.
- internal void _tr_align()
- {
- send_bits(STATIC_TREES << 1, 3);
- send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes);
-
- bi_flush();
-
- // Of the 10 bits for the empty block, we have already sent
- // (10 - bi_valid) bits. The lookahead for the last real code (before
- // the EOB of the previous block) was thus at least one plus the length
- // of the EOB plus what we have just sent of the empty static block.
- if (1 + last_eob_len + 10 - bi_valid < 9)
- {
- send_bits(STATIC_TREES << 1, 3);
- send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes);
- bi_flush();
- }
- last_eob_len = 7;
- }
-
- // Save the match info and tally the frequency counts. Return true if
- // the current block must be flushed.
- internal bool _tr_tally(int dist, int lc)
- {
- pending[_distanceOffset + last_lit * 2] = unchecked((byte)((uint)dist >> 8));
- pending[_distanceOffset + last_lit * 2 + 1] = unchecked((byte)dist);
- pending[_lengthOffset + last_lit] = unchecked((byte)lc);
- last_lit++;
-
- if (dist == 0)
- {
- // lc is the unmatched char
- dyn_ltree[lc * 2]++;
- }
- else
- {
- matches++;
- // Here, lc is the match length - MIN_MATCH
- dist--; // dist = match distance - 1
- dyn_ltree[(Tree.LengthCode[lc] + InternalConstants.LITERALS + 1) * 2]++;
- dyn_dtree[Tree.DistanceCode(dist) * 2]++;
- }
-
- /* ************************************************************
- * *
- * this code is not turned on by default in ZLIB Trrntzip code *
- * *
- * *************************************************************
- */
- /*
- if (false) //CompSettings
- {
- if ((last_lit & 0x1fff) == 0 && (int)compressionLevel > 2)
- {
- // Compute an upper bound for the compressed length
- int out_length = last_lit << 3;
- int in_length = strstart - block_start;
- int dcode;
- for (dcode = 0; dcode < InternalConstants.D_CODES; dcode++)
- {
- out_length = (int)(out_length + (int)dyn_dtree[dcode * 2] * (5L + Tree.ExtraDistanceBits[dcode]));
- }
- out_length >>= 3;
- if ((matches < (last_lit / 2)) && out_length < in_length / 2)
- return true;
- }
- }
- */
-
- return (last_lit == lit_bufsize - 1) || (last_lit == lit_bufsize);
- // dinoch - wraparound?
- // We avoid equality with lit_bufsize because of wraparound at 64K
- // on 16 bit machines and because stored blocks are restricted to
- // 64K-1 bytes.
- }
-
- // Send the block data compressed using the given Huffman trees
- internal void send_compressed_block(short[] ltree, short[] dtree)
- {
- int distance; // distance of matched string
- int lc; // match length or unmatched char (if dist == 0)
- int lx = 0; // running index in l_buf
- int code; // the code to send
- int extra; // number of extra bits to send
-
- if (last_lit != 0)
- {
- do
- {
- int ix = _distanceOffset + lx * 2;
- distance = ((pending[ix] << 8) & 0xff00) |
- (pending[ix + 1] & 0xff);
- lc = (pending[_lengthOffset + lx]) & 0xff;
- lx++;
-
- if (distance == 0)
- {
- send_code(lc, ltree); // send a literal byte
- }
- else
- {
- // literal or match pair
- // Here, lc is the match length - MIN_MATCH
- code = Tree.LengthCode[lc];
-
- // send the length code
- send_code(code + InternalConstants.LITERALS + 1, ltree);
- extra = Tree.ExtraLengthBits[code];
- if (extra != 0)
- {
- // send the extra length bits
- lc -= Tree.LengthBase[code];
- send_bits(lc, extra);
- }
- distance--; // dist is now the match distance - 1
- code = Tree.DistanceCode(distance);
-
- // send the distance code
- send_code(code, dtree);
-
- extra = Tree.ExtraDistanceBits[code];
- if (extra != 0)
- {
- // send the extra distance bits
- distance -= Tree.DistanceBase[code];
- send_bits(distance, extra);
- }
- }
-
- // Check that the overlay between pending and d_buf+l_buf is ok:
- }
- while (lx < last_lit);
- }
-
- send_code(END_BLOCK, ltree);
- last_eob_len = ltree[END_BLOCK * 2 + 1];
- }
-
- // Set the data type to ASCII or BINARY, using a crude approximation:
- // binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
- // IN assertion: the fields freq of dyn_ltree are set and the total of all
- // frequencies does not exceed 64K (to fit in an int on 16 bit machines).
- internal void set_data_type()
- {
- int n = 0;
- int ascii_freq = 0;
- int bin_freq = 0;
- while (n < 7)
- {
- bin_freq += dyn_ltree[n * 2]; n++;
- }
- while (n < 128)
- {
- ascii_freq += dyn_ltree[n * 2]; n++;
- }
- while (n < InternalConstants.LITERALS)
- {
- bin_freq += dyn_ltree[n * 2]; n++;
- }
- data_type = (sbyte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
- }
-
- // Flush the bit buffer, keeping at most 7 bits in it.
- internal void bi_flush()
- {
- if (bi_valid == 16)
- {
- pending[pendingCount++] = (byte)bi_buf;
- pending[pendingCount++] = (byte)(bi_buf >> 8);
- bi_buf = 0;
- bi_valid = 0;
- }
- else if (bi_valid >= 8)
- {
- //put_byte((byte)bi_buf);
- pending[pendingCount++] = (byte)bi_buf;
- bi_buf >>= 8;
- bi_valid -= 8;
- }
- }
-
- // Flush the bit buffer and align the output on a byte boundary
- internal void bi_windup()
- {
- if (bi_valid > 8)
- {
- pending[pendingCount++] = (byte)bi_buf;
- pending[pendingCount++] = (byte)(bi_buf >> 8);
- }
- else if (bi_valid > 0)
- {
- //put_byte((byte)bi_buf);
- pending[pendingCount++] = (byte)bi_buf;
- }
- bi_buf = 0;
- bi_valid = 0;
- }
-
- // Copy a stored block, storing first the length and its
- // one's complement if requested.
- internal void copy_block(int buf, int len, bool header)
- {
- bi_windup(); // align on byte boundary
- last_eob_len = 8; // enough lookahead for inflate
-
- if (header)
- {
- unchecked
- {
- //put_short((short)len);
- pending[pendingCount++] = (byte)len;
- pending[pendingCount++] = (byte)(len >> 8);
- //put_short((short)~len);
- pending[pendingCount++] = (byte)~len;
- pending[pendingCount++] = (byte)(~len >> 8);
- }
- }
-
- put_bytes(window, buf, len);
- }
-
- internal void flush_block_only(bool eof)
- {
- _tr_flush_block(block_start >= 0 ? block_start : -1, strstart - block_start, eof);
- block_start = strstart;
- _codec.flush_pending();
- }
-
- // Copy without compression as much as possible from the input stream, return
- // the current block state.
- // This function does not insert new strings in the dictionary since
- // uncompressible data is probably not useful. This function is used
- // only for the level=0 compression option.
- // NOTE: this function should be optimized to avoid extra copying from
- // window to pending_buf.
- internal BlockState DeflateNone(FlushType flush)
- {
- // Stored blocks are limited to 0xffff bytes, pending is limited
- // to pending_buf_size, and each stored block has a 5 byte header:
-
- int max_block_size = 0xffff;
- int max_start;
-
- if (max_block_size > pending.Length - 5)
- {
- max_block_size = pending.Length - 5;
- }
-
- // Copy as much as possible from input to output:
- while (true)
- {
- // Fill the window as much as possible:
- if (lookahead <= 1)
- {
- _fillWindow();
- if (lookahead == 0 && flush == FlushType.None)
- {
- return BlockState.NeedMore;
- }
- if (lookahead == 0)
- {
- break; // flush the current block
- }
- }
-
- strstart += lookahead;
- lookahead = 0;
-
- // Emit a stored block if pending will be full:
- max_start = block_start + max_block_size;
- if (strstart == 0 || strstart >= max_start)
- {
- // strstart == 0 is possible when wraparound on 16-bit machine
- lookahead = (int)(strstart - max_start);
- strstart = (int)max_start;
-
- flush_block_only(false);
- if (_codec.AvailableBytesOut == 0)
- {
- return BlockState.NeedMore;
- }
- }
-
- // Flush if we may have to slide, otherwise block_start may become
- // negative and the data will be gone:
- if (strstart - block_start >= w_size - MIN_LOOKAHEAD)
- {
- flush_block_only(false);
- if (_codec.AvailableBytesOut == 0)
- {
- return BlockState.NeedMore;
- }
- }
- }
-
- flush_block_only(flush == FlushType.Finish);
- if (_codec.AvailableBytesOut == 0)
- {
- return (flush == FlushType.Finish) ? BlockState.FinishStarted : BlockState.NeedMore;
- }
-
- return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
- }
-
- // Send a stored block
- internal void _tr_stored_block(int buf, int stored_len, bool eof)
- {
- send_bits((STORED_BLOCK << 1) + (eof ? 1 : 0), 3); // send block type
- copy_block(buf, stored_len, true); // with header
- }
-
- // Determine the best encoding for the current block: dynamic trees, static
- // trees or store, and output the encoded block to the zip file.
- internal void _tr_flush_block(int buf, int stored_len, bool eof)
- {
- int opt_lenb, static_lenb; // opt_len and static_len in bytes
- int max_blindex = 0; // index of last bit length code of non zero freq
-
- // Build the Huffman trees unless a stored block is forced
- if (compressionLevel > 0)
- {
- // Check if the file is ascii or binary
- if (data_type == Z_UNKNOWN)
- {
- set_data_type();
- }
-
- // Construct the literal and distance trees
- treeLiterals.build_tree(this);
-
- treeDistances.build_tree(this);
-
- // At this point, opt_len and static_len are the total bit lengths of
- // the compressed block data, excluding the tree representations.
-
- // Build the bit length tree for the above two trees, and get the index
- // in bl_order of the last bit length code to send.
- max_blindex = build_bl_tree();
-
- // Determine the best encoding. Compute first the block length in bytes
- opt_lenb = (opt_len + 3 + 7) >> 3;
- static_lenb = (static_len + 3 + 7) >> 3;
-
- if (static_lenb <= opt_lenb)
- {
- opt_lenb = static_lenb;
- }
- }
- else
- {
- opt_lenb = static_lenb = stored_len + 5; // force a stored block
- }
-
- if (stored_len + 4 <= opt_lenb && buf != -1)
- {
- // 4: two words for the lengths
- // The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
- // Otherwise we can't have processed more than WSIZE input bytes since
- // the last block flush, because compression would have been
- // successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
- // transform a block into a stored block.
- _tr_stored_block(buf, stored_len, eof);
- }
- else if (static_lenb == opt_lenb)
- {
- send_bits((STATIC_TREES << 1) + (eof ? 1 : 0), 3);
- send_compressed_block(StaticTree.lengthAndLiteralsTreeCodes, StaticTree.distTreeCodes);
- }
- else
- {
- send_bits((DYN_TREES << 1) + (eof ? 1 : 0), 3);
- send_all_trees(treeLiterals.max_code + 1, treeDistances.max_code + 1, max_blindex + 1);
- send_compressed_block(dyn_ltree, dyn_dtree);
- }
-
- // The above check is made mod 2^32, for files larger than 512 MB
- // and uLong implemented on 32 bits.
-
- _InitializeBlocks();
-
- if (eof)
- {
- bi_windup();
- }
- }
-
- // Fill the window when the lookahead becomes insufficient.
- // Updates strstart and lookahead.
- //
- // IN assertion: lookahead < MIN_LOOKAHEAD
- // OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
- // At least one byte has been read, or avail_in == 0; reads are
- // performed for at least two bytes (required for the zip translate_eol
- // option -- not supported here).
- private void _fillWindow()
- {
- int n, m;
- int p;
- int more; // Amount of free space at the end of the window.
-
- do
- {
- more = (window_size - lookahead - strstart);
-
- // Deal with !@#$% 64K limit:
- if (more == 0 && strstart == 0 && lookahead == 0)
- {
- more = w_size;
- }
- else if (more == -1)
- {
- // Very unlikely, but possible on 16 bit machine if strstart == 0
- // and lookahead == 1 (input done one byte at time)
- more--;
-
- // If the window is almost full and there is insufficient lookahead,
- // move the upper half to the lower one to make room in the upper half.
- }
- else if (strstart >= w_size + w_size - MIN_LOOKAHEAD)
- {
- Array.Copy(window, w_size, window, 0, w_size);
- match_start -= w_size;
- strstart -= w_size; // we now have strstart >= MAX_DIST
- block_start -= w_size;
-
- // Slide the hash table (could be avoided with 32 bit values
- // at the expense of memory usage). We slide even when level == 0
- // to keep the hash table consistent if we switch back to level > 0
- // later. (Using level 0 permanently is not an optimal usage of
- // zlib, so we don't care about this pathological case.)
-
- n = hash_size;
- p = n;
- do
- {
- m = (head[--p] & 0xffff);
- head[p] = (short)((m >= w_size) ? (m - w_size) : 0);
- }
- while (--n != 0);
-
- n = w_size;
- p = n;
- do
- {
- m = (prev[--p] & 0xffff);
- prev[p] = (short)((m >= w_size) ? (m - w_size) : 0);
- // If n is not on any hash chain, prev[n] is garbage but
- // its value will never be used.
- }
- while (--n != 0);
- more += w_size;
- }
-
- if (_codec.AvailableBytesIn == 0)
- {
- return;
- }
-
- // If there was no sliding:
- // strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
- // more == window_size - lookahead - strstart
- // => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
- // => more >= window_size - 2*WSIZE + 2
- // In the BIG_MEM or MMAP case (not yet supported),
- // window_size == input_size + MIN_LOOKAHEAD &&
- // strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
- // Otherwise, window_size == 2*WSIZE so more >= 2.
- // If there was sliding, more >= WSIZE. So in all cases, more >= 2.
-
- n = _codec.read_buf(window, strstart + lookahead, more);
- lookahead += n;
-
- // Initialize the hash value now that we have some input:
- if (lookahead >= MIN_MATCH)
- {
- ins_h = window[strstart] & 0xff;
- ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
- }
- // If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
- // but this is not important since only literal bytes will be emitted.
- }
- while (lookahead < MIN_LOOKAHEAD && _codec.AvailableBytesIn != 0);
- }
-
- // Compress as much as possible from the input stream, return the current
- // block state.
- // This function does not perform lazy evaluation of matches and inserts
- // new strings in the dictionary only for unmatched strings or for short
- // matches. It is used only for the fast compression options.
- internal BlockState DeflateFast(FlushType flush)
- {
- // short hash_head = 0; // head of the hash chain
- int hash_head = 0; // head of the hash chain
- bool bflush; // set if current block must be flushed
-
- while (true)
- {
- // Make sure that we always have enough lookahead, except
- // at the end of the input file. We need MAX_MATCH bytes
- // for the next match, plus MIN_MATCH bytes to insert the
- // string following the next match.
- if (lookahead < MIN_LOOKAHEAD)
- {
- _fillWindow();
- if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None)
- {
- return BlockState.NeedMore;
- }
- if (lookahead == 0)
- {
- break; // flush the current block
- }
- }
-
- // Insert the string window[strstart .. strstart+2] in the
- // dictionary, and set hash_head to the head of the hash chain:
- if (lookahead >= MIN_MATCH)
- {
- ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
-
- // prev[strstart&w_mask]=hash_head=head[ins_h];
- hash_head = (head[ins_h] & 0xffff);
- prev[strstart & w_mask] = head[ins_h];
- head[ins_h] = unchecked((short)strstart);
- }
-
- // Find the longest match, discarding those <= prev_length.
- // At this point we have always match_length < MIN_MATCH
-
- if (hash_head != 0L && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD)
- {
- // To simplify the code, we prevent matches with the string
- // of window index 0 (in particular we have to avoid a match
- // of the string with itself at the start of the input file).
- if (compressionStrategy != CompressionStrategy.HuffmanOnly)
- {
- match_length = longest_match(hash_head);
- }
- // longest_match() sets match_start
- }
- if (match_length >= MIN_MATCH)
- {
- // check_match(strstart, match_start, match_length);
-
- bflush = _tr_tally(strstart - match_start, match_length - MIN_MATCH);
-
- lookahead -= match_length;
-
- // Insert new strings in the hash table only if the match length
- // is not too large. This saves time but degrades compression.
- if (match_length <= config.MaxLazy && lookahead >= MIN_MATCH)
- {
- match_length--; // string at strstart already in hash table
- do
- {
- strstart++;
-
- ins_h = ((ins_h << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
- // prev[strstart&w_mask]=hash_head=head[ins_h];
- hash_head = (head[ins_h] & 0xffff);
- prev[strstart & w_mask] = head[ins_h];
- head[ins_h] = unchecked((short)strstart);
-
- // strstart never exceeds WSIZE-MAX_MATCH, so there are
- // always MIN_MATCH bytes ahead.
- }
- while (--match_length != 0);
- strstart++;
- }
- else
- {
- strstart += match_length;
- match_length = 0;
- ins_h = window[strstart] & 0xff;
-
- ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
- // If lookahead < MIN_MATCH, ins_h is garbage, but it does not
- // matter since it will be recomputed at next deflate call.
- }
- }
- else
- {
- // No match, output a literal byte
-
- bflush = _tr_tally(0, window[strstart] & 0xff);
- lookahead--;
- strstart++;
- }
- if (bflush)
- {
- flush_block_only(false);
- if (_codec.AvailableBytesOut == 0)
- {
- return BlockState.NeedMore;
- }
- }
- }
-
- flush_block_only(flush == FlushType.Finish);
- if (_codec.AvailableBytesOut == 0)
- {
- if (flush == FlushType.Finish)
- {
- return BlockState.FinishStarted;
- }
- else
- {
- return BlockState.NeedMore;
- }
- }
- return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
- }
-
- // Same as above, but achieves better compression. We use a lazy
- // evaluation for matches: a match is finally adopted only if there is
- // no better match at the next window position.
- internal BlockState DeflateSlow(FlushType flush)
- {
- // short hash_head = 0; // head of hash chain
- int hash_head = 0; // head of hash chain
- bool bflush; // set if current block must be flushed
-
- // Process the input block.
- while (true)
- {
- // Make sure that we always have enough lookahead, except
- // at the end of the input file. We need MAX_MATCH bytes
- // for the next match, plus MIN_MATCH bytes to insert the
- // string following the next match.
-
- if (lookahead < MIN_LOOKAHEAD)
- {
- _fillWindow();
- if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None)
- {
- return BlockState.NeedMore;
- }
-
- if (lookahead == 0)
- {
- break; // flush the current block
- }
- }
-
- // Insert the string window[strstart .. strstart+2] in the
- // dictionary, and set hash_head to the head of the hash chain:
-
- if (lookahead >= MIN_MATCH)
- {
- ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
- // prev[strstart&w_mask]=hash_head=head[ins_h];
- hash_head = (head[ins_h] & 0xffff);
- prev[strstart & w_mask] = head[ins_h];
- head[ins_h] = unchecked((short)strstart);
- }
-
- // Find the longest match, discarding those <= prev_length.
- prev_length = match_length;
- prev_match = match_start;
- match_length = MIN_MATCH - 1;
-
- if (hash_head != 0 && prev_length < config.MaxLazy &&
- ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD)
- {
- // To simplify the code, we prevent matches with the string
- // of window index 0 (in particular we have to avoid a match
- // of the string with itself at the start of the input file).
-
- if (compressionStrategy != CompressionStrategy.HuffmanOnly)
- {
- match_length = longest_match(hash_head);
- }
- // longest_match() sets match_start
-
- if (match_length <= 5 && (compressionStrategy == CompressionStrategy.Filtered ||
- (match_length == MIN_MATCH && strstart - match_start > 4096)))
- {
-
- // If prev_match is also MIN_MATCH, match_start is garbage
- // but we will ignore the current match anyway.
- match_length = MIN_MATCH - 1;
- }
- }
-
- // If there was a match at the previous step and the current
- // match is not better, output the previous match:
- if (prev_length >= MIN_MATCH && match_length <= prev_length)
- {
- int max_insert = strstart + lookahead - MIN_MATCH;
- // Do not insert strings in hash table beyond this.
-
- // check_match(strstart-1, prev_match, prev_length);
-
- bflush = _tr_tally(strstart - 1 - prev_match, prev_length - MIN_MATCH);
-
- // Insert in hash table all strings up to the end of the match.
- // strstart-1 and strstart are already inserted. If there is not
- // enough lookahead, the last two strings are not inserted in
- // the hash table.
- lookahead -= (prev_length - 1);
- prev_length -= 2;
- do
- {
- if (++strstart <= max_insert)
- {
- ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
- //prev[strstart&w_mask]=hash_head=head[ins_h];
- hash_head = (head[ins_h] & 0xffff);
- prev[strstart & w_mask] = head[ins_h];
- head[ins_h] = unchecked((short)strstart);
- }
- }
- while (--prev_length != 0);
- match_available = 0;
- match_length = MIN_MATCH - 1;
- strstart++;
-
- if (bflush)
- {
- flush_block_only(false);
- if (_codec.AvailableBytesOut == 0)
- {
- return BlockState.NeedMore;
- }
- }
- }
- else if (match_available != 0)
- {
-
- // If there was no match at the previous position, output a
- // single literal. If there was a match but the current match
- // is longer, truncate the previous match to a single literal.
-
- bflush = _tr_tally(0, window[strstart - 1] & 0xff);
-
- if (bflush)
- {
- flush_block_only(false);
- }
- strstart++;
- lookahead--;
- if (_codec.AvailableBytesOut == 0)
- return BlockState.NeedMore;
- }
- else
- {
- // There is no previous match to compare with, wait for
- // the next step to decide.
-
- match_available = 1;
- strstart++;
- lookahead--;
- }
- }
-
- if (match_available != 0)
- {
- bflush = _tr_tally(0, window[strstart - 1] & 0xff);
- match_available = 0;
- }
- flush_block_only(flush == FlushType.Finish);
-
- if (_codec.AvailableBytesOut == 0)
- {
- if (flush == FlushType.Finish)
- {
- return BlockState.FinishStarted;
- }
- else
- {
- return BlockState.NeedMore;
- }
- }
-
- return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
- }
-
-
- internal int longest_match(int cur_match)
- {
- int chain_length = config.MaxChainLength; // max hash chain length
- int scan = strstart; // current string
- int match; // matched string
- int len; // length of current match
- int best_len = prev_length; // best match length so far
- int limit = strstart > (w_size - MIN_LOOKAHEAD) ? strstart - (w_size - MIN_LOOKAHEAD) : 0;
-
- int niceLength = config.NiceLength;
-
- // Stop when cur_match becomes <= limit. To simplify the code,
- // we prevent matches with the string of window index 0.
-
- int wmask = w_mask;
-
- int strend = strstart + MAX_MATCH;
- byte scan_end1 = window[scan + best_len - 1];
- byte scan_end = window[scan + best_len];
-
- // The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
- // It is easy to get rid of this optimization if necessary.
-
- // Do not waste too much time if we already have a good match:
- if (prev_length >= config.GoodLength)
- {
- chain_length >>= 2;
- }
-
- // Do not look for matches beyond the end of the input. This is necessary
- // to make deflate deterministic.
- if (niceLength > lookahead)
- {
- niceLength = lookahead;
- }
-
- do
- {
- match = cur_match;
-
- // Skip to next match if the match length cannot increase
- // or if the match length is less than 2:
- if (window[match + best_len] != scan_end ||
- window[match + best_len - 1] != scan_end1 ||
- window[match] != window[scan] ||
- window[++match] != window[scan + 1])
- {
- continue;
- }
-
- // The check at best_len-1 can be removed because it will be made
- // again later. (This heuristic is not always a win.)
- // It is not necessary to compare scan[2] and match[2] since they
- // are always equal when the other bytes match, given that
- // the hash keys are equal and that HASH_BITS >= 8.
- scan += 2; match++;
-
- // We check for insufficient lookahead only every 8th comparison;
- // the 256th check will be made at strstart+258.
- do
- {
- }
- while (window[++scan] == window[++match] &&
- window[++scan] == window[++match] &&
- window[++scan] == window[++match] &&
- window[++scan] == window[++match] &&
- window[++scan] == window[++match] &&
- window[++scan] == window[++match] &&
- window[++scan] == window[++match] &&
- window[++scan] == window[++match] && scan < strend);
-
- len = MAX_MATCH - (int)(strend - scan);
- scan = strend - MAX_MATCH;
-
- if (len > best_len)
- {
- match_start = cur_match;
- best_len = len;
- if (len >= niceLength)
- {
- break;
- }
- scan_end1 = window[scan + best_len - 1];
- scan_end = window[scan + best_len];
- }
- }
- while ((cur_match = (prev[cur_match & wmask] & 0xffff)) > limit && --chain_length != 0);
-
- if (best_len <= lookahead)
- {
- return best_len;
- }
- return lookahead;
- }
-
-
- private bool Rfc1950BytesEmitted = false;
- private bool _WantRfc1950HeaderBytes = true;
- internal bool WantRfc1950HeaderBytes
- {
- get { return _WantRfc1950HeaderBytes; }
- set { _WantRfc1950HeaderBytes = value; }
- }
-
- internal int Initialize(ZlibCodec codec, CompressionLevel level)
- {
- return Initialize(codec, level, ZlibConstants.WindowBitsMax);
- }
-
- internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits)
- {
- return Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, CompressionStrategy.Default);
- }
-
- internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits, CompressionStrategy compressionStrategy)
- {
- return Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, compressionStrategy);
- }
-
- internal int Initialize(ZlibCodec codec, CompressionLevel level, int windowBits, int memLevel, CompressionStrategy strategy)
- {
- _codec = codec;
- _codec.Message = null;
-
- // validation
- if (windowBits < 9 || windowBits > 15)
- {
- throw new ZlibException("windowBits must be in the range 9..15.");
- }
-
- if (memLevel < 1 || memLevel > MEM_LEVEL_MAX)
- {
- throw new ZlibException(String.Format("memLevel must be in the range 1.. {0}", MEM_LEVEL_MAX));
- }
-
- _codec.dstate = this;
-
- w_bits = windowBits;
- w_size = 1 << w_bits;
- w_mask = w_size - 1;
-
- hash_bits = memLevel + 7;
- hash_size = 1 << hash_bits;
- hash_mask = hash_size - 1;
- hash_shift = ((hash_bits + MIN_MATCH - 1) / MIN_MATCH);
-
- window = new byte[w_size * 2];
- prev = new short[w_size];
- head = new short[hash_size];
-
- // for memLevel==8, this will be 16384, 16k
- lit_bufsize = 1 << (memLevel + 6);
-
- // Use a single array as the buffer for data pending compression,
- // the output distance codes, and the output length codes (aka tree).
- // orig comment: This works just fine since the average
- // output size for (length,distance) codes is <= 24 bits.
- pending = new byte[lit_bufsize * 4];
- _distanceOffset = lit_bufsize;
- _lengthOffset = (1 + 2) * lit_bufsize;
-
- // So, for memLevel 8, the length of the pending buffer is 65536. 64k.
- // The first 16k are pending bytes.
- // The middle slice, of 32k, is used for distance codes.
- // The final 16k are length codes.
-
- this.compressionLevel = level;
- this.compressionStrategy = strategy;
-
- Reset();
- return ZlibConstants.Z_OK;
- }
-
- internal void Reset()
- {
- _codec.TotalBytesIn = _codec.TotalBytesOut = 0;
- _codec.Message = null;
- //strm.data_type = Z_UNKNOWN;
-
- pendingCount = 0;
- nextPending = 0;
-
- Rfc1950BytesEmitted = false;
-
- status = (WantRfc1950HeaderBytes) ? INIT_STATE : BUSY_STATE;
- _codec._Adler32 = Adler.Adler32(0, null, 0, 0);
-
- last_flush = (int)FlushType.None;
-
- _InitializeTreeData();
- _InitializeLazyMatch();
- }
-
- internal int End()
- {
- if (status != INIT_STATE && status != BUSY_STATE && status != FINISH_STATE)
- {
- return ZlibConstants.Z_STREAM_ERROR;
- }
- // Deallocate in reverse order of allocations:
- pending = null;
- head = null;
- prev = null;
- window = null;
- // free
- // dstate=null;
- return status == BUSY_STATE ? ZlibConstants.Z_DATA_ERROR : ZlibConstants.Z_OK;
- }
-
- private void SetDeflater()
- {
- switch (config.Flavor)
- {
- case DeflateFlavor.Store:
- DeflateFunction = DeflateNone;
- break;
- case DeflateFlavor.Fast:
- DeflateFunction = DeflateFast;
- break;
- case DeflateFlavor.Slow:
- DeflateFunction = DeflateSlow;
- break;
- }
- }
-
- internal int SetParams(CompressionLevel level, CompressionStrategy strategy)
- {
- int result = ZlibConstants.Z_OK;
-
- if (compressionLevel != level)
- {
- Config newConfig = Config.Lookup(level);
-
- // change in the deflate flavor (Fast vs slow vs none)?
- if (newConfig.Flavor != config.Flavor && _codec.TotalBytesIn != 0)
- {
- // Flush the last buffer:
- result = _codec.Deflate(FlushType.Partial);
- }
-
- compressionLevel = level;
- config = newConfig;
- SetDeflater();
- }
-
- // no need to flush with change in strategy? Really?
- compressionStrategy = strategy;
-
- return result;
- }
-
- internal int SetDictionary(byte[] dictionary)
- {
- if (dictionary == null || status != INIT_STATE)
- {
- throw new ZlibException("Stream error.");
- }
-
- int length = dictionary.Length;
- int index = 0;
-
- _codec._Adler32 = Adler.Adler32(_codec._Adler32, dictionary, 0, dictionary.Length);
-
- if (length < MIN_MATCH)
- {
- return ZlibConstants.Z_OK;
- }
- if (length > w_size - MIN_LOOKAHEAD)
- {
- length = w_size - MIN_LOOKAHEAD;
- index = dictionary.Length - length; // use the tail of the dictionary
- }
- Array.Copy(dictionary, index, window, 0, length);
- strstart = length;
- block_start = length;
-
- // Insert all strings in the hash table (except for the last two bytes).
- // s->lookahead stays null, so s->ins_h will be recomputed at the next
- // call of fill_window.
-
- ins_h = window[0] & 0xff;
- ins_h = (((ins_h) << hash_shift) ^ (window[1] & 0xff)) & hash_mask;
-
- for (int n = 0; n <= length - MIN_MATCH; n++)
- {
- ins_h = (((ins_h) << hash_shift) ^ (window[(n) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
- prev[n & w_mask] = head[ins_h];
- head[ins_h] = (short)n;
- }
- return ZlibConstants.Z_OK;
- }
-
- internal int Deflate(FlushType flush)
- {
- int old_flush;
-
- if (_codec.OutputBuffer == null ||
- (_codec.InputBuffer == null && _codec.AvailableBytesIn != 0) ||
- (status == FINISH_STATE && flush != FlushType.Finish))
- {
- _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_STREAM_ERROR)];
- throw new ZlibException(String.Format("Something is fishy. [{0}]", _codec.Message));
- }
- if (_codec.AvailableBytesOut == 0)
- {
- _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
- throw new ZlibException("OutputBuffer is full (AvailableBytesOut == 0)");
- }
-
- old_flush = last_flush;
- last_flush = (int)flush;
-
- // Write the zlib (rfc1950) header bytes
- if (status == INIT_STATE)
- {
- int header = (Z_DEFLATED + ((w_bits - 8) << 4)) << 8;
- int level_flags = (((int)compressionLevel - 1) & 0xff) >> 1;
-
- if (level_flags > 3)
- {
- level_flags = 3;
- }
- header |= (level_flags << 6);
- if (strstart != 0)
- {
- header |= PRESET_DICT;
- }
- header += 31 - (header % 31);
-
- status = BUSY_STATE;
- //putShortMSB(header);
- unchecked
- {
- pending[pendingCount++] = (byte)(header >> 8);
- pending[pendingCount++] = (byte)header;
- }
- // Save the adler32 of the preset dictionary:
- if (strstart != 0)
- {
- pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24);
- pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16);
- pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8);
- pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF);
- }
- _codec._Adler32 = Adler.Adler32(0, null, 0, 0);
- }
-
- // Flush as much pending output as possible
- if (pendingCount != 0)
- {
- _codec.flush_pending();
- if (_codec.AvailableBytesOut == 0)
- {
- //System.out.println(" avail_out==0");
- // Since avail_out is 0, deflate will be called again with
- // more output space, but possibly with both pending and
- // avail_in equal to zero. There won't be anything to do,
- // but this is not an error situation so make sure we
- // return OK instead of BUF_ERROR at next call of deflate:
- last_flush = -1;
- return ZlibConstants.Z_OK;
- }
-
- // Make sure there is something to do and avoid duplicate consecutive
- // flushes. For repeated and useless calls with Z_FINISH, we keep
- // returning Z_STREAM_END instead of Z_BUFF_ERROR.
- }
- else if (_codec.AvailableBytesIn == 0 &&
- (int)flush <= old_flush &&
- flush != FlushType.Finish)
- {
- // workitem 8557
- //
- // Not sure why this needs to be an error. pendingCount == 0, which
- // means there's nothing to deflate. And the caller has not asked
- // for a FlushType.Finish, but... that seems very non-fatal. We
- // can just say "OK" and do nothing.
-
- // _codec.Message = z_errmsg[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
- // throw new ZlibException("AvailableBytesIn == 0 && flush<=old_flush && flush != FlushType.Finish");
-
- return ZlibConstants.Z_OK;
- }
-
- // User must not provide more input after the first FINISH:
- if (status == FINISH_STATE && _codec.AvailableBytesIn != 0)
- {
- _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
- throw new ZlibException("status == FINISH_STATE && _codec.AvailableBytesIn != 0");
- }
-
- // Start a new block or continue the current one.
- if (_codec.AvailableBytesIn != 0 || lookahead != 0 || (flush != FlushType.None && status != FINISH_STATE))
- {
- BlockState bstate = DeflateFunction(flush);
-
- if (bstate == BlockState.FinishStarted || bstate == BlockState.FinishDone)
- {
- status = FINISH_STATE;
- }
- if (bstate == BlockState.NeedMore || bstate == BlockState.FinishStarted)
- {
- if (_codec.AvailableBytesOut == 0)
- {
- last_flush = -1; // avoid BUF_ERROR next call, see above
- }
- return ZlibConstants.Z_OK;
- // If flush != Z_NO_FLUSH && avail_out == 0, the next call
- // of deflate should use the same flush parameter to make sure
- // that the flush is complete. So we don't have to output an
- // empty block here, this will be done at next call. This also
- // ensures that for a very small output buffer, we emit at most
- // one empty block.
- }
-
- if (bstate == BlockState.BlockDone)
- {
- if (flush == FlushType.Partial)
- {
- _tr_align();
- }
- else
- {
- // FlushType.Full or FlushType.Sync
- _tr_stored_block(0, 0, false);
- // For a full flush, this empty block will be recognized
- // as a special marker by inflate_sync().
- if (flush == FlushType.Full)
- {
- // clear hash (forget the history)
- for (int i = 0; i < hash_size; i++)
- head[i] = 0;
- }
- }
- _codec.flush_pending();
- if (_codec.AvailableBytesOut == 0)
- {
- last_flush = -1; // avoid BUF_ERROR at next call, see above
- return ZlibConstants.Z_OK;
- }
- }
- }
-
- if (flush != FlushType.Finish)
- {
- return ZlibConstants.Z_OK;
- }
-
- if (!WantRfc1950HeaderBytes || Rfc1950BytesEmitted)
- {
- return ZlibConstants.Z_STREAM_END;
- }
-
- // Write the zlib trailer (adler32)
- pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24);
- pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16);
- pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8);
- pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF);
- //putShortMSB((int)(SharedUtils.URShift(_codec._Adler32, 16)));
- //putShortMSB((int)(_codec._Adler32 & 0xffff));
-
- _codec.flush_pending();
-
- // If avail_out is zero, the application will call deflate again
- // to flush the rest.
-
- Rfc1950BytesEmitted = true; // write the trailer only once!
-
- return pendingCount != 0 ? ZlibConstants.Z_OK : ZlibConstants.Z_STREAM_END;
- }
- }
-}
\ No newline at end of file
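
Note on the removed bit-output code: send_bits, bi_flush, and bi_windup above implement DEFLATE's LSB-first bit packing, where each code is OR'd into a 16-bit accumulator at the current bit offset and flushed to the byte stream low byte first. The following standalone C# sketch (hypothetical names, not part of the deleted file) illustrates the same scheme.

using System;
using System.Collections.Generic;

static class BitWriterSketch
{
    const int BufSize = 16;                        // accumulator width, mirrors Buf_size
    static int biBuf;                              // bit accumulator
    static int biValid;                            // number of valid bits in biBuf
    static readonly List<byte> Output = new List<byte>();

    // Append `length` low-order bits of `value`, LSB first (like send_bits).
    static void SendBits(int value, int length)
    {
        if (biValid > BufSize - length)
        {
            // Accumulator would overflow: emit its 16 bits (low byte first),
            // then keep the bits of `value` that did not fit.
            biBuf |= (value << biValid) & 0xffff;
            Output.Add((byte)biBuf);
            Output.Add((byte)(biBuf >> 8));
            biBuf = (int)((uint)value >> (BufSize - biValid));
            biValid += length - BufSize;
        }
        else
        {
            biBuf |= (value << biValid) & 0xffff;
            biValid += length;
        }
    }

    // Flush any remaining bits, zero-padding the final byte (like bi_windup).
    static void Windup()
    {
        if (biValid > 8)
        {
            Output.Add((byte)biBuf);
            Output.Add((byte)(biBuf >> 8));
        }
        else if (biValid > 0)
        {
            Output.Add((byte)biBuf);
        }
        biBuf = 0;
        biValid = 0;
    }

    static void Main()
    {
        SendBits(0b101, 3);     // lands in the low bits of the first byte
        SendBits(0x1F, 5);
        SendBits(0x3FF, 10);    // spills across the 16-bit boundary
        Windup();
        Console.WriteLine(BitConverter.ToString(Output.ToArray())); // FD-FF-03
    }
}
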
diff --git a/SabreTools.Library/External/Zlib/DeflateStream.cs b/SabreTools.Library/External/Zlib/DeflateStream.cs
deleted file mode 100644
index ae9e2d80..00000000
--- a/SabreTools.Library/External/Zlib/DeflateStream.cs
+++ /dev/null
@@ -1,759 +0,0 @@
-// DeflateStream.cs
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2009-2010 Dino Chiesa.
-// All rights reserved.
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-//
-// last saved (in emacs):
-// Time-stamp: <2011-July-31 14:48:11>
-//
-// ------------------------------------------------------------------
-//
-// This module defines the DeflateStream class, which can be used as a replacement for
-// the System.IO.Compression.DeflateStream class in the .NET BCL.
-//
-// ------------------------------------------------------------------
-
-using System;
-
-namespace Ionic.Zlib
-{
- ///
- /// A class for compressing and decompressing streams using the Deflate algorithm.
- ///
- ///
- ///
- ///
- ///
- /// The DeflateStream is a Decorator on a stream. It adds DEFLATE compression or decompression to any
- /// stream.
- ///
- ///
- ///
- /// Using this stream, applications can compress or decompress data via stream
- /// Read and Write operations. Either compression or decompression
- /// can occur through either reading or writing. The compression format used is
- /// DEFLATE, which is documented in IETF RFC 1951, "DEFLATE
- /// Compressed Data Format Specification version 1.3.".
- ///
- ///
- ///
- /// This class is similar to ZlibStream, except that
- /// ZlibStream adds the RFC
- /// 1950 - ZLIB framing bytes to a compressed stream when compressing, or
- /// expects the RFC1950 framing bytes when decompressing. The DeflateStream
- /// does not.
- ///
- ///
- ///
- ///
- ///
- ///
- public class DeflateStream : System.IO.Stream
- {
- internal ZlibBaseStream _baseStream;
- internal System.IO.Stream _innerStream;
- bool _disposed;
-
- ///
- /// Create a DeflateStream using the specified CompressionMode.
- ///
- ///
- ///
- /// When mode is CompressionMode.Compress, the DeflateStream will use
- /// the default compression level. The "captive" stream will be closed when
- /// the DeflateStream is closed.
- ///
- ///
- ///
- /// This example uses a DeflateStream to compress data from a file, and writes
- /// the compressed data to another file.
- ///
- /// using (System.IO.Stream input = System.IO.File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (var raw = System.IO.File.Create(fileToCompress + ".deflated"))
- /// {
- /// using (Stream compressor = new DeflateStream(raw, CompressionMode.Compress))
- /// {
- /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
- /// int n;
- /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
- /// {
- /// compressor.Write(buffer, 0, n);
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- ///
- /// Using input As Stream = File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using raw As FileStream = File.Create(fileToCompress & ".deflated")
- /// Using compressor As Stream = New DeflateStream(raw, CompressionMode.Compress)
- /// Dim buffer As Byte() = New Byte(4096) {}
- /// Dim n As Integer = -1
- /// Do While (n <> 0)
- /// If (n > 0) Then
- /// compressor.Write(buffer, 0, n)
- /// End If
- /// n = input.Read(buffer, 0, buffer.Length)
- /// Loop
- /// End Using
- /// End Using
- /// End Using
- ///
- ///
- /// The stream which will be read or written.
- /// Indicates whether the DeflateStream will compress or decompress.
- public DeflateStream(System.IO.Stream stream, CompressionMode mode)
- : this(stream, mode, CompressionLevel.Default, false)
- {
- }
-
- ///
- /// Create a DeflateStream using the specified CompressionMode and the specified CompressionLevel.
- ///
- ///
- ///
- ///
- ///
- /// When mode is CompressionMode.Decompress, the level parameter is
- /// ignored. The "captive" stream will be closed when the DeflateStream is
- /// closed.
- ///
- ///
- ///
- ///
- ///
- ///
- /// This example uses a DeflateStream to compress data from a file, and writes
- /// the compressed data to another file.
- ///
- ///
- /// using (System.IO.Stream input = System.IO.File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (var raw = System.IO.File.Create(fileToCompress + ".deflated"))
- /// {
- /// using (Stream compressor = new DeflateStream(raw,
- /// CompressionMode.Compress,
- /// CompressionLevel.BestCompression))
- /// {
- /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
- /// int n= -1;
- /// while (n != 0)
- /// {
- /// if (n > 0)
- /// compressor.Write(buffer, 0, n);
- /// n= input.Read(buffer, 0, buffer.Length);
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- ///
- /// Using input As Stream = File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using raw As FileStream = File.Create(fileToCompress & ".deflated")
- /// Using compressor As Stream = New DeflateStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression)
- /// Dim buffer As Byte() = New Byte(4096) {}
- /// Dim n As Integer = -1
- /// Do While (n <> 0)
- /// If (n > 0) Then
- /// compressor.Write(buffer, 0, n)
- /// End If
- /// n = input.Read(buffer, 0, buffer.Length)
- /// Loop
- /// End Using
- /// End Using
- /// End Using
- ///
- ///
- /// The stream to be read or written while deflating or inflating.
- /// Indicates whether the DeflateStream will compress or decompress.
- /// A tuning knob to trade speed for effectiveness.
- public DeflateStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level)
- : this(stream, mode, level, false)
- {
- }
-
- ///
- /// Create a DeflateStream using the specified
- /// CompressionMode, and explicitly specify whether the
- /// stream should be left open after Deflation or Inflation.
- ///
- ///
- ///
- ///
- ///
- /// This constructor allows the application to request that the captive stream
- /// remain open after the deflation or inflation occurs. By default, after
- /// Close() is called on the stream, the captive stream is also
- /// closed. In some cases this is not desired, for example if the stream is a
- /// memory stream that will be re-read after compression. Specify true for
- /// the leaveOpen parameter to leave the stream open.
- ///
- ///
- ///
- /// The DeflateStream will use the default compression level.
- ///
- ///
- ///
- /// See the other overloads of this constructor for example code.
- ///
- ///
- ///
- ///
- /// The stream which will be read or written. This is called the
- /// "captive" stream in other places in this documentation.
- ///
- ///
- ///
- /// Indicates whether the DeflateStream will compress or decompress.
- ///
- ///
- /// true if the application would like the stream to
- /// remain open after inflation/deflation.
- public DeflateStream(System.IO.Stream stream, CompressionMode mode, bool leaveOpen)
- : this(stream, mode, CompressionLevel.Default, leaveOpen)
- {
- }
-
- ///
- /// Create a DeflateStream using the specified CompressionMode
- /// and the specified CompressionLevel, and explicitly specify whether
- /// the stream should be left open after Deflation or Inflation.
- ///
- ///
- ///
- ///
- ///
- /// When mode is CompressionMode.Decompress, the level parameter is ignored.
- ///
- ///
- ///
- /// This constructor allows the application to request that the captive stream
- /// remain open after the deflation or inflation occurs. By default, after
- /// Close() is called on the stream, the captive stream is also
- /// closed. In some cases this is not desired, for example if the stream is a
- /// memory stream that will be re-read after
- /// compression. Specify true for the leaveOpen parameter
- /// to leave the stream open.
- ///
- ///
- ///
- ///
- ///
- ///
- /// This example shows how to use a DeflateStream to compress data from
- /// a file, and store the compressed data into another file.
- ///
- ///
- /// using (var output = System.IO.File.Create(fileToCompress + ".deflated"))
- /// {
- /// using (System.IO.Stream input = System.IO.File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (Stream compressor = new DeflateStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, true))
- /// {
- /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
- /// int n= -1;
- /// while (n != 0)
- /// {
- /// if (n > 0)
- /// compressor.Write(buffer, 0, n);
- /// n= input.Read(buffer, 0, buffer.Length);
- /// }
- /// }
- /// }
- /// // can write additional data to the output stream here
- /// }
- ///
- ///
- ///
- /// Using output As FileStream = File.Create(fileToCompress & ".deflated")
- /// Using input As Stream = File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using compressor As Stream = New DeflateStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, True)
- /// Dim buffer As Byte() = New Byte(4096) {}
- /// Dim n As Integer = -1
- /// Do While (n <> 0)
- /// If (n > 0) Then
- /// compressor.Write(buffer, 0, n)
- /// End If
- /// n = input.Read(buffer, 0, buffer.Length)
- /// Loop
- /// End Using
- /// End Using
- /// ' can write additional data to the output stream here.
- /// End Using
- ///
- ///
- /// The stream which will be read or written.
- /// Indicates whether the DeflateStream will compress or decompress.
- /// true if the application would like the stream to remain open after inflation/deflation.
- /// A tuning knob to trade speed for effectiveness.
- public DeflateStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
- {
- _innerStream = stream;
- _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen);
- }
-
- #region Zlib properties
-
- ///
- /// This property sets the flush behavior on the stream.
- ///
- /// See the ZLIB documentation for the meaning of the flush behavior.
- ///
- virtual public FlushType FlushMode
- {
- get { return (this._baseStream._flushMode); }
- set
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("DeflateStream");
- }
- this._baseStream._flushMode = value;
- }
- }
-
- ///
- /// The size of the working buffer for the compression codec.
- ///
- ///
- ///
- ///
- /// The working buffer is used for all stream operations. The default size is
- /// 1024 bytes. The minimum size is 128 bytes. You may get better performance
- /// with a larger buffer. Then again, you might not. You would have to test
- /// it.
- ///
- ///
- ///
- /// Set this before the first call to Read() or Write() on the
- /// stream. If you try to set it afterwards, it will throw.
- ///
- ///
- public int BufferSize
- {
- get
- {
- return this._baseStream._bufferSize;
- }
- set
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("DeflateStream");
- }
- if (this._baseStream._workingBuffer != null)
- {
- throw new ZlibException("The working buffer is already set.");
- }
- if (value < ZlibConstants.WorkingBufferSizeMin)
- {
- throw new ZlibException(String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, ZlibConstants.WorkingBufferSizeMin));
- }
- this._baseStream._bufferSize = value;
- }
- }
-
- ///
- /// The ZLIB strategy to be used during compression.
- ///
- ///
- ///
- /// By tweaking this parameter, you may be able to optimize the compression for
- /// data with particular characteristics.
- ///
- public CompressionStrategy Strategy
- {
- get
- {
- return this._baseStream.Strategy;
- }
- set
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("DeflateStream");
- }
- this._baseStream.Strategy = value;
- }
- }
-
- /// Returns the total number of bytes input so far.
- virtual public long TotalIn
- {
- get
- {
- return this._baseStream._z.TotalBytesIn;
- }
- }
-
- /// Returns the total number of bytes output so far.
- virtual public long TotalOut
- {
- get
- {
- return this._baseStream._z.TotalBytesOut;
- }
- }
-
- #endregion
-
- #region System.IO.Stream methods
-
- ///
- /// Dispose the stream.
- ///
- ///
- ///
- /// This may or may not result in a Close() call on the captive
- /// stream. See the constructors that have a leaveOpen parameter
- /// for more information.
- ///
- ///
- /// Application code won't call this code directly. This method may be
- /// invoked in two distinct scenarios. If disposing == true, the method
- /// has been called directly or indirectly by a user's code, for example
- /// via the public Dispose() method. In this case, both managed and
- /// unmanaged resources can be referenced and disposed. If disposing ==
- /// false, the method has been called by the runtime from inside the
- /// object finalizer and this method should not reference other objects;
- /// in that case only unmanaged resources must be referenced or
- /// disposed.
- ///
- ///
- ///
- /// true if the Dispose method was invoked by user code.
- ///
- protected override void Dispose(bool disposing)
- {
- try
- {
- if (!_disposed)
- {
- if (disposing && (this._baseStream != null))
- {
- this._baseStream.Close();
- }
- _disposed = true;
- }
- }
- finally
- {
- base.Dispose(disposing);
- }
- }
-
- ///
- /// Indicates whether the stream can be read.
- ///
- ///
- /// The return value depends on whether the captive stream supports reading.
- ///
- public override bool CanRead
- {
- get
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("DeflateStream");
- }
- return _baseStream._stream.CanRead;
- }
- }
-
- ///
- /// Indicates whether the stream supports Seek operations.
- ///
- ///
- /// Always returns false.
- ///
- public override bool CanSeek
- {
- get { return false; }
- }
-
- ///
- /// Indicates whether the stream can be written.
- ///
- ///
- /// The return value depends on whether the captive stream supports writing.
- ///
- public override bool CanWrite
- {
- get
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("DeflateStream");
- }
- return _baseStream._stream.CanWrite;
- }
- }
-
- ///
- /// Flush the stream.
- ///
- public override void Flush()
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("DeflateStream");
- }
- _baseStream.Flush();
- }
-
- ///
- /// Reading this property always throws a NotImplementedException.
- ///
- public override long Length
- {
- get { throw new NotImplementedException(); }
- }
-
- ///
- /// The position of the stream pointer.
- ///
- ///
- ///
- /// Setting this property always throws a NotImplementedException. Reading will return the total bytes
- /// written out, if used in writing, or the total bytes read in, if used in
- /// reading. The count may refer to compressed bytes or uncompressed bytes,
- /// depending on how you've used the stream.
- ///
- public override long Position
- {
- get
- {
- if (this._baseStream._streamMode == ZlibBaseStream.StreamMode.Writer)
- {
- return this._baseStream._z.TotalBytesOut;
- }
- if (this._baseStream._streamMode == ZlibBaseStream.StreamMode.Reader)
- {
- return this._baseStream._z.TotalBytesIn;
- }
- return 0;
- }
- set { throw new NotImplementedException(); }
- }
-
- ///
- /// Read data from the stream.
- ///
- ///
- ///
- ///
- /// If you wish to use the DeflateStream to compress data while
- /// reading, you can create a DeflateStream with
- /// CompressionMode.Compress, providing an uncompressed data stream.
- /// Then call Read() on that DeflateStream, and the data read will be
- /// compressed as you read. If you wish to use the DeflateStream to
- /// decompress data while reading, you can create a DeflateStream with
- /// CompressionMode.Decompress, providing a readable compressed data
- /// stream. Then call Read() on that DeflateStream, and the data read
- /// will be decompressed as you read.
- ///
- ///
- ///
- /// A DeflateStream can be used for Read() or Write(), but not both.
- ///
- ///
- ///
- /// The buffer into which the read data should be placed.
- /// the offset within that data array to put the first byte read.
- /// the number of bytes to read.
- /// the number of bytes actually read
- public override int Read(byte[] buffer, int offset, int count)
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("DeflateStream");
- }
- return _baseStream.Read(buffer, offset, count);
- }
-
- ///
- /// Calling this method always throws a NotImplementedException.
- ///
- /// this is irrelevant, since it will always throw!
- /// this is irrelevant, since it will always throw!
- /// irrelevant!
- public override long Seek(long offset, System.IO.SeekOrigin origin)
- {
- throw new NotImplementedException();
- }
-
- ///
- /// Calling this method always throws a NotImplementedException.
- ///
- /// this is irrelevant, since it will always throw!
- public override void SetLength(long value)
- {
- throw new NotImplementedException();
- }
-
- ///
- /// Write data to the stream.
- ///
- ///
- ///
- ///
- /// If you wish to use the DeflateStream to compress data while
- /// writing, you can create a DeflateStream with
- /// CompressionMode.Compress, and a writable output stream. Then call
- /// Write() on that DeflateStream, providing uncompressed data
- /// as input. The data sent to the output stream will be the compressed form
- /// of the data written. If you wish to use the DeflateStream to
- /// decompress data while writing, you can create a DeflateStream with
- /// CompressionMode.Decompress, and a writable output stream. Then
- /// call Write() on that stream, providing previously compressed
- /// data. The data sent to the output stream will be the decompressed form of
- /// the data written.
- ///
- ///
- ///
- /// A DeflateStream can be used for Read() or Write(),
- /// but not both.
- ///
- ///
- ///
- ///
- /// The buffer holding data to write to the stream.
- /// the offset within that data array to find the first byte to write.
- /// the number of bytes to write.
- public override void Write(byte[] buffer, int offset, int count)
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("DeflateStream");
- }
- _baseStream.Write(buffer, offset, count);
- }
-
- #endregion
-
- ///
- /// Compress a string into a byte array using DEFLATE (RFC 1951).
- ///
- ///
- ///
- /// Uncompress it with DeflateStream.UncompressString(byte[]).
- ///
- ///
- /// DeflateStream.UncompressString(byte[])
- /// DeflateStream.CompressBuffer(byte[])
- /// GZipStream.CompressString(string)
- /// ZlibStream.CompressString(string)
- ///
- ///
- /// A string to compress. The string will first be encoded
- /// using UTF8, then compressed.
- ///
- ///
- /// The string in compressed form
- public static byte[] CompressString(String s)
- {
- using (var ms = new System.IO.MemoryStream())
- {
- System.IO.Stream compressor =
- new DeflateStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression);
- ZlibBaseStream.CompressString(s, compressor);
- return ms.ToArray();
- }
- }
-
- ///
- /// Compress a byte array into a new byte array using DEFLATE.
- ///
- ///
- ///
- /// Uncompress it with DeflateStream.UncompressBuffer(byte[]).
- ///
- ///
- /// DeflateStream.CompressString(string)
- /// DeflateStream.UncompressBuffer(byte[])
- /// GZipStream.CompressBuffer(byte[])
- /// ZlibStream.CompressBuffer(byte[])
- ///
- ///
- /// A buffer to compress.
- ///
- ///
- /// The data in compressed form
- public static byte[] CompressBuffer(byte[] b)
- {
- using (var ms = new System.IO.MemoryStream())
- {
- System.IO.Stream compressor =
- new DeflateStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression);
-
- ZlibBaseStream.CompressBuffer(b, compressor);
- return ms.ToArray();
- }
- }
-
- ///
- /// Uncompress a DEFLATE'd byte array into a single string.
- ///
- ///
- /// DeflateStream.CompressString(String)
- /// DeflateStream.UncompressBuffer(byte[])
- /// GZipStream.UncompressString(byte[])
- /// ZlibStream.UncompressString(byte[])
- ///
- ///
- /// A buffer containing DEFLATE-compressed data.
- ///
- ///
- /// The uncompressed string
- public static String UncompressString(byte[] compressed)
- {
- using (var input = new System.IO.MemoryStream(compressed))
- {
- System.IO.Stream decompressor =
- new DeflateStream(input, CompressionMode.Decompress);
-
- return ZlibBaseStream.UncompressString(compressed, decompressor);
- }
- }
-
- ///
- /// Uncompress a DEFLATE'd byte array into a byte array.
- ///
- ///
- /// DeflateStream.CompressBuffer(byte[])
- /// DeflateStream.UncompressString(byte[])
- /// GZipStream.UncompressBuffer(byte[])
- /// ZlibStream.UncompressBuffer(byte[])
- ///
- ///
- /// A buffer containing data that has been compressed with DEFLATE.
- ///
- ///
- /// The data in uncompressed form
- public static byte[] UncompressBuffer(byte[] compressed)
- {
- using (var input = new System.IO.MemoryStream(compressed))
- {
- System.IO.Stream decompressor =
- new DeflateStream(input, CompressionMode.Decompress);
-
- return ZlibBaseStream.UncompressBuffer(compressed, decompressor);
- }
- }
- }
-}
-
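The static helpers documented just above (CompressString, CompressBuffer, UncompressString, UncompressBuffer) were the simplest entry points into the deleted DeflateStream. A minimal round-trip sketch, assuming the Ionic.Zlib namespace is still referenced from DotNetZip; the class name, sample string, and buffers below are illustrative only:

using Ionic.Zlib;

static class DeflateHelpersSketch
{
    static void Main()
    {
        // String round trip: UTF-8 encode, DEFLATE, then inflate back to a string.
        string original = "hello, deflate";
        byte[] packed = DeflateStream.CompressString(original);
        string restored = DeflateStream.UncompressString(packed);
        System.Console.WriteLine(restored == original);               // expected: True

        // Byte-array round trip using the buffer-oriented helpers.
        byte[] raw = { 1, 2, 3, 4, 5 };
        byte[] packedBytes = DeflateStream.CompressBuffer(raw);
        byte[] restoredBytes = DeflateStream.UncompressBuffer(packedBytes);
        System.Console.WriteLine(restoredBytes.Length == raw.Length); // expected: True
    }
}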
diff --git a/SabreTools.Library/External/Zlib/GZipStream.cs b/SabreTools.Library/External/Zlib/GZipStream.cs
deleted file mode 100644
index 44edaafa..00000000
--- a/SabreTools.Library/External/Zlib/GZipStream.cs
+++ /dev/null
@@ -1,1072 +0,0 @@
-// GZipStream.cs
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
-// All rights reserved.
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-//
-// last saved (in emacs):
-// Time-stamp: <2011-August-08 18:14:39>
-//
-// ------------------------------------------------------------------
-//
-// This module defines the GZipStream class, which can be used as a replacement for
-// the System.IO.Compression.GZipStream class in the .NET BCL. NB: The design is not
-// completely OO clean: there is some intelligence in the ZlibBaseStream that reads the
-// GZip header.
-//
-// ------------------------------------------------------------------
-
-using System;
-
-#if MONO
-using System.IO;
-#else
-using Alphaleonis.Win32.Filesystem;
-
-using MemoryStream = System.IO.MemoryStream;
-using SeekOrigin = System.IO.SeekOrigin;
-using Stream = System.IO.Stream;
-#endif
-
-namespace Ionic.Zlib
-{
- ///
- /// A class for compressing and decompressing GZIP streams.
- ///
- ///
- ///
- ///
- /// The GZipStream is a Decorator on a
- /// Stream. It adds GZIP compression or decompression to any
- /// stream.
- ///
- ///
- ///
- /// Like the System.IO.Compression.GZipStream in the .NET Base Class Library, the
- /// Ionic.Zlib.GZipStream can compress while writing, or decompress while
- /// reading, but not vice versa. The compression method used is GZIP, which is
- /// documented in IETF RFC
- /// 1952, "GZIP file format specification version 4.3".
- ///
- ///
- /// A GZipStream can be used to decompress data (through Read()) or
- /// to compress data (through Write()), but not both.
- ///
- ///
- ///
- /// If you wish to use the GZipStream to compress data, you must wrap it
- /// around a write-able stream. As you call Write() on the GZipStream, the
- /// data will be compressed into the GZIP format. If you want to decompress data,
- /// you must wrap the GZipStream around a readable stream that contains an
- /// IETF RFC 1952-compliant stream. The data will be decompressed as you call
- /// Read() on the GZipStream.
- ///
- ///
- ///
- /// Though the GZIP format allows data from multiple files to be concatenated
- /// together, this stream handles only a single segment of GZIP format, typically
- /// representing a single file.
- ///
- ///
- ///
- /// This class is similar to ZlibStream and DeflateStream.
- /// ZlibStream handles RFC1950-compliant streams.
- /// DeflateStream handles RFC1951-compliant streams. This class handles RFC1952-compliant streams.
- ///
- ///
- ///
- ///
- ///
- ///
- public class GZipStream : System.IO.Stream
- {
- // GZip format
- // source: http://tools.ietf.org/html/rfc1952
- //
- // header id: 2 bytes 1F 8B
- // compress method 1 byte 8= DEFLATE (none other supported)
- // flag 1 byte bitfield (See below)
- // mtime 4 bytes time_t (seconds since Jan 1, 1970 UTC) of the file.
- // xflg 1 byte 2 = max compress used , 4 = max speed (can be ignored)
- // OS 1 byte OS for originating archive. set to 0xFF in compression.
- // extra field length 2 bytes optional - only if FEXTRA is set.
- // extra field varies
- // filename varies optional - if FNAME is set. zero terminated. ISO-8859-1.
- // file comment varies optional - if FCOMMENT is set. zero terminated. ISO-8859-1.
- // crc16 2 bytes optional - present only if FHCRC bit is set
- // compressed data varies
- // CRC32 4 bytes
- // isize 4 bytes data size modulo 2^32
- //
- // FLG (FLaGs)
- // bit 0 FTEXT - indicates file is ASCII text (can be safely ignored)
- // bit 1 FHCRC - there is a CRC16 for the header immediately following the header
- // bit 2 FEXTRA - extra fields are present
- // bit 3 FNAME - the zero-terminated filename is present. encoding: ISO-8859-1.
- // bit 4 FCOMMENT - a zero-terminated file comment is present. encoding: ISO-8859-1
- // bit 5 reserved
- // bit 6 reserved
- // bit 7 reserved
- //
- // On consumption:
- // Extra field is a bunch of nonsense and can be safely ignored.
- // Header CRC and OS, likewise.
- //
- // on generation:
- // all optional fields get 0, except for the OS, which gets 255.
- //
-
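// Illustrative sketch (not part of the deleted file): decoding the FLG byte described in
// the table above. The bit positions come from RFC 1952; the method name and parameter
// below are assumptions made for this example only.
static void DescribeGzipFlags(byte flg)
{
    bool ftext    = (flg & 0x01) != 0; // bit 0: FTEXT, content is probably ASCII text
    bool fhcrc    = (flg & 0x02) != 0; // bit 1: FHCRC, 2-byte header CRC16 follows the header
    bool fextra   = (flg & 0x04) != 0; // bit 2: FEXTRA, extra field present
    bool fname    = (flg & 0x08) != 0; // bit 3: FNAME, zero-terminated ISO-8859-1 filename present
    bool fcomment = (flg & 0x10) != 0; // bit 4: FCOMMENT, zero-terminated ISO-8859-1 comment present
    System.Console.WriteLine("FTEXT={0} FHCRC={1} FEXTRA={2} FNAME={3} FCOMMENT={4}",
        ftext, fhcrc, fextra, fname, fcomment);
}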
- ///
- /// The comment on the GZIP stream.
- ///
- ///
- ///
- ///
- /// The GZIP format allows for each file to optionally have an associated
- /// comment stored with the file. The comment is encoded with the ISO-8859-1
- /// code page. To include a comment in a GZIP stream you create, set this
- /// property before calling Write() for the first time on the
- /// GZipStream.
- ///
- ///
- ///
- /// When using GZipStream to decompress, you can retrieve this property
- /// after the first call to Read(). If no comment has been set in the
- /// GZIP bytestream, the Comment property will return null
- /// (Nothing in VB).
- ///
- ///
- public String Comment
- {
- get
- {
- return _Comment;
- }
- set
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("GZipStream");
- }
- _Comment = value;
- }
- }
-
- ///
- /// The FileName for the GZIP stream.
- ///
- ///
- ///
- ///
- ///
- /// The GZIP format optionally allows each file to have an associated
- /// filename. When compressing data (through Write()), set this
- /// FileName before calling Write() the first time on the GZipStream.
- /// The actual filename is encoded into the GZIP bytestream with the
- /// ISO-8859-1 code page, according to RFC 1952. It is the application's
- /// responsibility to ensure that the FileName can be encoded and decoded
- /// correctly with this code page.
- ///
- ///
- ///
- /// When decompressing (through Read()), you can retrieve this value
- /// any time after the first Read(). In the case where there was no filename
- /// encoded into the GZIP bytestream, the property will return null (Nothing
- /// in VB).
- ///
- ///
- public String FileName
- {
- get { return _FileName; }
- set
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("GZipStream");
- }
- _FileName = value;
- if (_FileName == null)
- {
- return;
- }
- if (_FileName.IndexOf("/") != -1)
- {
- _FileName = _FileName.Replace("/", "\\");
- }
- if (_FileName.EndsWith("\\"))
- {
- throw new Exception("Illegal filename");
- }
- if (_FileName.IndexOf("\\") != -1)
- {
- // trim any leading path
- _FileName = Path.GetFileName(_FileName);
- }
- }
- }
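// Illustrative note (not part of the deleted file): the setter above normalizes forward
// slashes to backslashes and then strips any directory part, so setting
// FileName = "subdir/readme.txt" stores just "readme.txt", while a value ending in a
// path separator throws.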
-
- ///
- /// The last modified time for the GZIP stream.
- ///
- ///
- ///
- /// GZIP allows the storage of a last modified time with each GZIP entry.
- /// When compressing data, you can set this before the first call to
- /// Write(). When decompressing, you can retrieve this value any time
- /// after the first call to Read().
- ///
- public DateTime? LastModified;
-
- ///
- /// The CRC on the GZIP stream.
- ///
- ///
- /// This is used for internal error checking. You probably don't need to look at this property.
- ///
- public int Crc32 { get { return _Crc32; } }
-
- private int _headerByteCount;
- internal ZlibBaseStream _baseStream;
- bool _disposed;
- bool _firstReadDone;
- string _FileName;
- string _Comment;
- int _Crc32;
-
- ///
- /// Create a GZipStream using the specified CompressionMode.
- ///
- ///
- ///
- ///
- /// When mode is CompressionMode.Compress, the GZipStream will use the
- /// default compression level.
- ///
- ///
- ///
- /// As noted in the class documentation, the CompressionMode (Compress
- /// or Decompress) also establishes the "direction" of the stream. A
- /// GZipStream with CompressionMode.Compress works only through
- /// Write(). A GZipStream with
- /// CompressionMode.Decompress works only through Read().
- ///
- ///
- ///
- ///
- ///
- /// This example shows how to use a GZipStream to compress data.
- ///
- /// using (System.IO.Stream input = System.IO.File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (var raw = System.IO.File.Create(outputFile))
- /// {
- /// using (Stream compressor = new GZipStream(raw, CompressionMode.Compress))
- /// {
- /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
- /// int n;
- /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
- /// {
- /// compressor.Write(buffer, 0, n);
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- /// Dim outputFile As String = (fileToCompress & ".compressed")
- /// Using input As Stream = File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using raw As FileStream = File.Create(outputFile)
- /// Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress)
- /// Dim buffer As Byte() = New Byte(4096) {}
- /// Dim n As Integer = -1
- /// Do While (n <> 0)
- /// If (n > 0) Then
- /// compressor.Write(buffer, 0, n)
- /// End If
- /// n = input.Read(buffer, 0, buffer.Length)
- /// Loop
- /// End Using
- /// End Using
- /// End Using
- ///
- ///
- ///
- ///
- /// This example shows how to use a GZipStream to uncompress a file.
- ///
- /// private void GunZipFile(string filename)
- /// {
- /// if (!filename.EndsWith(".gz"))
- /// throw new ArgumentException("filename");
- /// var DecompressedFile = filename.Substring(0,filename.Length-3);
- /// byte[] working = new byte[WORKING_BUFFER_SIZE];
- /// int n= 1;
- /// using (System.IO.Stream input = System.IO.File.Open(filename, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (Stream decompressor= new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, true))
- /// {
- /// using (var output = System.IO.File.Create(DecompressedFile))
- /// {
- /// while (n !=0)
- /// {
- /// n= decompressor.Read(working, 0, working.Length);
- /// if (n > 0)
- /// {
- /// output.Write(working, 0, n);
- /// }
- /// }
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- ///
- /// Private Sub GunZipFile(ByVal filename as String)
- /// If Not (filename.EndsWith(".gz")) Then
- /// Throw New ArgumentException("filename")
- /// End If
- /// Dim DecompressedFile as String = filename.Substring(0,filename.Length-3)
- /// Dim working(WORKING_BUFFER_SIZE) as Byte
- /// Dim n As Integer = 1
- /// Using input As Stream = File.Open(filename, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using decompressor As Stream = new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, True)
- /// Using output As Stream = File.Create(DecompressedFile)
- /// Do
- /// n= decompressor.Read(working, 0, working.Length)
- /// If n > 0 Then
- /// output.Write(working, 0, n)
- /// End IF
- /// Loop While (n > 0)
- /// End Using
- /// End Using
- /// End Using
- /// End Sub
- ///
- ///
- ///
- /// The stream which will be read or written.
- /// Indicates whether the GZipStream will compress or decompress.
- public GZipStream(Stream stream, CompressionMode mode)
- : this(stream, mode, CompressionLevel.Default, false)
- {
- }
-
- ///
- /// Create a GZipStream using the specified CompressionMode and
- /// the specified CompressionLevel.
- ///
- ///
- ///
- ///
- /// The CompressionMode (Compress or Decompress) also establishes the
- /// "direction" of the stream. A GZipStream with
- /// CompressionMode.Compress works only through Write(). A
- /// GZipStream with CompressionMode.Decompress works only
- /// through Read().
- ///
- ///
- ///
- ///
- ///
- ///
- /// This example shows how to use a GZipStream to compress a file into a .gz file.
- ///
- ///
- /// using (System.IO.Stream input = System.IO.File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (var raw = System.IO.File.Create(fileToCompress + ".gz"))
- /// {
- /// using (Stream compressor = new GZipStream(raw,
- /// CompressionMode.Compress,
- /// CompressionLevel.BestCompression))
- /// {
- /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
- /// int n;
- /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
- /// {
- /// compressor.Write(buffer, 0, n);
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- ///
- /// Using input As Stream = File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using raw As FileStream = File.Create(fileToCompress & ".gz")
- /// Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression)
- /// Dim buffer As Byte() = New Byte(4096) {}
- /// Dim n As Integer = -1
- /// Do While (n <> 0)
- /// If (n > 0) Then
- /// compressor.Write(buffer, 0, n)
- /// End If
- /// n = input.Read(buffer, 0, buffer.Length)
- /// Loop
- /// End Using
- /// End Using
- /// End Using
- ///
- ///
- /// The stream to be read or written while deflating or inflating.
- /// Indicates whether the GZipStream will compress or decompress.
- /// A tuning knob to trade speed for effectiveness.
- public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level)
- : this(stream, mode, level, false)
- {
- }
-
- ///
- /// Create a GZipStream using the specified CompressionMode, and
- /// explicitly specify whether the stream should be left open after Deflation
- /// or Inflation.
- ///
- ///
- ///
- ///
- /// This constructor allows the application to request that the captive stream
- /// remain open after the deflation or inflation occurs. By default, after
- /// Close() is called on the stream, the captive stream is also
- /// closed. In some cases this is not desired, for example if the stream is a
- /// memory stream that will be re-read after compressed data has been written
- /// to it. Specify true for the leaveOpen parameter to leave
- /// the stream open.
- ///
- ///
- ///
- /// The CompressionMode (Compress or Decompress) also
- /// establishes the "direction" of the stream. A GZipStream with
- /// CompressionMode.Compress works only through Write(). A GZipStream
- /// with CompressionMode.Decompress works only through Read().
- ///
- ///
- ///
- /// The GZipStream will use the default compression level. If you want
- /// to specify the compression level, see the constructor that also takes a CompressionLevel.
- ///
- ///
- ///
- /// See the other overloads of this constructor for example code.
- ///
- ///
- ///
- ///
- ///
- /// The stream which will be read or written. This is called the "captive"
- /// stream in other places in this documentation.
- ///
- ///
- /// Indicates whether the GZipStream will compress or decompress.
- ///
- ///
- ///
- /// true if the application would like the base stream to remain open after
- /// inflation/deflation.
- ///
- public GZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
- : this(stream, mode, CompressionLevel.Default, leaveOpen)
- {
- }
-
- ///
- /// Create a GZipStream using the specified CompressionMode and the
- /// specified CompressionLevel, and explicitly specify whether the
- /// stream should be left open after Deflation or Inflation.
- ///
- ///
- ///
- ///
- ///
- /// This constructor allows the application to request that the captive stream
- /// remain open after the deflation or inflation occurs. By default, after
- /// Close() is called on the stream, the captive stream is also
- /// closed. In some cases this is not desired, for example if the stream is a
- /// memory stream that will be re-read after compressed data has been written
- /// to it. Specify true for the leaveOpen parameter to
- /// leave the stream open.
- ///
- ///
- ///
- /// As noted in the class documentation, the CompressionMode (Compress
- /// or Decompress) also establishes the "direction" of the stream. A
- /// GZipStream with CompressionMode.Compress works only through
- /// Write(). A GZipStream with CompressionMode.Decompress works only
- /// through Read().
- ///
- ///
- ///
- ///
- ///
- /// This example shows how to use a GZipStream to compress data.
- ///
- /// using (System.IO.Stream input = System.IO.File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (var raw = System.IO.File.Create(outputFile))
- /// {
- /// using (Stream compressor = new GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression, true))
- /// {
- /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
- /// int n;
- /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
- /// {
- /// compressor.Write(buffer, 0, n);
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- /// Dim outputFile As String = (fileToCompress & ".compressed")
- /// Using input As Stream = File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using raw As FileStream = File.Create(outputFile)
- /// Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression, True)
- /// Dim buffer As Byte() = New Byte(4096) {}
- /// Dim n As Integer = -1
- /// Do While (n <> 0)
- /// If (n > 0) Then
- /// compressor.Write(buffer, 0, n)
- /// End If
- /// n = input.Read(buffer, 0, buffer.Length)
- /// Loop
- /// End Using
- /// End Using
- /// End Using
- ///
- ///
- /// The stream which will be read or written.
- /// Indicates whether the GZipStream will compress or decompress.
- /// true if the application would like the stream to remain open after inflation/deflation.
- /// A tuning knob to trade speed for effectiveness.
- public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
- {
- _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen);
- }
-
- #region Zlib properties
-
- ///
- /// This property sets the flush behavior on the stream.
- ///
- virtual public FlushType FlushMode
- {
- get { return (this._baseStream._flushMode); }
- set {
- if (_disposed)
- {
- throw new ObjectDisposedException("GZipStream");
- }
- this._baseStream._flushMode = value;
- }
- }
-
- ///
- /// The size of the working buffer for the compression codec.
- ///
- ///
- ///
- ///
- /// The working buffer is used for all stream operations. The default size is
- /// 1024 bytes. The minimum size is 128 bytes. You may get better performance
- /// with a larger buffer. Then again, you might not. You would have to test
- /// it.
- ///
- ///
- ///
- /// Set this before the first call to Read() or Write() on the
- /// stream. If you try to set it afterwards, it will throw.
- ///
- ///
- public int BufferSize
- {
- get
- {
- return this._baseStream._bufferSize;
- }
- set
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("GZipStream");
- }
- if (this._baseStream._workingBuffer != null)
- {
- throw new ZlibException("The working buffer is already set.");
- }
- if (value < ZlibConstants.WorkingBufferSizeMin)
- {
- throw new ZlibException(String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, ZlibConstants.WorkingBufferSizeMin));
- }
- this._baseStream._bufferSize = value;
- }
- }
-
- /// Returns the total number of bytes input so far.
- virtual public long TotalIn
- {
- get
- {
- return this._baseStream._z.TotalBytesIn;
- }
- }
-
- /// Returns the total number of bytes output so far.
- virtual public long TotalOut
- {
- get
- {
- return this._baseStream._z.TotalBytesOut;
- }
- }
-
- #endregion
-
- #region Stream methods
-
- ///
- /// Dispose the stream.
- ///
- ///
- ///
- /// This may or may not result in a Close() call on the captive
- /// stream. See the constructors that have a leaveOpen parameter
- /// for more information.
- ///
- ///
- /// This method may be invoked in two distinct scenarios. If disposing
- /// == true, the method has been called directly or indirectly by a
- /// user's code, for example via the public Dispose() method. In this
- /// case, both managed and unmanaged resources can be referenced and
- /// disposed. If disposing == false, the method has been called by the
- /// runtime from inside the object finalizer and this method should not
- /// reference other objects; in that case only unmanaged resources must
- /// be referenced or disposed.
- ///
- ///
- ///
- /// indicates whether the Dispose method was invoked by user code.
- ///
- protected override void Dispose(bool disposing)
- {
- try
- {
- if (!_disposed)
- {
- if (disposing && (this._baseStream != null))
- {
- this._baseStream.Close();
- this._Crc32 = _baseStream.Crc32;
- }
- _disposed = true;
- }
- }
- finally
- {
- base.Dispose(disposing);
- }
- }
-
- ///
- /// Indicates whether the stream can be read.
- ///
- ///
- /// The return value depends on whether the captive stream supports reading.
- ///
- public override bool CanRead
- {
- get
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("GZipStream");
- }
- return _baseStream._stream.CanRead;
- }
- }
-
- ///
- /// Indicates whether the stream supports Seek operations.
- ///
- ///
- /// Always returns false.
- ///
- public override bool CanSeek
- {
- get { return false; }
- }
-
- ///
- /// Indicates whether the stream can be written.
- ///
- ///
- /// The return value depends on whether the captive stream supports writing.
- ///
- public override bool CanWrite
- {
- get
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("GZipStream");
- }
- return _baseStream._stream.CanWrite;
- }
- }
-
- ///
- /// Flush the stream.
- ///
- public override void Flush()
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("GZipStream");
- }
- _baseStream.Flush();
- }
-
- ///
- /// Reading this property always throws a NotImplementedException.
- ///
- public override long Length
- {
- get { throw new NotImplementedException(); }
- }
-
- ///
- /// The position of the stream pointer.
- ///
- ///
- ///
- /// Setting this property always throws a NotImplementedException. Reading will return the total bytes
- /// written out, if used in writing, or the total bytes read in, if used in
- /// reading. The count may refer to compressed bytes or uncompressed bytes,
- /// depending on how you've used the stream.
- ///
- public override long Position
- {
- get
- {
- if (this._baseStream._streamMode == ZlibBaseStream.StreamMode.Writer)
- {
- return this._baseStream._z.TotalBytesOut + _headerByteCount;
- }
- if (this._baseStream._streamMode == ZlibBaseStream.StreamMode.Reader)
- {
- return this._baseStream._z.TotalBytesIn + this._baseStream._gzipHeaderByteCount;
- }
- return 0;
- }
-
- set { throw new NotImplementedException(); }
- }
-
- ///
- /// Read and decompress data from the source stream.
- ///
- ///
- ///
- /// With a GZipStream, decompression is done through reading.
- ///
- ///
- ///
- ///
- /// byte[] working = new byte[WORKING_BUFFER_SIZE];
- /// using (System.IO.Stream input = System.IO.File.Open(_CompressedFile, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (Stream decompressor= new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, true))
- /// {
- /// using (var output = System.IO.File.Create(_DecompressedFile))
- /// {
- /// int n;
- /// while ((n= decompressor.Read(working, 0, working.Length)) !=0)
- /// {
- /// output.Write(working, 0, n);
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- /// The buffer into which the decompressed data should be placed.
- /// the offset within that data array to put the first byte read.
- /// the number of bytes to read.
- /// the number of bytes actually read
- public override int Read(byte[] buffer, int offset, int count)
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("GZipStream");
- }
- int n = _baseStream.Read(buffer, offset, count);
-
- // Console.WriteLine("GZipStream::Read(buffer, off({0}), c({1}) = {2}", offset, count, n);
- // Console.WriteLine( Util.FormatByteArray(buffer, offset, n) );
-
- if (!_firstReadDone)
- {
- _firstReadDone = true;
- FileName = _baseStream._GzipFileName;
- Comment = _baseStream._GzipComment;
- }
- return n;
- }
-
- ///
- /// Calling this method always throws a NotImplementedException.
- ///
- /// irrelevant; it will always throw!
- /// irrelevant; it will always throw!
- /// irrelevant!
- public override long Seek(long offset, SeekOrigin origin)
- {
- throw new NotImplementedException();
- }
-
- ///
- /// Calling this method always throws a NotImplementedException.
- ///
- /// irrelevant; this method will always throw!
- public override void SetLength(long value)
- {
- throw new NotImplementedException();
- }
-
- ///
- /// Write data to the stream.
- ///
- ///
- ///
- ///
- /// If you wish to use the GZipStream to compress data while writing,
- /// you can create a GZipStream with CompressionMode.Compress, and a
- /// writable output stream. Then call Write() on that GZipStream,
- /// providing uncompressed data as input. The data sent to the output stream
- /// will be the compressed form of the data written.
- ///
- ///
- ///
- /// A GZipStream can be used for Read() or Write(), but not
- /// both. Writing implies compression. Reading implies decompression.
- ///
- ///
- ///
- /// The buffer holding data to write to the stream.
- /// the offset within that data array to find the first byte to write.
- /// the number of bytes to write.
- public override void Write(byte[] buffer, int offset, int count)
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("GZipStream");
- }
- if (_baseStream._streamMode == ZlibBaseStream.StreamMode.Undefined)
- {
- //Console.WriteLine("GZipStream: First write");
- if (_baseStream._wantCompress)
- {
- // first write in compression, therefore, emit the GZIP header
- _headerByteCount = EmitHeader();
- }
- else
- {
- throw new InvalidOperationException();
- }
- }
-
- _baseStream.Write(buffer, offset, count);
- }
-
- #endregion
-
- internal static readonly System.DateTime _unixEpoch = new System.DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
-#if SILVERLIGHT || NETCF
- internal static readonly System.Text.Encoding iso8859dash1 = new Ionic.Encoding.Iso8859Dash1Encoding();
-#else
- internal static readonly System.Text.Encoding iso8859dash1 = System.Text.Encoding.GetEncoding("iso-8859-1");
-#endif
-
- private int EmitHeader()
- {
- byte[] commentBytes = (Comment == null) ? null : iso8859dash1.GetBytes(Comment);
- byte[] filenameBytes = (FileName == null) ? null : iso8859dash1.GetBytes(FileName);
-
- int cbLength = (Comment == null) ? 0 : commentBytes.Length + 1;
- int fnLength = (FileName == null) ? 0 : filenameBytes.Length + 1;
-
- int bufferLength = 10 + cbLength + fnLength;
- byte[] header = new byte[bufferLength];
- int i = 0;
- // ID
- header[i++] = 0x1F;
- header[i++] = 0x8B;
-
- // compression method
- header[i++] = 8;
- byte flag = 0;
- if (Comment != null)
- {
- flag ^= 0x10;
- }
- if (FileName != null)
- {
- flag ^= 0x8;
- }
-
- // flag
- header[i++] = flag;
-
- // mtime
- if (!LastModified.HasValue)
- {
- LastModified = DateTime.Now;
- }
- System.TimeSpan delta = LastModified.Value - _unixEpoch;
- Int32 timet = (Int32)delta.TotalSeconds;
- Array.Copy(BitConverter.GetBytes(timet), 0, header, i, 4);
- i += 4;
-
- // xflg
- header[i++] = 0; // this field is totally useless
- // OS
- header[i++] = 0xFF; // 0xFF == unspecified
-
- // extra field length - only if FEXTRA is set, which it is not.
- //header[i++]= 0;
- //header[i++]= 0;
-
- // filename
- if (fnLength != 0)
- {
- Array.Copy(filenameBytes, 0, header, i, fnLength - 1);
- i += fnLength - 1;
- header[i++] = 0; // terminate
- }
-
- // comment
- if (cbLength != 0)
- {
- Array.Copy(commentBytes, 0, header, i, cbLength - 1);
- i += cbLength - 1;
- header[i++] = 0; // terminate
- }
-
- _baseStream._stream.Write(header, 0, header.Length);
-
- return header.Length; // bytes written
- }
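// Illustrative sketch (not part of the deleted file): EmitHeader() above stores
// LastModified as a 32-bit count of seconds since the Unix epoch; reversing that
// conversion when reading a header back looks like this. The helper name is an
// assumption made for this example only.
static System.DateTime MTimeToDateTime(int secondsSinceUnixEpoch)
{
    var unixEpoch = new System.DateTime(1970, 1, 1, 0, 0, 0, System.DateTimeKind.Utc);
    return unixEpoch.AddSeconds(secondsSinceUnixEpoch);
}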
-
- ///
- /// Compress a string into a byte array using GZip.
- ///
- ///
- ///
- /// Uncompress it with GZipStream.UncompressString(byte[]).
- ///
- ///
- ///
- ///
- ///
- ///
- /// A string to compress. The string will first be encoded
- /// using UTF8, then compressed.
- ///
- ///
- /// The string in compressed form
- public static byte[] CompressString(String s)
- {
- using (var ms = new MemoryStream())
- {
- System.IO.Stream compressor =
- new GZipStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression);
- ZlibBaseStream.CompressString(s, compressor);
- return ms.ToArray();
- }
- }
-
- ///
- /// Compress a byte array into a new byte array using GZip.
- ///
- ///
- ///
- /// Uncompress it with GZipStream.UncompressBuffer(byte[]).
- ///
- ///
- ///
- ///
- ///
- ///
- /// A buffer to compress.
- ///
- ///
- /// The data in compressed form
- public static byte[] CompressBuffer(byte[] b)
- {
- using (var ms = new MemoryStream())
- {
- System.IO.Stream compressor =
- new GZipStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression);
-
- ZlibBaseStream.CompressBuffer(b, compressor);
- return ms.ToArray();
- }
- }
-
- ///
- /// Uncompress a GZip'ed byte array into a single string.
- ///
- ///
- ///
- ///
- ///
- ///
- /// A buffer containing GZIP-compressed data.
- ///
- ///
- /// The uncompressed string
- public static String UncompressString(byte[] compressed)
- {
- using (var input = new MemoryStream(compressed))
- {
- Stream decompressor = new GZipStream(input, CompressionMode.Decompress);
- return ZlibBaseStream.UncompressString(compressed, decompressor);
- }
- }
-
- ///
- /// Uncompress a GZip'ed byte array into a byte array.
- ///
- ///
- ///
- ///
- ///
- ///
- /// A buffer containing data that has been compressed with GZip.
- ///
- ///
- /// The data in uncompressed form
- public static byte[] UncompressBuffer(byte[] compressed)
- {
- using (var input = new System.IO.MemoryStream(compressed))
- {
- System.IO.Stream decompressor =
- new GZipStream( input, CompressionMode.Decompress );
-
- return ZlibBaseStream.UncompressBuffer(compressed, decompressor);
- }
- }
- }
-}
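For reference, a minimal in-memory round trip through the GZipStream API that this deleted file documented, assuming the Ionic.Zlib types from DotNetZip remain available elsewhere; the class name, payload, and buffer size are illustrative only:

using System;
using System.IO;
using Ionic.Zlib;

static class GZipStreamSketch
{
    static void Main()
    {
        byte[] payload = System.Text.Encoding.UTF8.GetBytes("hello, gzip");
        byte[] gz;

        // Compress: metadata set before the first Write() is emitted into the header.
        using (var ms = new MemoryStream())
        {
            using (var gzip = new GZipStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression, true))
            {
                gzip.FileName = "hello.txt"; // emitted with the FNAME flag
                gzip.Comment = "demo";       // emitted with the FCOMMENT flag
                gzip.Write(payload, 0, payload.Length);
            }
            gz = ms.ToArray();               // leaveOpen: true keeps ms readable here
        }

        // Decompress: FileName and Comment become available after the first Read().
        using (var input = new MemoryStream(gz))
        using (var gunzip = new GZipStream(input, CompressionMode.Decompress))
        using (var output = new MemoryStream())
        {
            byte[] buffer = new byte[4096];
            int n;
            while ((n = gunzip.Read(buffer, 0, buffer.Length)) != 0)
            {
                output.Write(buffer, 0, n);
            }
            Console.WriteLine(gunzip.FileName);                 // "hello.txt"
            Console.WriteLine(output.Length == payload.Length); // expected: True
        }
    }
}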
diff --git a/SabreTools.Library/External/Zlib/InfTree.cs b/SabreTools.Library/External/Zlib/InfTree.cs
deleted file mode 100644
index 1e05bec8..00000000
--- a/SabreTools.Library/External/Zlib/InfTree.cs
+++ /dev/null
@@ -1,441 +0,0 @@
-// Inftree.cs
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
-// All rights reserved.
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-//
-// last saved (in emacs):
-// Time-stamp: <2009-October-28 12:43:54>
-//
-// ------------------------------------------------------------------
-//
-// This module defines classes used in decompression. This code is derived
-// from the jzlib implementation of zlib. In keeping with the license for jzlib,
-// the copyright to that code is below.
-//
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in
-// the documentation and/or other materials provided with the distribution.
-//
-// 3. The names of the authors may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
-// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
-// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
-// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// -----------------------------------------------------------------------
-//
-// This program is based on zlib-1.1.3; credit to authors
-// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
-// and contributors of zlib.
-//
-// -----------------------------------------------------------------------
-
-using System;
-
-namespace Ionic.Zlib
-{
- sealed class InfTree
- {
- private const int MANY = 1440;
-
- private const int Z_OK = 0;
- private const int Z_STREAM_END = 1;
- private const int Z_NEED_DICT = 2;
- private const int Z_ERRNO = - 1;
- private const int Z_STREAM_ERROR = - 2;
- private const int Z_DATA_ERROR = - 3;
- private const int Z_MEM_ERROR = - 4;
- private const int Z_BUF_ERROR = - 5;
- private const int Z_VERSION_ERROR = - 6;
-
- internal const int fixed_bl = 9;
- internal const int fixed_bd = 5;
-
- //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_tl'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
- internal static readonly int[] fixed_tl = new int[]{96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 192, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 160, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 224, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 144, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 208, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 176, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 240, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 200, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 168, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 232, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 152, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 216, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 184, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 248, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 196, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 164, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 228, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 148, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 212, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 180, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 244, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 204, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 172, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 236, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 156, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 220, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 188, 0, 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 252, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 194, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 162, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 226, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 146, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 210, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 178, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 242, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 202, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 170, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 234, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 154, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 218, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 186,
- 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 250, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 198, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 166, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 230, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 150, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 214, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 182, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 246, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 206, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 174, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 238, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 158, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 222, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 190, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 254, 96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 193, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 161, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 225, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 145, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 209, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 177, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 241, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 201, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 169, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 233, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 153, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 217, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 185, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 249, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 197, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 165, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 229, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 149, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 213, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 181, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 245, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 205, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 173, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 237, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 157, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 221, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 189, 0, 8,
- 14, 0, 8, 142, 0, 8, 78, 0, 9, 253, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 195, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 163, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 227, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 147, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 211, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 179, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 243, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 203, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 171, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 235, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 155, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 219, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 187, 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 251, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 199, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 167, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 231, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 151, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 215, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 183, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 247, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 207, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 175, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 239, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 159, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 223, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 191, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 255};
- //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_td'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
- internal static readonly int[] fixed_td = new int[]{80, 5, 1, 87, 5, 257, 83, 5, 17, 91, 5, 4097, 81, 5, 5, 89, 5, 1025, 85, 5, 65, 93, 5, 16385, 80, 5, 3, 88, 5, 513, 84, 5, 33, 92, 5, 8193, 82, 5, 9, 90, 5, 2049, 86, 5, 129, 192, 5, 24577, 80, 5, 2, 87, 5, 385, 83, 5, 25, 91, 5, 6145, 81, 5, 7, 89, 5, 1537, 85, 5, 97, 93, 5, 24577, 80, 5, 4, 88, 5, 769, 84, 5, 49, 92, 5, 12289, 82, 5, 13, 90, 5, 3073, 86, 5, 193, 192, 5, 24577};
-
- // Tables for deflate from PKZIP's appnote.txt.
- //UPGRADE_NOTE: Final was removed from the declaration of 'cplens'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
- internal static readonly int[] cplens = new int[]{3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
-
- // see note #13 above about 258
- //UPGRADE_NOTE: Final was removed from the declaration of 'cplext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
- internal static readonly int[] cplext = new int[]{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112};
-
- //UPGRADE_NOTE: Final was removed from the declaration of 'cpdist'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
- internal static readonly int[] cpdist = new int[]{1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577};
-
- //UPGRADE_NOTE: Final was removed from the declaration of 'cpdext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
- internal static readonly int[] cpdext = new int[]{0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
-
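// Illustrative sketch (not part of the deleted file): how the cplens/cplext tables above
// are read. Length code 257 + i has base length cplens[i] and cplext[i] extra bits, so a
// decoded match length is the base plus the value of those extra bits (for example,
// code 265 has base 11 with 1 extra bit, giving lengths 11-12). The helper below is an
// assumption made for this example only.
static int DecodeMatchLength(int lengthCode, int extraBitsValue)
{
    int i = lengthCode - 257;          // index into cplens/cplext
    return cplens[i] + extraBitsValue; // base length plus the extra-bit value
}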
- // If BMAX needs to be larger than 16, then h and x[] should be uLong.
- internal const int BMAX = 15; // maximum bit length of any code
-
- internal int[] hn = null; // hufts used in space
- internal int[] v = null; // work area for huft_build
- internal int[] c = null; // bit length count table
- internal int[] r = null; // table entry for structure assignment
- internal int[] u = null; // table stack
- internal int[] x = null; // bit offsets, then code stack
-
- private int huft_build(int[] b, int bindex, int n, int s, int[] d, int[] e, int[] t, int[] m, int[] hp, int[] hn, int[] v)
- {
- // Given a list of code lengths and a maximum table size, make a set of
- // tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
- // if the given code set is incomplete (the tables are still built in this
- // case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
- // lengths), or Z_MEM_ERROR if not enough memory.
-
- int a; // counter for codes of length k
- int f; // i repeats in table every f entries
- int g; // maximum code length
- int h; // table level
- int i; // counter, current code
- int j; // counter
- int k; // number of bits in current code
- int l; // bits per table (returned in m)
- int mask; // (1 << w) - 1, to avoid cc -O bug on HP
- int p; // pointer into c[], b[], or v[]
- int q; // points to current table
- int w; // bits before this table == (l * h)
- int xp; // pointer into x
- int y; // number of dummy codes added
- int z; // number of entries in current table
-
- // Generate counts for each bit length
-
- p = 0; i = n;
- do
- {
- c[b[bindex + p]]++; p++; i--; // assume all entries <= BMAX
- }
- while (i != 0);
-
- if (c[0] == n)
- {
- // null input--all zero length codes
- t[0] = - 1;
- m[0] = 0;
- return Z_OK;
- }
-
- // Find minimum and maximum length, bound *m by those
- l = m[0];
- for (j = 1; j <= BMAX; j++)
- {
- if (c[j] != 0)
- {
- break;
- }
- }
- k = j; // minimum code length
- if (l < j)
- {
- l = j;
- }
- for (i = BMAX; i != 0; i--)
- {
- if (c[i] != 0)
- {
- break;
- }
- }
- g = i; // maximum code length
- if (l > i)
- {
- l = i;
- }
- m[0] = l;
-
- // Adjust last length count to fill out codes, if needed
- for (y = 1 << j; j < i; j++, y <<= 1)
- {
- if ((y -= c[j]) < 0)
- {
- return Z_DATA_ERROR;
- }
- }
- if ((y -= c[i]) < 0)
- {
- return Z_DATA_ERROR;
- }
- c[i] += y;
-
- // Generate starting offsets into the value table for each length
- x[1] = j = 0;
- p = 1; xp = 2;
- while (--i != 0)
- {
- // note that i == g from above
- x[xp] = (j += c[p]);
- xp++;
- p++;
- }
-
- // Make a table of values in order of bit lengths
- i = 0; p = 0;
- do
- {
- if ((j = b[bindex + p]) != 0)
- {
- v[x[j]++] = i;
- }
- p++;
- }
- while (++i < n);
- n = x[g]; // set n to length of v
-
- // Generate the Huffman codes and for each, make the table entries
- x[0] = i = 0; // first Huffman code is zero
- p = 0; // grab values in bit order
- h = - 1; // no tables yet--level -1
- w = - l; // bits decoded == (l * h)
- u[0] = 0; // just to keep compilers happy
- q = 0; // ditto
- z = 0; // ditto
-
- // go through the bit lengths (k already is bits in shortest code)
- for (; k <= g; k++)
- {
- a = c[k];
- while (a-- != 0)
- {
- // here i is the Huffman code of length k bits for value *p
- // make tables up to required level
- while (k > w + l)
- {
- h++;
- w += l; // previous table always l bits
- // compute minimum size table less than or equal to l bits
- z = g - w;
- z = (z > l)?l:z; // table size upper limit
- if ((f = 1 << (j = k - w)) > a + 1)
- {
- // try a k-w bit table
- // too few codes for k-w bit table
- f -= (a + 1); // deduct codes from patterns left
- xp = k;
- if (j < z)
- {
- while (++j < z)
- {
- // try smaller tables up to z bits
- if ((f <<= 1) <= c[++xp])
- {
- break; // enough codes to use up j bits
- }
- f -= c[xp]; // else deduct codes from patterns
- }
- }
- }
- z = 1 << j; // table entries for j-bit table
-
- // allocate new table
- if (hn[0] + z > MANY)
- {
- // (note: doesn't matter for fixed)
- return Z_DATA_ERROR; // overflow of MANY
- }
- u[h] = q = hn[0]; // DEBUG
- hn[0] += z;
-
- // connect to last table, if there is one
- if (h != 0)
- {
- x[h] = i; // save pattern for backing up
- r[0] = (sbyte) j; // bits in this table
- r[1] = (sbyte) l; // bits to dump before this table
- j = SharedUtils.URShift(i, (w - l));
- r[2] = (int) (q - u[h - 1] - j); // offset to this table
- Array.Copy(r, 0, hp, (u[h - 1] + j) * 3, 3); // connect to last table
- }
- else
- {
- t[0] = q; // first table is returned result
- }
- }
-
- // set up table entry in r
- r[1] = (sbyte) (k - w);
- if (p >= n)
- {
- r[0] = 128 + 64; // out of values--invalid code
- }
- else if (v[p] < s)
- {
- r[0] = (sbyte) (v[p] < 256?0:32 + 64); // 256 is end-of-block
- r[2] = v[p++]; // simple code is just the value
- }
- else
- {
- r[0] = (sbyte) (e[v[p] - s] + 16 + 64); // non-simple--look up in lists
- r[2] = d[v[p++] - s];
- }
-
- // fill code-like entries with r
- f = 1 << (k - w);
- for (j = SharedUtils.URShift(i, w); j < z; j += f)
- {
- Array.Copy(r, 0, hp, (q + j) * 3, 3);
- }
-
- // backwards increment the k-bit code i
- for (j = 1 << (k - 1); (i & j) != 0; j = SharedUtils.URShift(j, 1))
- {
- i ^= j;
- }
- i ^= j;
-
- // backup over finished tables
- mask = (1 << w) - 1; // needed on HP, cc -O bug
- while ((i & mask) != x[h])
- {
- h--; // don't need to update q
- w -= l;
- mask = (1 << w) - 1;
- }
- }
- }
- // Return Z_BUF_ERROR if we were given an incomplete table
- return y != 0 && g != 1?Z_BUF_ERROR:Z_OK;
- }
-
- internal int inflate_trees_bits(int[] c, int[] bb, int[] tb, int[] hp, ZlibCodec z)
- {
- int result;
- initWorkArea(19);
- hn[0] = 0;
- result = huft_build(c, 0, 19, 19, null, null, tb, bb, hp, hn, v);
-
- if (result == Z_DATA_ERROR)
- {
- z.Message = "oversubscribed dynamic bit lengths tree";
- }
- else if (result == Z_BUF_ERROR || bb[0] == 0)
- {
- z.Message = "incomplete dynamic bit lengths tree";
- result = Z_DATA_ERROR;
- }
- return result;
- }
-
- internal int inflate_trees_dynamic(int nl, int nd, int[] c, int[] bl, int[] bd, int[] tl, int[] td, int[] hp, ZlibCodec z)
- {
- int result;
-
- // build literal/length tree
- initWorkArea(288);
- hn[0] = 0;
- result = huft_build(c, 0, nl, 257, cplens, cplext, tl, bl, hp, hn, v);
- if (result != Z_OK || bl[0] == 0)
- {
- if (result == Z_DATA_ERROR)
- {
- z.Message = "oversubscribed literal/length tree";
- }
- else if (result != Z_MEM_ERROR)
- {
- z.Message = "incomplete literal/length tree";
- result = Z_DATA_ERROR;
- }
- return result;
- }
-
- // build distance tree
- initWorkArea(288);
- result = huft_build(c, nl, nd, 0, cpdist, cpdext, td, bd, hp, hn, v);
-
- if (result != Z_OK || (bd[0] == 0 && nl > 257))
- {
- if (result == Z_DATA_ERROR)
- {
- z.Message = "oversubscribed distance tree";
- }
- else if (result == Z_BUF_ERROR)
- {
- z.Message = "incomplete distance tree";
- result = Z_DATA_ERROR;
- }
- else if (result != Z_MEM_ERROR)
- {
- z.Message = "empty distance tree with lengths";
- result = Z_DATA_ERROR;
- }
- return result;
- }
-
- return Z_OK;
- }
-
- internal static int inflate_trees_fixed(int[] bl, int[] bd, int[][] tl, int[][] td, ZlibCodec z)
- {
- bl[0] = fixed_bl;
- bd[0] = fixed_bd;
- tl[0] = fixed_tl;
- td[0] = fixed_td;
- return Z_OK;
- }
-
- private void initWorkArea(int vsize)
- {
- if (hn == null)
- {
- hn = new int[1];
- v = new int[vsize];
- c = new int[BMAX + 1];
- r = new int[3];
- u = new int[BMAX];
- x = new int[BMAX + 1];
- }
- else
- {
- if (v.Length < vsize)
- {
- v = new int[vsize];
- }
- Array.Clear(v,0,vsize);
- Array.Clear(c,0,BMAX+1);
- r[0]=0; r[1]=0; r[2]=0;
- // for(int i=0; i<BMAX+1; i++) x[i]=0;
- Array.Clear(x,0,BMAX+1);
- }
- }
- }
- }
-//
-// ------------------------------------------------------------------
-//
-// This module defines classes for decompression. This code is derived
-// from the jzlib implementation of zlib, but significantly modified.
-// The object model is not the same, and many of the behaviors are
-// different. Nonetheless, in keeping with the license for jzlib, I am
-// reproducing the copyright to that code here.
-//
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in
-// the documentation and/or other materials provided with the distribution.
-//
-// 3. The names of the authors may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
-// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
-// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
-// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// -----------------------------------------------------------------------
-//
-// This program is based on zlib-1.1.3; credit to authors
-// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
-// and contributors of zlib.
-//
-// -----------------------------------------------------------------------
-
-using System;
-
-namespace Ionic.Zlib
-{
- sealed class InflateBlocks
- {
- private const int MANY = 1440;
-
- // Table for deflate from PKZIP's appnote.txt.
- internal static readonly int[] border = new int[]
- { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
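// Illustrative note (not part of the deleted file): border[] lists the positions, in
// transmission order, that the 3-bit code-length code lengths are assigned to in the
// BTREE state below; RFC 1951 fixes this order so the most commonly used symbols come
// first and trailing zero lengths can be omitted.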
-
- private enum InflateBlockMode
- {
- TYPE = 0, // get type bits (3, including end bit)
- LENS = 1, // get lengths for stored
- STORED = 2, // processing stored block
- TABLE = 3, // get table lengths
- BTREE = 4, // get bit lengths tree for a dynamic block
- DTREE = 5, // get length, distance trees for a dynamic block
- CODES = 6, // processing fixed or dynamic block
- DRY = 7, // output remaining window bytes
- DONE = 8, // finished last block, done
- BAD = 9, // got a data error--stuck here
- }
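// Illustrative note (not part of the deleted file): Process() below walks these states
// for each block: TYPE reads the 3 header bits, then goes to LENS/STORED for stored
// blocks, straight to CODES for fixed-Huffman blocks, or TABLE -> BTREE -> DTREE ->
// CODES for dynamic-Huffman blocks; DRY flushes the remaining window after the last
// block, and DONE/BAD are terminal.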
-
- private InflateBlockMode mode; // current inflate_block mode
-
- internal int left; // if STORED, bytes left to copy
-
- internal int table; // table lengths (14 bits)
- internal int index; // index into blens (or border)
- internal int[] blens; // bit lengths of codes
- internal int[] bb = new int[1]; // bit length tree depth
- internal int[] tb = new int[1]; // bit length decoding tree
-
- internal InflateCodes codes = new InflateCodes(); // if CODES, current state
-
- internal int last; // true if this block is the last block
-
- internal ZlibCodec _codec; // pointer back to this zlib stream
-
- // mode independent information
- internal int bitk; // bits in bit buffer
- internal int bitb; // bit buffer
- internal int[] hufts; // single malloc for tree space
- internal byte[] window; // sliding window
- internal int end; // one byte after sliding window
- internal int readAt; // window read pointer
- internal int writeAt; // window write pointer
- internal System.Object checkfn; // check function
- internal uint check; // check on output
-
- internal InfTree inftree = new InfTree();
-
- internal InflateBlocks(ZlibCodec codec, System.Object checkfn, int w)
- {
- _codec = codec;
- hufts = new int[MANY * 3];
- window = new byte[w];
- end = w;
- this.checkfn = checkfn;
- mode = InflateBlockMode.TYPE;
- Reset();
- }
-
- internal uint Reset()
- {
- uint oldCheck = check;
- mode = InflateBlockMode.TYPE;
- bitk = 0;
- bitb = 0;
- readAt = writeAt = 0;
-
- if (checkfn != null)
- {
- _codec._Adler32 = check = Adler.Adler32(0, null, 0, 0);
- }
- return oldCheck;
- }
-
- internal int Process(int r)
- {
- int t; // temporary storage
- int b; // bit buffer
- int k; // bits in bit buffer
- int p; // input data pointer
- int n; // bytes available there
- int q; // output window write pointer
- int m; // bytes to end of window or read pointer
-
- // copy input/output information to locals (UPDATE macro restores)
- p = _codec.NextIn;
- n = _codec.AvailableBytesIn;
- b = bitb;
- k = bitk;
-
- q = writeAt;
- m = (int)(q < readAt ? readAt - q - 1 : end - q);
-
- // process input based on current state
- while (true)
- {
- switch (mode)
- {
- case InflateBlockMode.TYPE:
-
- while (k < (3))
- {
- if (n != 0)
- {
- r = ZlibConstants.Z_OK;
- }
- else
- {
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
-
- n--;
- b |= (_codec.InputBuffer[p++] & 0xff) << k;
- k += 8;
- }
- t = (int)(b & 7);
- last = t & 1;
-
- switch ((uint)t >> 1)
- {
- case 0: // stored
- b >>= 3; k -= (3);
- t = k & 7; // go to byte boundary
- b >>= t; k -= t;
- mode = InflateBlockMode.LENS; // get length of stored block
- break;
-
- case 1: // fixed
- int[] bl = new int[1];
- int[] bd = new int[1];
- int[][] tl = new int[1][];
- int[][] td = new int[1][];
- InfTree.inflate_trees_fixed(bl, bd, tl, td, _codec);
- codes.Init(bl[0], bd[0], tl[0], 0, td[0], 0);
- b >>= 3; k -= 3;
- mode = InflateBlockMode.CODES;
- break;
-
- case 2: // dynamic
- b >>= 3; k -= 3;
- mode = InflateBlockMode.TABLE;
- break;
-
- case 3: // illegal
- b >>= 3; k -= 3;
- mode = InflateBlockMode.BAD;
- _codec.Message = "invalid block type";
- r = ZlibConstants.Z_DATA_ERROR;
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
- break;
-
- case InflateBlockMode.LENS:
-
- while (k < (32))
- {
- if (n != 0)
- {
- r = ZlibConstants.Z_OK;
- }
- else
- {
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
- ;
- n--;
- b |= (_codec.InputBuffer[p++] & 0xff) << k;
- k += 8;
- }
-
- if ( ( ((~b)>>16) & 0xffff) != (b & 0xffff))
- {
- mode = InflateBlockMode.BAD;
- _codec.Message = "invalid stored block lengths";
- r = ZlibConstants.Z_DATA_ERROR;
-
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
- left = (b & 0xffff);
- b = k = 0; // dump bits
- mode = left != 0 ? InflateBlockMode.STORED : (last != 0 ? InflateBlockMode.DRY : InflateBlockMode.TYPE);
- break;
-
- case InflateBlockMode.STORED:
- if (n == 0)
- {
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
-
- if (m == 0)
- {
- if (q == end && readAt != 0)
- {
- q = 0; m = (int)(q < readAt ? readAt - q - 1 : end - q);
- }
- if (m == 0)
- {
- writeAt = q;
- r = Flush(r);
- q = writeAt; m = (int)(q < readAt ? readAt - q - 1 : end - q);
- if (q == end && readAt != 0)
- {
- q = 0; m = (int)(q < readAt ? readAt - q - 1 : end - q);
- }
- if (m == 0)
- {
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
- }
- }
- r = ZlibConstants.Z_OK;
-
- t = left;
- if (t > n)
- {
- t = n;
- }
- if (t > m)
- {
- t = m;
- }
- Array.Copy(_codec.InputBuffer, p, window, q, t);
- p += t; n -= t;
- q += t; m -= t;
- if ((left -= t) != 0)
- {
- break;
- }
- mode = last != 0 ? InflateBlockMode.DRY : InflateBlockMode.TYPE;
- break;
-
- case InflateBlockMode.TABLE:
-
- while (k < (14))
- {
- if (n != 0)
- {
- r = ZlibConstants.Z_OK;
- }
- else
- {
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
-
- n--;
- b |= (_codec.InputBuffer[p++] & 0xff) << k;
- k += 8;
- }
-
- table = t = (b & 0x3fff);
- if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
- {
- mode = InflateBlockMode.BAD;
- _codec.Message = "too many length or distance symbols";
- r = ZlibConstants.Z_DATA_ERROR;
-
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
- t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
- if (blens == null || blens.Length < t)
- {
- blens = new int[t];
- }
- else
- {
- Array.Clear(blens, 0, t);
- // for (int i = 0; i < t; i++)
- // {
- // blens[i] = 0;
- // }
- }
-
- b >>= 14;
- k -= 14;
-
- index = 0;
- mode = InflateBlockMode.BTREE;
- goto case InflateBlockMode.BTREE;
-
- case InflateBlockMode.BTREE:
- while (index < 4 + (table >> 10))
- {
- while (k < (3))
- {
- if (n != 0)
- {
- r = ZlibConstants.Z_OK;
- }
- else
- {
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
-
- n--;
- b |= (_codec.InputBuffer[p++] & 0xff) << k;
- k += 8;
- }
-
- blens[border[index++]] = b & 7;
-
- b >>= 3; k -= 3;
- }
-
- while (index < 19)
- {
- blens[border[index++]] = 0;
- }
-
- bb[0] = 7;
- t = inftree.inflate_trees_bits(blens, bb, tb, hufts, _codec);
- if (t != ZlibConstants.Z_OK)
- {
- r = t;
- if (r == ZlibConstants.Z_DATA_ERROR)
- {
- blens = null;
- mode = InflateBlockMode.BAD;
- }
-
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
-
- index = 0;
- mode = InflateBlockMode.DTREE;
- goto case InflateBlockMode.DTREE;
-
- case InflateBlockMode.DTREE:
- while (true)
- {
- t = table;
- if (!(index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f)))
- {
- break;
- }
-
- int i, j, c;
-
- t = bb[0];
-
- while (k < t)
- {
- if (n != 0)
- {
- r = ZlibConstants.Z_OK;
- }
- else
- {
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
-
- n--;
- b |= (_codec.InputBuffer[p++] & 0xff) << k;
- k += 8;
- }
-
- t = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 1];
- c = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 2];
-
- if (c < 16)
- {
- b >>= t; k -= t;
- blens[index++] = c;
- }
- else
- {
- // c == 16..18
- i = c == 18 ? 7 : c - 14;
- j = c == 18 ? 11 : 3;
-
- while (k < (t + i))
- {
- if (n != 0)
- {
- r = ZlibConstants.Z_OK;
- }
- else
- {
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
-
- n--;
- b |= (_codec.InputBuffer[p++] & 0xff) << k;
- k += 8;
- }
-
- b >>= t; k -= t;
-
- j += (b & InternalInflateConstants.InflateMask[i]);
-
- b >>= i; k -= i;
-
- i = index;
- t = table;
- if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || (c == 16 && i < 1))
- {
- blens = null;
- mode = InflateBlockMode.BAD;
- _codec.Message = "invalid bit length repeat";
- r = ZlibConstants.Z_DATA_ERROR;
-
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
-
- c = (c == 16) ? blens[i-1] : 0;
- do
- {
- blens[i++] = c;
- }
- while (--j != 0);
- index = i;
- }
- }
-
- tb[0] = -1;
- {
- int[] bl = new int[] { 9 }; // must be <= 9 for lookahead assumptions
- int[] bd = new int[] { 6 }; // must be <= 9 for lookahead assumptions
- int[] tl = new int[1];
- int[] td = new int[1];
-
- t = table;
- t = inftree.inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), blens, bl, bd, tl, td, hufts, _codec);
-
- if (t != ZlibConstants.Z_OK)
- {
- if (t == ZlibConstants.Z_DATA_ERROR)
- {
- blens = null;
- mode = InflateBlockMode.BAD;
- }
- r = t;
-
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
- codes.Init(bl[0], bd[0], hufts, tl[0], hufts, td[0]);
- }
- mode = InflateBlockMode.CODES;
- goto case InflateBlockMode.CODES;
-
- case InflateBlockMode.CODES:
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
-
- r = codes.Process(this, r);
- if (r != ZlibConstants.Z_STREAM_END)
- {
- return Flush(r);
- }
-
- r = ZlibConstants.Z_OK;
- p = _codec.NextIn;
- n = _codec.AvailableBytesIn;
- b = bitb;
- k = bitk;
- q = writeAt;
- m = (int)(q < readAt ? readAt - q - 1 : end - q);
-
- if (last == 0)
- {
- mode = InflateBlockMode.TYPE;
- break;
- }
- mode = InflateBlockMode.DRY;
- goto case InflateBlockMode.DRY;
-
- case InflateBlockMode.DRY:
- writeAt = q;
- r = Flush(r);
- q = writeAt; m = (int)(q < readAt ? readAt - q - 1 : end - q);
- if (readAt != writeAt)
- {
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
- mode = InflateBlockMode.DONE;
- goto case InflateBlockMode.DONE;
-
- case InflateBlockMode.DONE:
- r = ZlibConstants.Z_STREAM_END;
- bitb = b;
- bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
-
- case InflateBlockMode.BAD:
- r = ZlibConstants.Z_DATA_ERROR;
-
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
-
- default:
- r = ZlibConstants.Z_STREAM_ERROR;
-
- bitb = b; bitk = k;
- _codec.AvailableBytesIn = n;
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- writeAt = q;
- return Flush(r);
- }
- }
- }
-
- internal void Free()
- {
- Reset();
- window = null;
- hufts = null;
- }
-
- internal void SetDictionary(byte[] d, int start, int n)
- {
- Array.Copy(d, start, window, 0, n);
- readAt = writeAt = n;
- }
-
- // Returns 1 if inflate is currently at the end of a block generated
- // by Z_SYNC_FLUSH or Z_FULL_FLUSH, and 0 otherwise.
- internal int SyncPoint()
- {
- return mode == InflateBlockMode.LENS ? 1 : 0;
- }
-
- // copy as much as possible from the sliding window to the output area
- internal int Flush(int r)
- {
- int nBytes;
-
- for (int pass=0; pass < 2; pass++)
- {
- if (pass==0)
- {
- // compute number of bytes to copy as far as end of window
- nBytes = (int)((readAt <= writeAt ? writeAt : end) - readAt);
- }
- else
- {
- // compute bytes to copy
- nBytes = writeAt - readAt;
- }
-
- // workitem 8870
- if (nBytes == 0)
- {
- if (r == ZlibConstants.Z_BUF_ERROR)
- r = ZlibConstants.Z_OK;
- return r;
- }
-
- if (nBytes > _codec.AvailableBytesOut)
- {
- nBytes = _codec.AvailableBytesOut;
- }
-
- if (nBytes != 0 && r == ZlibConstants.Z_BUF_ERROR)
- {
- r = ZlibConstants.Z_OK;
- }
-
- // update counters
- _codec.AvailableBytesOut -= nBytes;
- _codec.TotalBytesOut += nBytes;
-
- // update check information
- if (checkfn != null)
- {
- _codec._Adler32 = check = Adler.Adler32(check, window, readAt, nBytes);
- }
-
- // copy as far as end of window
- Array.Copy(window, readAt, _codec.OutputBuffer, _codec.NextOut, nBytes);
- _codec.NextOut += nBytes;
- readAt += nBytes;
-
- // see if more to copy at beginning of window
- if (readAt == end && pass == 0)
- {
- // wrap pointers
- readAt = 0;
- if (writeAt == end)
- {
- writeAt = 0;
- }
- }
- else pass++;
- }
-
- // done
- return r;
- }
- }
-
- internal static class InternalInflateConstants
- {
- // And'ing with mask[n] masks the lower n bits
- internal static readonly int[] InflateMask = new int[] {
- 0x00000000, 0x00000001, 0x00000003, 0x00000007,
- 0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f,
- 0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff,
- 0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff };
- }
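
The mask table above is indexed by a bit count: InflateMask[n] has the low n bits set, so the decoder can peek the next n bits of its LSB-first bit buffer before consuming them. A minimal, self-contained sketch of that idiom (illustration only, not part of the deleted file):

// Illustration only: peeking `need` bits from an LSB-first bit buffer with a
// mask table shaped like InternalInflateConstants.InflateMask above.
static class InflateMaskDemo
{
    static readonly int[] Mask = { 0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };

    static int PeekBits(int bitBuffer, int need)
    {
        return bitBuffer & Mask[need]; // keep only the low `need` bits
    }

    static void Main()
    {
        int b = 0xD6;                              // bit buffer: ...1101_0110
        System.Console.WriteLine(PeekBits(b, 3));  // prints 6 (binary 110)
        b >>= 3;                                   // consume the bits, as the states above do
    }
}
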
-
- sealed class InflateCodes
- {
- // waiting for: "i:"=input, "o:"=output, "x:"=nothing
- private const int START = 0; // x: set up for LEN
- private const int LEN = 1; // i: get length/literal/eob next
- private const int LENEXT = 2; // i: getting length extra (have base)
- private const int DIST = 3; // i: get distance next
- private const int DISTEXT = 4; // i: getting distance extra
- private const int COPY = 5; // o: copying bytes in window, waiting for space
- private const int LIT = 6; // o: got literal, waiting for output space
- private const int WASH = 7; // o: got eob, possibly still output waiting
- private const int END = 8; // x: got eob and all data flushed
- private const int BADCODE = 9; // x: got error
-
- internal int mode; // current inflate_codes mode
-
- // mode dependent information
- internal int len;
-
- internal int[] tree; // pointer into tree
- internal int tree_index = 0;
- internal int need; // bits needed
-
- internal int lit;
-
- // if EXT or COPY, where and how much
- internal int bitsToGet; // bits to get for extra
- internal int dist; // distance back to copy from
-
- internal byte lbits; // ltree bits decoded per branch
- internal byte dbits; // dtree bits decoded per branch
- internal int[] ltree; // literal/length/eob tree
- internal int ltree_index; // literal/length/eob tree
- internal int[] dtree; // distance tree
- internal int dtree_index; // distance tree
-
- internal InflateCodes()
- {
- }
-
- internal void Init(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index)
- {
- mode = START;
- lbits = (byte)bl;
- dbits = (byte)bd;
- ltree = tl;
- ltree_index = tl_index;
- dtree = td;
- dtree_index = td_index;
- tree = null;
- }
-
- internal int Process(InflateBlocks blocks, int r)
- {
- int j; // temporary storage
- int tindex; // temporary pointer
- int e; // extra bits or operation
- int b = 0; // bit buffer
- int k = 0; // bits in bit buffer
- int p = 0; // input data pointer
- int n; // bytes available there
- int q; // output window write pointer
- int m; // bytes to end of window or read pointer
- int f; // pointer to copy strings from
-
- ZlibCodec z = blocks._codec;
-
- // copy input/output information to locals (UPDATE macro restores)
- p = z.NextIn;
- n = z.AvailableBytesIn;
- b = blocks.bitb;
- k = blocks.bitk;
- q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
-
- // process input and output based on current state
- while (true)
- {
- switch (mode)
- {
- // waiting for "i:"=input, "o:"=output, "x:"=nothing
- case START: // x: set up for LEN
- if (m >= 258 && n >= 10)
- {
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n;
- z.TotalBytesIn += p - z.NextIn;
- z.NextIn = p;
- blocks.writeAt = q;
- r = InflateFast(lbits, dbits, ltree, ltree_index, dtree, dtree_index, blocks, z);
-
- p = z.NextIn;
- n = z.AvailableBytesIn;
- b = blocks.bitb;
- k = blocks.bitk;
- q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
-
- if (r != ZlibConstants.Z_OK)
- {
- mode = (r == ZlibConstants.Z_STREAM_END) ? WASH : BADCODE;
- break;
- }
- }
- need = lbits;
- tree = ltree;
- tree_index = ltree_index;
-
- mode = LEN;
- goto case LEN;
-
- case LEN: // i: get length/literal/eob next
- j = need;
-
- while (k < j)
- {
- if (n != 0)
- {
- r = ZlibConstants.Z_OK;
- }
- else
- {
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n;
- z.TotalBytesIn += p - z.NextIn;
- z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
- }
- n--;
- b |= (z.InputBuffer[p++] & 0xff) << k;
- k += 8;
- }
-
- tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3;
-
- b >>= (tree[tindex + 1]);
- k -= (tree[tindex + 1]);
-
- e = tree[tindex];
-
- if (e == 0)
- {
- // literal
- lit = tree[tindex + 2];
- mode = LIT;
- break;
- }
- if ((e & 16) != 0)
- {
- // length
- bitsToGet = e & 15;
- len = tree[tindex + 2];
- mode = LENEXT;
- break;
- }
- if ((e & 64) == 0)
- {
- // next table
- need = e;
- tree_index = tindex / 3 + tree[tindex + 2];
- break;
- }
- if ((e & 32) != 0)
- {
- // end of block
- mode = WASH;
- break;
- }
- mode = BADCODE; // invalid code
- z.Message = "invalid literal/length code";
- r = ZlibConstants.Z_DATA_ERROR;
-
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n;
- z.TotalBytesIn += p - z.NextIn;
- z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
-
- case LENEXT: // i: getting length extra (have base)
- j = bitsToGet;
-
- while (k < j)
- {
- if (n != 0)
- {
- r = ZlibConstants.Z_OK;
- }
- else
- {
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
- }
- n--; b |= (z.InputBuffer[p++] & 0xff) << k;
- k += 8;
- }
-
- len += (b & InternalInflateConstants.InflateMask[j]);
-
- b >>= j;
- k -= j;
-
- need = dbits;
- tree = dtree;
- tree_index = dtree_index;
- mode = DIST;
- goto case DIST;
-
- case DIST: // i: get distance next
- j = need;
-
- while (k < j)
- {
- if (n != 0)
- {
- r = ZlibConstants.Z_OK;
- }
- else
- {
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
- }
- n--; b |= (z.InputBuffer[p++] & 0xff) << k;
- k += 8;
- }
-
- tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3;
-
- b >>= tree[tindex + 1];
- k -= tree[tindex + 1];
-
- e = (tree[tindex]);
- if ((e & 0x10) != 0)
- {
- // distance
- bitsToGet = e & 15;
- dist = tree[tindex + 2];
- mode = DISTEXT;
- break;
- }
- if ((e & 64) == 0)
- {
- // next table
- need = e;
- tree_index = tindex / 3 + tree[tindex + 2];
- break;
- }
- mode = BADCODE; // invalid code
- z.Message = "invalid distance code";
- r = ZlibConstants.Z_DATA_ERROR;
-
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
-
- case DISTEXT: // i: getting distance extra
- j = bitsToGet;
-
- while (k < j)
- {
- if (n != 0)
- {
- r = ZlibConstants.Z_OK;
- }
- else
- {
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
- }
- n--; b |= (z.InputBuffer[p++] & 0xff) << k;
- k += 8;
- }
-
- dist += (b & InternalInflateConstants.InflateMask[j]);
-
- b >>= j;
- k -= j;
-
- mode = COPY;
- goto case COPY;
-
- case COPY: // o: copying bytes in window, waiting for space
- f = q - dist;
- while (f < 0)
- {
- // modulo window size ("while" instead of "if" handles invalid distances)
- f += blocks.end;
- }
- while (len != 0)
- {
- if (m == 0)
- {
- if (q == blocks.end && blocks.readAt != 0)
- {
- q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
- }
- if (m == 0)
- {
- blocks.writeAt = q; r = blocks.Flush(r);
- q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
-
- if (q == blocks.end && blocks.readAt != 0)
- {
- q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
- }
-
- if (m == 0)
- {
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n;
- z.TotalBytesIn += p - z.NextIn;
- z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
- }
- }
- }
-
- blocks.window[q++] = blocks.window[f++]; m--;
-
- if (f == blocks.end)
- {
- f = 0;
- }
- len--;
- }
- mode = START;
- break;
-
- case LIT: // o: got literal, waiting for output space
- if (m == 0)
- {
- if (q == blocks.end && blocks.readAt != 0)
- {
- q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
- }
- if (m == 0)
- {
- blocks.writeAt = q; r = blocks.Flush(r);
- q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
-
- if (q == blocks.end && blocks.readAt != 0)
- {
- q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
- }
- if (m == 0)
- {
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
- }
- }
- }
- r = ZlibConstants.Z_OK;
-
- blocks.window[q++] = (byte)lit; m--;
-
- mode = START;
- break;
-
- case WASH: // o: got eob, possibly more output
- if (k > 7)
- {
- // return unused byte, if any
- k -= 8;
- n++;
- p--; // can always return one
- }
-
- blocks.writeAt = q; r = blocks.Flush(r);
- q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
-
- if (blocks.readAt != blocks.writeAt)
- {
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
- }
- mode = END;
- goto case END;
-
- case END:
- r = ZlibConstants.Z_STREAM_END;
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
-
- case BADCODE: // x: got error
-
- r = ZlibConstants.Z_DATA_ERROR;
-
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
-
- default:
- r = ZlibConstants.Z_STREAM_ERROR;
-
- blocks.bitb = b; blocks.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- blocks.writeAt = q;
- return blocks.Flush(r);
- }
- }
- }
-
- // Called with number of bytes left to write in window at least 258
- // (the maximum string length) and number of input bytes available
- // at least ten. The ten bytes are six bytes for the longest length/
- // distance pair plus four bytes for overloading the bit buffer.
-
- internal int InflateFast(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index, InflateBlocks s, ZlibCodec z)
- {
- int t; // temporary pointer
- int[] tp; // temporary pointer
- int tp_index; // temporary pointer
- int e; // extra bits or operation
- int b; // bit buffer
- int k; // bits in bit buffer
- int p; // input data pointer
- int n; // bytes available there
- int q; // output window write pointer
- int m; // bytes to end of window or read pointer
- int ml; // mask for literal/length tree
- int md; // mask for distance tree
- int c; // bytes to copy
- int d; // distance back to copy from
- int r; // copy source pointer
-
- int tp_index_t_3; // (tp_index+t)*3
-
- // load input, output, bit values
- p = z.NextIn; n = z.AvailableBytesIn; b = s.bitb; k = s.bitk;
- q = s.writeAt; m = q < s.readAt ? s.readAt - q - 1 : s.end - q;
-
- // initialize masks
- ml = InternalInflateConstants.InflateMask[bl];
- md = InternalInflateConstants.InflateMask[bd];
-
- // do until not enough input or output space for fast loop
- do
- {
- // assume called with m >= 258 && n >= 10
- // get literal/length code
- while (k < (20))
- {
- // max bits for literal/length code
- n--;
- b |= (z.InputBuffer[p++] & 0xff) << k; k += 8;
- }
-
- t = b & ml;
- tp = tl;
- tp_index = tl_index;
- tp_index_t_3 = (tp_index + t) * 3;
- if ((e = tp[tp_index_t_3]) == 0)
- {
- b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);
-
- s.window[q++] = (byte)tp[tp_index_t_3 + 2];
- m--;
- continue;
- }
- do
- {
-
- b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);
-
- if ((e & 16) != 0)
- {
- e &= 15;
- c = tp[tp_index_t_3 + 2] + ((int)b & InternalInflateConstants.InflateMask[e]);
-
- b >>= e; k -= e;
-
- // decode distance base of block to copy
- while (k < 15)
- {
- // max bits for distance code
- n--;
- b |= (z.InputBuffer[p++] & 0xff) << k; k += 8;
- }
-
- t = b & md;
- tp = td;
- tp_index = td_index;
- tp_index_t_3 = (tp_index + t) * 3;
- e = tp[tp_index_t_3];
-
- do
- {
-
- b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);
-
- if ((e & 16) != 0)
- {
- // get extra bits to add to distance base
- e &= 15;
- while (k < e)
- {
- // get extra bits (up to 13)
- n--;
- b |= (z.InputBuffer[p++] & 0xff) << k; k += 8;
- }
-
- d = tp[tp_index_t_3 + 2] + (b & InternalInflateConstants.InflateMask[e]);
-
- b >>= e; k -= e;
-
- // do the copy
- m -= c;
- if (q >= d)
- {
- // offset before dest
- // just copy
- r = q - d;
- if (q - r > 0 && 2 > (q - r))
- {
- s.window[q++] = s.window[r++]; // minimum count is three,
- s.window[q++] = s.window[r++]; // so unroll loop a little
- c -= 2;
- }
- else
- {
- Array.Copy(s.window, r, s.window, q, 2);
- q += 2; r += 2; c -= 2;
- }
- }
- else
- {
- // else offset after destination
- r = q - d;
- do
- {
- r += s.end; // force pointer in window
- }
- while (r < 0); // covers invalid distances
- e = s.end - r;
- if (c > e)
- {
- // if source crosses,
- c -= e; // wrapped copy
- if (q - r > 0 && e > (q - r))
- {
- do
- {
- s.window[q++] = s.window[r++];
- }
- while (--e != 0);
- }
- else
- {
- Array.Copy(s.window, r, s.window, q, e);
- q += e; r += e; e = 0;
- }
- r = 0; // copy rest from start of window
- }
- }
-
- // copy all or what's left
- if (q - r > 0 && c > (q - r))
- {
- do
- {
- s.window[q++] = s.window[r++];
- }
- while (--c != 0);
- }
- else
- {
- Array.Copy(s.window, r, s.window, q, c);
- q += c; r += c; c = 0;
- }
- break;
- }
- else if ((e & 64) == 0)
- {
- t += tp[tp_index_t_3 + 2];
- t += (b & InternalInflateConstants.InflateMask[e]);
- tp_index_t_3 = (tp_index + t) * 3;
- e = tp[tp_index_t_3];
- }
- else
- {
- z.Message = "invalid distance code";
-
- c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);
-
- s.bitb = b; s.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- s.writeAt = q;
-
- return ZlibConstants.Z_DATA_ERROR;
- }
- }
- while (true);
- break;
- }
-
- if ((e & 64) == 0)
- {
- t += tp[tp_index_t_3 + 2];
- t += (b & InternalInflateConstants.InflateMask[e]);
- tp_index_t_3 = (tp_index + t) * 3;
- if ((e = tp[tp_index_t_3]) == 0)
- {
- b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);
- s.window[q++] = (byte)tp[tp_index_t_3 + 2];
- m--;
- break;
- }
- }
- else if ((e & 32) != 0)
- {
- c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);
-
- s.bitb = b; s.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- s.writeAt = q;
-
- return ZlibConstants.Z_STREAM_END;
- }
- else
- {
- z.Message = "invalid literal/length code";
-
- c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);
-
- s.bitb = b; s.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- s.writeAt = q;
-
- return ZlibConstants.Z_DATA_ERROR;
- }
- }
- while (true);
- }
- while (m >= 258 && n >= 10);
-
- // not enough input or output--restore pointers and return
- c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);
-
- s.bitb = b; s.bitk = k;
- z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
- s.writeAt = q;
-
- return ZlibConstants.Z_OK;
- }
- }
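
Both the COPY state and InflateFast above resolve LZ77 back-references against a circular window: the source index q - dist is wrapped by adding the window size while it is negative, and a copy that reaches the end of the window continues from index 0. A self-contained sketch of that wrap-around copy (illustration only; the real code tracks free space and flushes through InflateBlocks rather than wrapping the write pointer inline):

// Illustration only: copy `len` bytes located `dist` bytes behind the write
// pointer inside a circular window, wrapping both indices at the window end.
static void CopyFromWindow(byte[] window, ref int writeAt, int dist, int len)
{
    int from = writeAt - dist;
    while (from < 0)
    {
        from += window.Length;                       // wrap the source into the window
    }

    while (len-- > 0)
    {
        window[writeAt++] = window[from++];
        if (from == window.Length) from = 0;         // source wrapped past the end
        if (writeAt == window.Length) writeAt = 0;   // destination wrapped past the end
    }
}
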
-
- internal sealed class InflateManager
- {
- // preset dictionary flag in zlib header
- private const int PRESET_DICT = 0x20;
-
- private const int Z_DEFLATED = 8;
-
- private enum InflateManagerMode
- {
- METHOD = 0, // waiting for method byte
- FLAG = 1, // waiting for flag byte
- DICT4 = 2, // four dictionary check bytes to go
- DICT3 = 3, // three dictionary check bytes to go
- DICT2 = 4, // two dictionary check bytes to go
- DICT1 = 5, // one dictionary check byte to go
- DICT0 = 6, // waiting for inflateSetDictionary
- BLOCKS = 7, // decompressing blocks
- CHECK4 = 8, // four check bytes to go
- CHECK3 = 9, // three check bytes to go
- CHECK2 = 10, // two check bytes to go
- CHECK1 = 11, // one check byte to go
- DONE = 12, // finished check, done
- BAD = 13, // got an error--stay here
- }
-
- private InflateManagerMode mode; // current inflate mode
- internal ZlibCodec _codec; // pointer back to this zlib stream
-
- // mode dependent information
- internal int method; // if FLAGS, method byte
-
- // if CHECK, check values to compare
- internal uint computedCheck; // computed check value
- internal uint expectedCheck; // stream check value
-
- // if BAD, inflateSync's marker bytes count
- internal int marker;
-
- // mode independent information
- //internal int nowrap; // flag for no wrapper
- private bool _handleRfc1950HeaderBytes = true;
- internal bool HandleRfc1950HeaderBytes
- {
- get { return _handleRfc1950HeaderBytes; }
- set { _handleRfc1950HeaderBytes = value; }
- }
- internal int wbits; // log2(window size) (8..15, defaults to 15)
-
- internal InflateBlocks blocks; // current inflate_blocks state
-
- public InflateManager() { }
-
- public InflateManager(bool expectRfc1950HeaderBytes)
- {
- _handleRfc1950HeaderBytes = expectRfc1950HeaderBytes;
- }
-
- internal int Reset()
- {
- _codec.TotalBytesIn = _codec.TotalBytesOut = 0;
- _codec.Message = null;
- mode = HandleRfc1950HeaderBytes ? InflateManagerMode.METHOD : InflateManagerMode.BLOCKS;
- blocks.Reset();
- return ZlibConstants.Z_OK;
- }
-
- internal int End()
- {
- if (blocks != null)
- {
- blocks.Free();
- }
- blocks = null;
- return ZlibConstants.Z_OK;
- }
-
- internal int Initialize(ZlibCodec codec, int w)
- {
- _codec = codec;
- _codec.Message = null;
- blocks = null;
-
- // handle undocumented nowrap option (no zlib header or check)
- //nowrap = 0;
- //if (w < 0)
- //{
- // w = - w;
- // nowrap = 1;
- //}
-
- // set window size
- if (w < 8 || w > 15)
- {
- End();
- throw new ZlibException("Bad window size.");
-
- //return ZlibConstants.Z_STREAM_ERROR;
- }
- wbits = w;
-
- blocks = new InflateBlocks(codec,
- HandleRfc1950HeaderBytes ? this : null,
- 1 << w);
-
- // reset state
- Reset();
- return ZlibConstants.Z_OK;
- }
-
- internal int Inflate(FlushType flush)
- {
- int b;
-
- if (_codec.InputBuffer == null)
- {
- throw new ZlibException("InputBuffer is null. ");
- }
-
-// int f = (flush == FlushType.Finish)
-// ? ZlibConstants.Z_BUF_ERROR
-// : ZlibConstants.Z_OK;
-
- // workitem 8870
- int f = ZlibConstants.Z_OK;
- int r = ZlibConstants.Z_BUF_ERROR;
-
- while (true)
- {
- switch (mode)
- {
- case InflateManagerMode.METHOD:
- if (_codec.AvailableBytesIn == 0)
- {
- return r;
- }
- r = f;
- _codec.AvailableBytesIn--;
- _codec.TotalBytesIn++;
- if (((method = _codec.InputBuffer[_codec.NextIn++]) & 0xf) != Z_DEFLATED)
- {
- mode = InflateManagerMode.BAD;
- _codec.Message = String.Format("unknown compression method (0x{0:X2})", method);
- marker = 5; // can't try inflateSync
- break;
- }
- if ((method >> 4) + 8 > wbits)
- {
- mode = InflateManagerMode.BAD;
- _codec.Message = String.Format("invalid window size ({0})", (method >> 4) + 8);
- marker = 5; // can't try inflateSync
- break;
- }
- mode = InflateManagerMode.FLAG;
- break;
-
- case InflateManagerMode.FLAG:
- if (_codec.AvailableBytesIn == 0)
- {
- return r;
- }
- r = f;
- _codec.AvailableBytesIn--;
- _codec.TotalBytesIn++;
- b = (_codec.InputBuffer[_codec.NextIn++]) & 0xff;
-
- if ((((method << 8) + b) % 31) != 0)
- {
- mode = InflateManagerMode.BAD;
- _codec.Message = "incorrect header check";
- marker = 5; // can't try inflateSync
- break;
- }
-
- mode = ((b & PRESET_DICT) == 0)
- ? InflateManagerMode.BLOCKS
- : InflateManagerMode.DICT4;
- break;
-
- case InflateManagerMode.DICT4:
- if (_codec.AvailableBytesIn == 0)
- {
- return r;
- }
- r = f;
- _codec.AvailableBytesIn--;
- _codec.TotalBytesIn++;
- expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000);
- mode = InflateManagerMode.DICT3;
- break;
-
- case InflateManagerMode.DICT3:
- if (_codec.AvailableBytesIn == 0)
- {
- return r;
- }
- r = f;
- _codec.AvailableBytesIn--;
- _codec.TotalBytesIn++;
- expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000);
- mode = InflateManagerMode.DICT2;
- break;
-
- case InflateManagerMode.DICT2:
-
- if (_codec.AvailableBytesIn == 0)
- {
- return r;
- }
- r = f;
- _codec.AvailableBytesIn--;
- _codec.TotalBytesIn++;
- expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00);
- mode = InflateManagerMode.DICT1;
- break;
-
-
- case InflateManagerMode.DICT1:
- if (_codec.AvailableBytesIn == 0)
- {
- return r;
- }
- r = f;
- _codec.AvailableBytesIn--; _codec.TotalBytesIn++;
- expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff);
- _codec._Adler32 = expectedCheck;
- mode = InflateManagerMode.DICT0;
- return ZlibConstants.Z_NEED_DICT;
-
- case InflateManagerMode.DICT0:
- mode = InflateManagerMode.BAD;
- _codec.Message = "need dictionary";
- marker = 0; // can try inflateSync
- return ZlibConstants.Z_STREAM_ERROR;
-
- case InflateManagerMode.BLOCKS:
- r = blocks.Process(r);
- if (r == ZlibConstants.Z_DATA_ERROR)
- {
- mode = InflateManagerMode.BAD;
- marker = 0; // can try inflateSync
- break;
- }
-
- if (r == ZlibConstants.Z_OK)
- {
- r = f;
- }
-
- if (r != ZlibConstants.Z_STREAM_END)
- {
- return r;
- }
-
- r = f;
- computedCheck = blocks.Reset();
- if (!HandleRfc1950HeaderBytes)
- {
- mode = InflateManagerMode.DONE;
- return ZlibConstants.Z_STREAM_END;
- }
- mode = InflateManagerMode.CHECK4;
- break;
-
- case InflateManagerMode.CHECK4:
- if (_codec.AvailableBytesIn == 0)
- {
- return r;
- }
- r = f;
- _codec.AvailableBytesIn--;
- _codec.TotalBytesIn++;
- expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000);
- mode = InflateManagerMode.CHECK3;
- break;
-
- case InflateManagerMode.CHECK3:
- if (_codec.AvailableBytesIn == 0)
- {
- return r;
- }
- r = f;
- _codec.AvailableBytesIn--; _codec.TotalBytesIn++;
- expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000);
- mode = InflateManagerMode.CHECK2;
- break;
-
- case InflateManagerMode.CHECK2:
- if (_codec.AvailableBytesIn == 0)
- {
- return r;
- }
- r = f;
- _codec.AvailableBytesIn--;
- _codec.TotalBytesIn++;
- expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00);
- mode = InflateManagerMode.CHECK1;
- break;
-
- case InflateManagerMode.CHECK1:
- if (_codec.AvailableBytesIn == 0)
- {
- return r;
- }
- r = f;
- _codec.AvailableBytesIn--; _codec.TotalBytesIn++;
- expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff);
- if (computedCheck != expectedCheck)
- {
- mode = InflateManagerMode.BAD;
- _codec.Message = "incorrect data check";
- marker = 5; // can't try inflateSync
- break;
- }
- mode = InflateManagerMode.DONE;
- return ZlibConstants.Z_STREAM_END;
-
- case InflateManagerMode.DONE:
- return ZlibConstants.Z_STREAM_END;
-
- case InflateManagerMode.BAD:
- throw new ZlibException(String.Format("Bad state ({0})", _codec.Message));
-
- default:
- throw new ZlibException("Stream error.");
- }
- }
- }
-
- internal int SetDictionary(byte[] dictionary)
- {
- int index = 0;
- int length = dictionary.Length;
- if (mode != InflateManagerMode.DICT0)
- {
- throw new ZlibException("Stream error.");
- }
-
- if (Adler.Adler32(1, dictionary, 0, dictionary.Length) != _codec._Adler32)
- {
- return ZlibConstants.Z_DATA_ERROR;
- }
-
- _codec._Adler32 = Adler.Adler32(0, null, 0, 0);
-
- if (length >= (1 << wbits))
- {
- length = (1 << wbits) - 1;
- index = dictionary.Length - length;
- }
- blocks.SetDictionary(dictionary, index, length);
- mode = InflateManagerMode.BLOCKS;
- return ZlibConstants.Z_OK;
- }
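
SetDictionary above, and the DICT4..DICT1 and CHECK4..CHECK1 states before it, compare against the Adler-32 checksum defined in RFC 1950. For reference, a standalone version of that checksum (a sketch of the algorithm, not the Adler class the deleted code actually uses):

// Reference Adler-32 (RFC 1950): s1 is the byte sum, s2 the running sum of s1,
// both modulo 65521; the result packs s2 in the high 16 bits and s1 in the low.
static uint Adler32(uint adler, byte[] buf, int index, int len)
{
    const uint MOD = 65521;                 // largest prime below 2^16
    if (buf == null) return 1;              // initial value of the checksum
    uint s1 = adler & 0xffff;
    uint s2 = (adler >> 16) & 0xffff;
    for (int i = 0; i < len; i++)
    {
        s1 = (s1 + buf[index + i]) % MOD;
        s2 = (s2 + s1) % MOD;
    }
    return (s2 << 16) | s1;
}
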
-
- private static readonly byte[] mark = new byte[] { 0, 0, 0xff, 0xff };
-
- internal int Sync()
- {
- int n; // number of bytes to look at
- int p; // pointer to bytes
- int m; // number of marker bytes found in a row
- long r, w; // temporaries to save total_in and total_out
-
- // set up
- if (mode != InflateManagerMode.BAD)
- {
- mode = InflateManagerMode.BAD;
- marker = 0;
- }
- if ((n = _codec.AvailableBytesIn) == 0)
- {
- return ZlibConstants.Z_BUF_ERROR;
- }
- p = _codec.NextIn;
- m = marker;
-
- // search
- while (n != 0 && m < 4)
- {
- if (_codec.InputBuffer[p] == mark[m])
- {
- m++;
- }
- else if (_codec.InputBuffer[p] != 0)
- {
- m = 0;
- }
- else
- {
- m = 4 - m;
- }
- p++; n--;
- }
-
- // restore
- _codec.TotalBytesIn += p - _codec.NextIn;
- _codec.NextIn = p;
- _codec.AvailableBytesIn = n;
- marker = m;
-
- // return no joy or set up to restart on a new block
- if (m != 4)
- {
- return ZlibConstants.Z_DATA_ERROR;
- }
- r = _codec.TotalBytesIn;
- w = _codec.TotalBytesOut;
- Reset();
- _codec.TotalBytesIn = r;
- _codec.TotalBytesOut = w;
- mode = InflateManagerMode.BLOCKS;
- return ZlibConstants.Z_OK;
- }
-
- // Returns 1 if inflate is currently at the end of a block generated
- // by Z_SYNC_FLUSH or Z_FULL_FLUSH (0 otherwise). This function is used by one PPP
- // implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH
- // but removes the length bytes of the resulting empty stored block. When
- // decompressing, PPP checks that at the end of an input packet, inflate is
- // waiting for these length bytes.
- internal int SyncPoint(ZlibCodec z)
- {
- return blocks.SyncPoint();
- }
- }
-}
\ No newline at end of file
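
The METHOD and FLAG states of InflateManager.Inflate above implement the RFC 1950 two-byte header check: the low nibble of the first byte must be 8 (deflate), its high nibble encodes the window size, bit 0x20 of the flag byte requests a preset dictionary, and the 16-bit value formed by both bytes must be divisible by 31. A compact standalone restatement of that check (illustration only):

// Illustration only: the RFC 1950 header validation performed by the METHOD and
// FLAG states of InflateManager.Inflate above.
static bool IsValidZlibHeader(byte cmf, byte flg, out int windowBits, out bool presetDict)
{
    windowBits = (cmf >> 4) + 8;                     // log2(window size), 8..15 for deflate
    presetDict = (flg & 0x20) != 0;                  // PRESET_DICT flag

    bool deflateMethod = (cmf & 0x0f) == 8;          // Z_DEFLATED
    bool checksumOk = ((cmf << 8) + flg) % 31 == 0;  // header check bits
    return deflateMethod && windowBits <= 15 && checksumOk;
}

// Example: the common header 0x78 0x9C is deflate with a 32 KB window and no
// preset dictionary: (0x78 << 8) + 0x9C = 30876 = 31 * 996.
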
diff --git a/SabreTools.Library/External/Zlib/ParallelDeflateOutputStream.cs b/SabreTools.Library/External/Zlib/ParallelDeflateOutputStream.cs
deleted file mode 100644
index 7f9e116a..00000000
--- a/SabreTools.Library/External/Zlib/ParallelDeflateOutputStream.cs
+++ /dev/null
@@ -1,1396 +0,0 @@
-//#define Trace
-
-// ParallelDeflateOutputStream.cs
-// ------------------------------------------------------------------
-//
-// A DeflateStream that does compression only. It uses a
-// divide-and-conquer approach with multiple threads to exploit multiple
-// CPUs for the DEFLATE computation.
-//
-// last saved: <2011-July-31 14:49:40>
-//
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2009-2011 by Dino Chiesa
-// All rights reserved!
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-
-using System;
-using System.Collections.Generic;
-using System.Threading;
-using System.IO;
-
-namespace Ionic.Zlib
-{
- internal class WorkItem
- {
- public byte[] buffer;
- public byte[] compressed;
- public int crc;
- public int index;
- public int ordinal;
- public int inputBytesAvailable;
- public int compressedBytesAvailable;
- public ZlibCodec compressor;
-
- public WorkItem(int size,
- CompressionLevel compressLevel,
- CompressionStrategy strategy,
- int ix)
- {
- this.buffer= new byte[size];
- // alloc 5 bytes overhead for every block (margin of safety= 2)
- int n = size + ((size / 32768)+1) * 5 * 2;
- this.compressed = new byte[n];
- this.compressor = new ZlibCodec();
- this.compressor.InitializeDeflate(compressLevel, false);
- this.compressor.OutputBuffer = this.compressed;
- this.compressor.InputBuffer = this.buffer;
- this.index = ix;
- }
- }
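
The compressed buffer above is sized for the worst case in which DEFLATE stores the data instead of shrinking it: each stored block covers at most 32 KB and costs about 5 bytes of header, and the constructor doubles that allowance as its stated margin of safety. A quick check of the arithmetic (illustration only):

// Illustration only: worst-case output sizing used by the WorkItem constructor above.
// Each 32 KB stored block adds 5 bytes of header; "+1" covers a partial block and
// "* 2" is the stated margin of safety.
static int CompressedCapacity(int size) => size + ((size / 32768) + 1) * 5 * 2;

// For a 64 KB input buffer:  65536 + (2 + 1) * 10 = 65566 bytes.
// For a 128 KB input buffer: 131072 + (4 + 1) * 10 = 131122 bytes.
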
-
- ///
- /// A class for compressing streams using the
- /// Deflate algorithm with multiple threads.
- ///
- ///
- ///
- ///
- /// This class performs DEFLATE compression through writing. For
- /// more information on the Deflate algorithm, see IETF RFC 1951,
- /// "DEFLATE Compressed Data Format Specification version 1.3."
- ///
- ///
- ///
- /// This class is similar to DeflateStream, except
- /// that this class is for compression only, and this implementation uses an
- /// approach that employs multiple worker threads to perform the DEFLATE. On
- /// a multi-cpu or multi-core computer, the performance of this class can be
- /// significantly higher than the single-threaded DeflateStream, particularly
- /// for larger streams. How large? Anything over 10mb is a good candidate
- /// for parallel compression.
- ///
- ///
- ///
- /// The tradeoff is that this class uses more memory and more CPU than the
- /// vanilla DeflateStream, and also is less efficient as a compressor. For
- /// large files the size of the compressed data stream can be less than 1%
- /// larger than the size of a compressed data stream from the vanilla
- /// DeflateStream. For smaller files the difference can be larger. The
- /// difference will also be larger if you set the BufferSize to be lower than
- /// the default value. Your mileage may vary. Finally, for small files, the
- /// ParallelDeflateOutputStream can be much slower than the vanilla
- /// DeflateStream, because of the overhead associated with using the thread
- /// pool.
- ///
- ///
- ///
- ///
- public class ParallelDeflateOutputStream : System.IO.Stream
- {
-
- private static readonly int IO_BUFFER_SIZE_DEFAULT = 64 * 1024; // 64k
- private static readonly int BufferPairsPerCore = 4;
-
- private System.Collections.Generic.List<WorkItem> _pool;
- private bool _leaveOpen;
- private bool emitting;
- private System.IO.Stream _outStream;
- private int _maxBufferPairs;
- private int _bufferSize = IO_BUFFER_SIZE_DEFAULT;
- private AutoResetEvent _newlyCompressedBlob;
- //private ManualResetEvent _writingDone;
- //private ManualResetEvent _sessionReset;
- private object _outputLock = new object();
- private bool _isClosed;
- private bool _firstWriteDone;
- private int _currentlyFilling;
- private int _lastFilled;
- private int _lastWritten;
- private int _latestCompressed;
- private int _Crc32;
- private CRC32 _runningCrc;
- private object _latestLock = new object();
- private System.Collections.Generic.Queue<int> _toWrite;
- private System.Collections.Generic.Queue<int> _toFill;
- private Int64 _totalBytesProcessed;
- private CompressionLevel _compressLevel;
- private volatile Exception _pendingException;
- private bool _handlingException;
- private object _eLock = new Object(); // protects _pendingException
-
- // This bitfield is used only when Trace is defined.
- //private TraceBits _DesiredTrace = TraceBits.Write | TraceBits.WriteBegin |
- //TraceBits.WriteDone | TraceBits.Lifecycle | TraceBits.Fill | TraceBits.Flush |
- //TraceBits.Session;
-
- //private TraceBits _DesiredTrace = TraceBits.WriteBegin | TraceBits.WriteDone | TraceBits.Synch | TraceBits.Lifecycle | TraceBits.Session ;
-
- private TraceBits _DesiredTrace =
- TraceBits.Session |
- TraceBits.Compress |
- TraceBits.WriteTake |
- TraceBits.WriteEnter |
- TraceBits.EmitEnter |
- TraceBits.EmitDone |
- TraceBits.EmitLock |
- TraceBits.EmitSkip |
- TraceBits.EmitBegin;
-
- ///
- /// Create a ParallelDeflateOutputStream.
- ///
- ///
- ///
- ///
- /// This stream compresses data written into it via the DEFLATE
- /// algorithm (see RFC 1951), and writes out the compressed byte stream.
- ///
- ///
- ///
- /// The instance will use the default compression level, the default
- /// buffer sizes and the default number of threads and buffers per
- /// thread.
- ///
- ///
- ///
- /// This class is similar to DeflateStream,
- /// except that this implementation uses an approach that employs
- /// multiple worker threads to perform the DEFLATE. On a multi-cpu or
- /// multi-core computer, the performance of this class can be
- /// significantly higher than the single-threaded DeflateStream,
- /// particularly for larger streams. How large? Anything over 10mb is
- /// a good candidate for parallel compression.
- ///
- ///
- ///
- ///
- ///
- ///
- /// This example shows how to use a ParallelDeflateOutputStream to compress
- /// data. It reads a file, compresses it, and writes the compressed data to
- /// a second, output file.
- ///
- ///
- /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
- /// int n= -1;
- /// String outputFile = fileToCompress + ".compressed";
- /// using (System.IO.Stream input = System.IO.File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (var raw = System.IO.File.Create(outputFile))
- /// {
- /// using (Stream compressor = new ParallelDeflateOutputStream(raw))
- /// {
- /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
- /// {
- /// compressor.Write(buffer, 0, n);
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- /// Dim buffer As Byte() = New Byte(4096) {}
- /// Dim n As Integer = -1
- /// Dim outputFile As String = (fileToCompress & ".compressed")
- /// Using input As Stream = File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using raw As FileStream = File.Create(outputFile)
- /// Using compressor As Stream = New ParallelDeflateOutputStream(raw)
- /// Do While (n <> 0)
- /// If (n > 0) Then
- /// compressor.Write(buffer, 0, n)
- /// End If
- /// n = input.Read(buffer, 0, buffer.Length)
- /// Loop
- /// End Using
- /// End Using
- /// End Using
- ///
- ///
- /// The stream to which compressed data will be written.
- public ParallelDeflateOutputStream(System.IO.Stream stream)
- : this(stream, CompressionLevel.Default, CompressionStrategy.Default, false)
- {
- }
-
- ///
- /// Create a ParallelDeflateOutputStream using the specified CompressionLevel.
- ///
- ///
- /// See the ParallelDeflateOutputStream(Stream)
- /// constructor for example code.
- ///
- /// The stream to which compressed data will be written.
- /// A tuning knob to trade speed for effectiveness.
- public ParallelDeflateOutputStream(System.IO.Stream stream, CompressionLevel level)
- : this(stream, level, CompressionStrategy.Default, false)
- {
- }
-
- ///
- /// Create a ParallelDeflateOutputStream and specify whether to leave the captive stream open
- /// when the ParallelDeflateOutputStream is closed.
- ///
- ///
- /// See the ParallelDeflateOutputStream(Stream)
- /// constructor for example code.
- ///
- /// The stream to which compressed data will be written.
- ///
- /// true if the application would like the stream to remain open after inflation/deflation.
- ///
- public ParallelDeflateOutputStream(System.IO.Stream stream, bool leaveOpen)
- : this(stream, CompressionLevel.Default, CompressionStrategy.Default, leaveOpen)
- {
- }
-
- ///
- /// Create a ParallelDeflateOutputStream and specify whether to leave the captive stream open
- /// when the ParallelDeflateOutputStream is closed.
- ///
- ///
- /// See the ParallelDeflateOutputStream(Stream)
- /// constructor for example code.
- ///
- /// The stream to which compressed data will be written.
- /// A tuning knob to trade speed for effectiveness.
- ///
- /// true if the application would like the stream to remain open after inflation/deflation.
- ///
- public ParallelDeflateOutputStream(System.IO.Stream stream, CompressionLevel level, bool leaveOpen)
- : this(stream, level, CompressionStrategy.Default, leaveOpen)
- {
- }
-
- ///
- /// Create a ParallelDeflateOutputStream using the specified
- /// CompressionLevel and CompressionStrategy, and specifying whether to
- /// leave the captive stream open when the ParallelDeflateOutputStream is
- /// closed.
- ///
- ///
- /// See the ParallelDeflateOutputStream(Stream)
- /// constructor for example code.
- ///
- /// The stream to which compressed data will be written.
- /// A tuning knob to trade speed for effectiveness.
- ///
- /// By tweaking this parameter, you may be able to optimize the compression for
- /// data with particular characteristics.
- ///
- ///
- /// true if the application would like the stream to remain open after inflation/deflation.
- ///
- public ParallelDeflateOutputStream(System.IO.Stream stream,
- CompressionLevel level,
- CompressionStrategy strategy,
- bool leaveOpen)
- {
- TraceOutput(TraceBits.Lifecycle | TraceBits.Session, "-------------------------------------------------------");
- TraceOutput(TraceBits.Lifecycle | TraceBits.Session, "Create {0:X8}", this.GetHashCode());
- _outStream = stream;
- _compressLevel= level;
- Strategy = strategy;
- _leaveOpen = leaveOpen;
- this.MaxBufferPairs = 16; // default
- }
-
- ///
- /// The ZLIB strategy to be used during compression.
- ///
- ///
- public CompressionStrategy Strategy
- {
- get;
- private set;
- }
-
- ///
- /// The maximum number of buffer pairs to use.
- ///
- ///
- ///
- ///
- /// This property sets an upper limit on the number of memory buffer
- /// pairs to create. The implementation of this stream allocates
- /// multiple buffers to facilitate parallel compression. As each buffer
- /// fills up, this stream uses
- /// ThreadPool.QueueUserWorkItem()
- /// to compress those buffers in a background threadpool thread. After a
- /// buffer is compressed, it is re-ordered and written to the output
- /// stream.
- ///
- ///
- ///
- /// A higher number of buffer pairs enables a higher degree of
- /// parallelism, which tends to increase the speed of compression on
- /// multi-cpu computers. On the other hand, a higher number of buffer
- /// pairs also implies a larger memory consumption, more active worker
- /// threads, and a higher cpu utilization for any compression. This
- /// property enables the application to limit its memory consumption and
- /// CPU utilization behavior depending on requirements.
- ///
- ///
- ///
- /// For each compression "task" that occurs in parallel, there are 2
- /// buffers allocated: one for input and one for output. This property
- /// sets a limit for the number of pairs. The total amount of storage
- /// space allocated for buffering will then be (N*S*2), where N is the
- /// number of buffer pairs, S is the size of each buffer (BufferSize). By default, DotNetZip allocates 4 buffer
- /// pairs per CPU core, so if your machine has 4 cores, and you retain
- /// the default buffer size of 128k, then the
- /// ParallelDeflateOutputStream will use 4 * 4 * 2 * 128kb of buffer
- /// memory in total, or 4mb, in blocks of 128kb. If you then set this
- /// property to 8, then the number will be 8 * 2 * 128kb of buffer
- /// memory, or 2mb.
- ///
- ///
- ///
- /// CPU utilization will also go up with additional buffers, because a
- /// larger number of buffer pairs allows a larger number of background
- /// threads to compress in parallel. If you find that parallel
- /// compression is consuming too much memory or CPU, you can adjust this
- /// value downward.
- ///
- ///
- ///
- /// The default value is 16. Different values may deliver better or
- /// worse results, depending on your priorities and the dynamic
- /// performance characteristics of your storage and compute resources.
- ///
- ///
- ///
- /// This property is not the number of buffer pairs to use; it is an
- /// upper limit. An illustration: Suppose you have an application that
- /// uses the default value of this property (which is 16), and it runs
- /// on a machine with 2 CPU cores. In that case, DotNetZip will allocate
- /// 4 buffer pairs per CPU core, for a total of 8 pairs. The upper
- /// limit specified by this property has no effect.
- ///
- ///
- ///
- /// The application can set this value at any time, but it is effective
- /// only before the first call to Write(), which is when the buffers are
- /// allocated.
- ///
- ///
- public int MaxBufferPairs
- {
- get
- {
- return _maxBufferPairs;
- }
- set
- {
- if (value < 4)
- {
- throw new ArgumentException("Value must be 4 or greater.",
- "MaxBufferPairs");
- }
- _maxBufferPairs = value;
- }
- }
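
As these remarks describe, MaxBufferPairs is only an upper bound; _InitializePoolOfWorkItems further below takes the smaller of BufferPairsPerCore * Environment.ProcessorCount and this limit, and total buffer memory is roughly pairs * 2 * BufferSize. A small sketch of that calculation (illustration only):

// Illustration only: how many buffer pairs the stream actually allocates,
// mirroring _InitializePoolOfWorkItems further below (BufferPairsPerCore == 4).
static int EffectiveBufferPairs(int processorCount, int maxBufferPairs) =>
    System.Math.Min(4 * processorCount, maxBufferPairs);

// e.g. 2 cores, default MaxBufferPairs of 16 -> 8 pairs (the limit has no effect);
//      8 cores, default MaxBufferPairs of 16 -> 16 pairs (the limit kicks in).
// Approximate buffer memory: pairs * 2 * BufferSize (one input + one output buffer per pair).
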
-
- ///
- /// The size of the buffers used by the compressor threads.
- ///
- ///
- ///
- ///
- /// The default buffer size is 128k. The application can set this value
- /// at any time, but it is effective only before the first Write().
- ///
- ///
- ///
- /// Larger buffer sizes implies larger memory consumption but allows
- /// more efficient compression. Using smaller buffer sizes consumes less
- /// memory but may result in less effective compression. For example,
- /// using the default buffer size of 128k, the compression delivered is
- /// within 1% of the compression delivered by the single-threaded DeflateStream. On the other hand, using a
- /// BufferSize of 8k can result in a compressed data stream that is 5%
- /// larger than that delivered by the single-threaded
- /// DeflateStream. Excessively small buffer sizes can also cause
- /// the speed of the ParallelDeflateOutputStream to drop, because of
- /// larger thread scheduling overhead dealing with many small
- /// buffers.
- ///
- ///
- ///
- /// The total amount of storage space allocated for buffering will be
- /// (N*S*2), where N is the number of buffer pairs, and S is the size of
- /// each buffer (this property). There are 2 buffers used by the
- /// compressor, one for input and one for output. By default, DotNetZip
- /// allocates 4 buffer pairs per CPU core, so if your machine has 4
- /// cores, then the number of buffer pairs used will be 16. If you
- /// accept the default value of this property, 128k, then the
- /// ParallelDeflateOutputStream will use 16 * 2 * 128kb of buffer memory
- /// in total, or 4mb, in blocks of 128kb. If you set this property to
- /// 64kb, then the number will be 16 * 2 * 64kb of buffer memory, or
- /// 2mb.
- ///
- ///
- ///
- public int BufferSize
- {
- get { return _bufferSize;}
- set
- {
- if (value < 1024)
- {
- throw new ArgumentOutOfRangeException("BufferSize",
- "BufferSize must be at least 1024 bytes");
- }
- _bufferSize = value;
- }
- }
-
- ///
- /// The CRC32 for the data that was written out, prior to compression.
- ///
- ///
- /// This value is meaningful only after a call to Close().
- ///
- public int Crc32 { get { return _Crc32; } }
-
- ///
- /// The total number of uncompressed bytes processed by the ParallelDeflateOutputStream.
- ///
- ///
- /// This value is meaningful only after a call to Close().
- ///
- public Int64 BytesProcessed { get { return _totalBytesProcessed; } }
-
- private void _InitializePoolOfWorkItems()
- {
- _toWrite = new Queue<int>();
- _toFill = new Queue<int>();
- _pool = new System.Collections.Generic.List<WorkItem>();
- int nTasks = BufferPairsPerCore * Environment.ProcessorCount;
- nTasks = Math.Min(nTasks, _maxBufferPairs);
- for(int i=0; i < nTasks; i++)
- {
- _pool.Add(new WorkItem(_bufferSize, _compressLevel, Strategy, i));
- _toFill.Enqueue(i);
- }
-
- _newlyCompressedBlob = new AutoResetEvent(false);
- _runningCrc = new CRC32();
- _currentlyFilling = -1;
- _lastFilled = -1;
- _lastWritten = -1;
- _latestCompressed = -1;
- }
-
- ///
- /// Write data to the stream.
- ///
- ///
- ///
- ///
- ///
- /// To use the ParallelDeflateOutputStream to compress data, create a
- /// ParallelDeflateOutputStream, passing a
- /// writable output stream. Then call Write() on that
- /// ParallelDeflateOutputStream, providing uncompressed data as input. The
- /// data sent to the output stream will be the compressed form of the data
- /// written.
- ///
- ///
- ///
- /// To decompress data, use the DeflateStream class.
- ///
- ///
- ///
- /// The buffer holding data to write to the stream.
- /// the offset within that data array to find the first byte to write.
- /// the number of bytes to write.
- public override void Write(byte[] buffer, int offset, int count)
- {
- bool mustWait = false;
-
- // This method does this:
- // 0. handles any pending exceptions
- // 1. write any buffers that are ready to be written,
- // 2. fills a work buffer; when full, flip state to 'Filled',
- // 3. if more data to be written, goto step 1
-
- if (_isClosed)
- {
- throw new InvalidOperationException();
- }
-
- // dispense any exceptions that occurred on the BG threads
- if (_pendingException != null)
- {
- _handlingException = true;
- var pe = _pendingException;
- _pendingException = null;
- throw pe;
- }
-
- if (count == 0) return;
-
- if (!_firstWriteDone)
- {
- // Want to do this on first Write, first session, and not in the
- // constructor. We want to allow MaxBufferPairs to
- // change after construction, but before first Write.
- _InitializePoolOfWorkItems();
- _firstWriteDone = true;
- }
-
- do
- {
- // may need to make buffers available
- EmitPendingBuffers(false, mustWait);
-
- mustWait = false;
- // use current buffer, or get a new buffer to fill
- int ix = -1;
- if (_currentlyFilling >= 0)
- {
- ix = _currentlyFilling;
- TraceOutput(TraceBits.WriteTake,
- "Write notake wi({0}) lf({1})",
- ix,
- _lastFilled);
- }
- else
- {
- TraceOutput(TraceBits.WriteTake, "Write take?");
- if (_toFill.Count == 0)
- {
- // no available buffers, so... need to emit
- // compressed buffers.
- mustWait = true;
- continue;
- }
-
- ix = _toFill.Dequeue();
- TraceOutput(TraceBits.WriteTake,
- "Write take wi({0}) lf({1})",
- ix,
- _lastFilled);
- ++_lastFilled; // TODO: consider rollover?
- }
-
- WorkItem workitem = _pool[ix];
-
- int limit = ((workitem.buffer.Length - workitem.inputBytesAvailable) > count)
- ? count
- : (workitem.buffer.Length - workitem.inputBytesAvailable);
-
- workitem.ordinal = _lastFilled;
-
- TraceOutput(TraceBits.Write,
- "Write lock wi({0}) ord({1}) iba({2})",
- workitem.index,
- workitem.ordinal,
- workitem.inputBytesAvailable
- );
-
- // copy from the provided buffer to our workitem, starting at
- // the tail end of whatever data we might have in there currently.
- Buffer.BlockCopy(buffer,
- offset,
- workitem.buffer,
- workitem.inputBytesAvailable,
- limit);
-
- count -= limit;
- offset += limit;
- workitem.inputBytesAvailable += limit;
- if (workitem.inputBytesAvailable == workitem.buffer.Length)
- {
- // No need for interlocked.increment: the Write()
- // method is documented as not multi-thread safe, so
- // we can assume Write() calls come in from only one
- // thread.
- TraceOutput(TraceBits.Write,
- "Write QUWI wi({0}) ord({1}) iba({2})",
- workitem.index,
- workitem.ordinal,
- workitem.inputBytesAvailable);
-
- if (!ThreadPool.QueueUserWorkItem(_DeflateOne, workitem))
- {
- throw new Exception("Cannot enqueue workitem");
- }
-
- _currentlyFilling = -1; // will get a new buffer next time
- }
- else
- {
- _currentlyFilling = ix;
- }
-
- if (count > 0)
- {
- TraceOutput(TraceBits.WriteEnter, "Write more");
- }
- }
- while (count > 0); // until no more to write
-
- TraceOutput(TraceBits.WriteEnter, "Write exit");
- return;
- }
-
- private void _FlushFinish()
- {
- // After writing a series of compressed buffers, each one closed
- // with Flush.Sync, we now write the final one as Flush.Finish,
- // and then stop.
- byte[] buffer = new byte[128];
- var compressor = new ZlibCodec();
- int rc = compressor.InitializeDeflate(_compressLevel, false);
- compressor.InputBuffer = null;
- compressor.NextIn = 0;
- compressor.AvailableBytesIn = 0;
- compressor.OutputBuffer = buffer;
- compressor.NextOut = 0;
- compressor.AvailableBytesOut = buffer.Length;
- rc = compressor.Deflate(FlushType.Finish);
-
- if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
- {
- throw new Exception("deflating: " + compressor.Message);
- }
-
- if (buffer.Length - compressor.AvailableBytesOut > 0)
- {
- TraceOutput(TraceBits.EmitBegin,
- "Emit begin flush bytes({0})",
- buffer.Length - compressor.AvailableBytesOut);
-
- _outStream.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut);
-
- TraceOutput(TraceBits.EmitDone,
- "Emit done flush");
- }
-
- compressor.EndDeflate();
-
- _Crc32 = _runningCrc.Crc32Result;
- }
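
_FlushFinish drives a ZlibCodec directly: initialize deflate, point InputBuffer and OutputBuffer at arrays, call Deflate with FlushType.Finish, then EndDeflate. A minimal one-shot sketch of the same pattern for an in-memory payload (illustration only, assuming only the ZlibCodec members already used above):

// Illustration only: one-shot raw deflate of a small in-memory buffer, using the
// same ZlibCodec members exercised by _FlushFinish and the WorkItem constructor.
static int DeflateOnce(byte[] input, byte[] output, CompressionLevel level)
{
    var codec = new ZlibCodec();
    codec.InitializeDeflate(level, false);   // false: no RFC 1950 wrapper, as above

    codec.InputBuffer = input;
    codec.NextIn = 0;
    codec.AvailableBytesIn = input.Length;

    codec.OutputBuffer = output;
    codec.NextOut = 0;
    codec.AvailableBytesOut = output.Length;

    int rc = codec.Deflate(FlushType.Finish);
    if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
        throw new Exception("deflating: " + codec.Message);

    codec.EndDeflate();
    return output.Length - codec.AvailableBytesOut;  // number of compressed bytes written
}
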
-
- private void _Flush(bool lastInput)
- {
- if (_isClosed)
- {
- throw new InvalidOperationException();
- }
-
- if (emitting)
- {
- return;
- }
-
- // compress any partial buffer
- if (_currentlyFilling >= 0)
- {
- WorkItem workitem = _pool[_currentlyFilling];
- _DeflateOne(workitem);
- _currentlyFilling = -1; // get a new buffer next Write()
- }
-
- if (lastInput)
- {
- EmitPendingBuffers(true, false);
- _FlushFinish();
- }
- else
- {
- EmitPendingBuffers(false, false);
- }
- }
-
- ///
- /// Flush the stream.
- ///
- public override void Flush()
- {
- if (_pendingException != null)
- {
- _handlingException = true;
- var pe = _pendingException;
- _pendingException = null;
- throw pe;
- }
- if (_handlingException)
- {
- return;
- }
-
- _Flush(false);
- }
-
- ///
- /// Close the stream.
- ///
- ///
- /// You must call Close on the stream to guarantee that all of the data written in has
- /// been compressed, and the compressed data has been written out.
- ///
- public override void Close()
- {
- TraceOutput(TraceBits.Session, "Close {0:X8}", this.GetHashCode());
-
- if (_pendingException != null)
- {
- _handlingException = true;
- var pe = _pendingException;
- _pendingException = null;
- throw pe;
- }
-
- if (_handlingException)
- {
- return;
- }
-
- if (_isClosed)
- {
- return;
- }
-
- _Flush(true);
-
- if (!_leaveOpen)
- {
- _outStream.Close();
- }
-
- _isClosed= true;
- }
-
- // workitem 10030 - implement a new Dispose method
-
- /// Dispose the object
- ///
- ///
- /// Because ParallelDeflateOutputStream is IDisposable, the
- /// application must call this method when finished using the instance.
- ///
- ///
- /// This method is generally called implicitly upon exit from
- /// a using scope in C# (Using in VB).
- ///
- ///
- new public void Dispose()
- {
- TraceOutput(TraceBits.Lifecycle, "Dispose {0:X8}", this.GetHashCode());
- Close();
- _pool = null;
- Dispose(true);
- }
-
- /// The Dispose method
- ///
- /// indicates whether the Dispose method was invoked by user code.
- ///
- protected override void Dispose(bool disposing)
- {
- base.Dispose(disposing);
- }
-
- ///
- /// Resets the stream for use with another stream.
- ///
- ///
- /// Because the ParallelDeflateOutputStream is expensive to create, it
- /// has been designed so that it can be recycled and re-used. You have
- /// to call Close() on the stream first, then you can call Reset() on
- /// it, to use it again on another stream.
- ///
- ///
- ///
- /// The new output stream for this era.
- ///
- ///
- ///
- ///
- /// ParallelDeflateOutputStream deflater = null;
- /// foreach (var inputFile in listOfFiles)
- /// {
- /// string outputFile = inputFile + ".compressed";
- /// using (System.IO.Stream input = System.IO.File.Open(inputFile, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (var outStream = System.IO.File.Create(outputFile))
- /// {
- /// if (deflater == null)
- /// deflater = new ParallelDeflateOutputStream(outStream,
- /// CompressionLevel.Best,
- /// CompressionStrategy.Default,
- /// true);
- /// deflater.Reset(outStream);
- ///
- /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
- /// {
- /// deflater.Write(buffer, 0, n);
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- public void Reset(Stream stream)
- {
- TraceOutput(TraceBits.Session, "-------------------------------------------------------");
- TraceOutput(TraceBits.Session, "Reset {0:X8} firstDone({1})", this.GetHashCode(), _firstWriteDone);
-
- if (!_firstWriteDone)
- {
- return;
- }
-
- // reset all status
- _toWrite.Clear();
- _toFill.Clear();
- foreach (var workitem in _pool)
- {
- _toFill.Enqueue(workitem.index);
- workitem.ordinal = -1;
- }
-
- _firstWriteDone = false;
- _totalBytesProcessed = 0L;
- _runningCrc = new CRC32();
- _isClosed= false;
- _currentlyFilling = -1;
- _lastFilled = -1;
- _lastWritten = -1;
- _latestCompressed = -1;
- _outStream = stream;
- }
-
- private void EmitPendingBuffers(bool doAll, bool mustWait)
- {
- // When combining parallel deflation with a ZipSegmentedStream, it's
- // possible for the ZSS to throw from within this method. In that
- // case, Close/Dispose will be called on this stream, if this stream
- // is employed within a using or try/finally pair as required. But
- // this stream is unaware of the pending exception, so the Close()
- // method invokes this method AGAIN. This can lead to a deadlock.
- // Therefore, fail fast if re-entering.
-
- if (emitting)
- {
- return;
- }
- emitting = true;
- if (doAll || mustWait)
- {
- _newlyCompressedBlob.WaitOne();
- }
-
- do
- {
- int firstSkip = -1;
- int millisecondsToWait = doAll ? 200 : (mustWait ? -1 : 0);
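- // Monitor.TryEnter timeout semantics: -1 waits indefinitely, 0 returns
- // immediately, and 200 ms acts as a polling interval while draining.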
- int nextToWrite = -1;
-
- do
- {
- if (Monitor.TryEnter(_toWrite, millisecondsToWait))
- {
- nextToWrite = -1;
- try
- {
- if (_toWrite.Count > 0)
- {
- nextToWrite = _toWrite.Dequeue();
- }
- }
- finally
- {
- Monitor.Exit(_toWrite);
- }
-
- if (nextToWrite >= 0)
- {
- WorkItem workitem = _pool[nextToWrite];
- if (workitem.ordinal != _lastWritten + 1)
- {
- // out of order. requeue and try again.
- TraceOutput(TraceBits.EmitSkip,
- "Emit skip wi({0}) ord({1}) lw({2}) fs({3})",
- workitem.index,
- workitem.ordinal,
- _lastWritten,
- firstSkip);
-
- lock (_toWrite)
- {
- _toWrite.Enqueue(nextToWrite);
- }
-
- if (firstSkip == nextToWrite)
- {
- // We went around the list once.
- // None of the items in the list is the one we want.
- // Now wait for a compressor to signal again.
- _newlyCompressedBlob.WaitOne();
- firstSkip = -1;
- }
- else if (firstSkip == -1)
- {
- firstSkip = nextToWrite;
- }
-
- continue;
- }
-
- firstSkip = -1;
-
- TraceOutput(TraceBits.EmitBegin,
- "Emit begin wi({0}) ord({1}) cba({2})",
- workitem.index,
- workitem.ordinal,
- workitem.compressedBytesAvailable);
-
- _outStream.Write(workitem.compressed, 0, workitem.compressedBytesAvailable);
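- // Blocks are emitted strictly in ordinal order, so folding each block's
- // CRC (together with its length) into the running CRC yields the same
- // value as a single-pass CRC32 over the whole uncompressed input.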
- _runningCrc.Combine(workitem.crc, workitem.inputBytesAvailable);
- _totalBytesProcessed += workitem.inputBytesAvailable;
- workitem.inputBytesAvailable = 0;
-
- TraceOutput(TraceBits.EmitDone,
- "Emit done wi({0}) ord({1}) cba({2}) mtw({3})",
- workitem.index,
- workitem.ordinal,
- workitem.compressedBytesAvailable,
- millisecondsToWait);
-
- _lastWritten = workitem.ordinal;
- _toFill.Enqueue(workitem.index);
-
- // don't wait next time through
- if (millisecondsToWait == -1)
- {
- millisecondsToWait = 0;
- }
- }
- }
- else
- {
- nextToWrite = -1;
- }
- } while (nextToWrite >= 0);
-
- } while (doAll && (_lastWritten != _latestCompressed));
-
- emitting = false;
- }
-
-#if OLD
- private void _PerpetualWriterMethod(object state)
- {
- TraceOutput(TraceBits.WriterThread, "_PerpetualWriterMethod START");
-
- try
- {
- do
- {
- // wait for the next session
- TraceOutput(TraceBits.Synch | TraceBits.WriterThread, "Synch _sessionReset.WaitOne(begin) PWM");
- _sessionReset.WaitOne();
- TraceOutput(TraceBits.Synch | TraceBits.WriterThread, "Synch _sessionReset.WaitOne(done) PWM");
-
- if (_isDisposed)
- {
- break;
- }
- TraceOutput(TraceBits.Synch | TraceBits.WriterThread, "Synch _sessionReset.Reset() PWM");
- _sessionReset.Reset();
-
- // repeatedly write buffers as they become ready
- WorkItem workitem = null;
- Ionic.Zlib.CRC32 c= new Ionic.Zlib.CRC32();
- do
- {
- workitem = _pool[_nextToWrite % _pc];
- lock(workitem)
- {
- if (_noMoreInputForThisSegment)
- TraceOutput(TraceBits.Write,
- "Write drain wi({0}) stat({1}) canuse({2}) cba({3})",
- workitem.index,
- workitem.status,
- (workitem.status == (int)WorkItem.Status.Compressed),
- workitem.compressedBytesAvailable);
-
- do
- {
- if (workitem.status == (int)WorkItem.Status.Compressed)
- {
- TraceOutput(TraceBits.WriteBegin,
- "Write begin wi({0}) stat({1}) cba({2})",
- workitem.index,
- workitem.status,
- workitem.compressedBytesAvailable);
-
- workitem.status = (int)WorkItem.Status.Writing;
- _outStream.Write(workitem.compressed, 0, workitem.compressedBytesAvailable);
- c.Combine(workitem.crc, workitem.inputBytesAvailable);
- _totalBytesProcessed += workitem.inputBytesAvailable;
- _nextToWrite++;
- workitem.inputBytesAvailable= 0;
- workitem.status = (int)WorkItem.Status.Done;
-
- TraceOutput(TraceBits.WriteDone,
- "Write done wi({0}) stat({1}) cba({2})",
- workitem.index,
- workitem.status,
- workitem.compressedBytesAvailable);
-
-
- Monitor.Pulse(workitem);
- break;
- }
- else
- {
- int wcycles = 0;
- // I've locked a workitem I cannot use.
- // Therefore, wake someone else up, and then release the lock.
- while (workitem.status != (int)WorkItem.Status.Compressed)
- {
- TraceOutput(TraceBits.WriteWait,
- "Write waiting wi({0}) stat({1}) nw({2}) nf({3}) nomore({4})",
- workitem.index,
- workitem.status,
- _nextToWrite, _nextToFill,
- _noMoreInputForThisSegment );
-
- if (_noMoreInputForThisSegment && _nextToWrite == _nextToFill)
- break;
-
- wcycles++;
-
- // wake up someone else
- Monitor.Pulse(workitem);
- // release and wait
- Monitor.Wait(workitem);
-
- if (workitem.status == (int)WorkItem.Status.Compressed)
- TraceOutput(TraceBits.WriteWait,
- "Write A-OK wi({0}) stat({1}) iba({2}) cba({3}) cyc({4})",
- workitem.index,
- workitem.status,
- workitem.inputBytesAvailable,
- workitem.compressedBytesAvailable,
- wcycles);
- }
-
- if (_noMoreInputForThisSegment && _nextToWrite == _nextToFill)
- break;
-
- }
- }
- while (true);
- }
-
- if (_noMoreInputForThisSegment)
- TraceOutput(TraceBits.Write,
- "Write nomore nw({0}) nf({1}) break({2})",
- _nextToWrite, _nextToFill, (_nextToWrite == _nextToFill));
-
- if (_noMoreInputForThisSegment && _nextToWrite == _nextToFill)
- break;
-
- } while (true);
-
-
- // Finish:
- // After writing a series of buffers, closing each one with
- // Flush.Sync, we now write the final one as Flush.Finish, and
- // then stop.
- byte[] buffer = new byte[128];
- ZlibCodec compressor = new ZlibCodec();
- int rc = compressor.InitializeDeflate(_compressLevel, false);
- compressor.InputBuffer = null;
- compressor.NextIn = 0;
- compressor.AvailableBytesIn = 0;
- compressor.OutputBuffer = buffer;
- compressor.NextOut = 0;
- compressor.AvailableBytesOut = buffer.Length;
- rc = compressor.Deflate(FlushType.Finish);
-
- if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
- throw new Exception("deflating: " + compressor.Message);
-
- if (buffer.Length - compressor.AvailableBytesOut > 0)
- {
- TraceOutput(TraceBits.WriteBegin,
- "Write begin flush bytes({0})",
- buffer.Length - compressor.AvailableBytesOut);
-
- _outStream.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut);
-
- TraceOutput(TraceBits.WriteBegin,
- "Write done flush");
- }
-
- compressor.EndDeflate();
-
- _Crc32 = c.Crc32Result;
-
- // signal that writing is complete:
- TraceOutput(TraceBits.Synch, "Synch _writingDone.Set() PWM");
- _writingDone.Set();
- }
- while (true);
- }
- catch (System.Exception exc1)
- {
- lock(_eLock)
- {
- // expose the exception to the main thread
- if (_pendingException!=null)
- _pendingException = exc1;
- }
- }
-
- TraceOutput(TraceBits.WriterThread, "_PerpetualWriterMethod FINIS");
- }
-#endif
-
- private void _DeflateOne(Object wi)
- {
- // compress one buffer
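- // Worker-thread path: CRC the raw block, deflate it (ending in a Sync
- // flush), then queue the result for the writer, which restores the
- // original ordering by ordinal.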
- WorkItem workitem = (WorkItem) wi;
- try
- {
- int myItem = workitem.index;
- CRC32 crc = new CRC32();
-
- // calc CRC on the buffer
- crc.SlurpBlock(workitem.buffer, 0, workitem.inputBytesAvailable);
-
- // deflate it
- DeflateOneSegment(workitem);
-
- // update status
- workitem.crc = crc.Crc32Result;
- TraceOutput(TraceBits.Compress,
- "Compress wi({0}) ord({1}) len({2})",
- workitem.index,
- workitem.ordinal,
- workitem.compressedBytesAvailable
- );
-
- lock(_latestLock)
- {
- if (workitem.ordinal > _latestCompressed)
- {
- _latestCompressed = workitem.ordinal;
- }
- }
- lock (_toWrite)
- {
- _toWrite.Enqueue(workitem.index);
- }
- _newlyCompressedBlob.Set();
- }
- catch (System.Exception exc1)
- {
- lock(_eLock)
- {
- // expose the exception to the main thread
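- // NOTE: this guard looks inverted (presumably "== null" was intended);
- // as written, nothing is stored unless an exception is already pending,
- // so a worker failure is effectively swallowed here.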
- if (_pendingException != null)
- {
- _pendingException = exc1;
- }
- }
- }
- }
-
- private bool DeflateOneSegment(WorkItem workitem)
- {
- ZlibCodec compressor = workitem.compressor;
- int rc= 0;
- compressor.ResetDeflate();
- compressor.NextIn = 0;
-
- compressor.AvailableBytesIn = workitem.inputBytesAvailable;
-
- // step 1: deflate the buffer
- compressor.NextOut = 0;
- compressor.AvailableBytesOut = workitem.compressed.Length;
- do
- {
- compressor.Deflate(FlushType.None);
- }
- while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0);
-
- // step 2: flush (sync)
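- // A Sync flush ends the block on a byte boundary (via an empty stored
- // block), which is what allows the independently deflated segments to
- // be concatenated by the writer into one valid deflate stream.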
- rc = compressor.Deflate(FlushType.Sync);
-
- workitem.compressedBytesAvailable= (int) compressor.TotalBytesOut;
- return true;
- }
-
- [System.Diagnostics.ConditionalAttribute("Trace")]
- private void TraceOutput(TraceBits bits, string format, params object[] varParams)
- {
- if ((bits & _DesiredTrace) != 0)
- {
- lock(_outputLock)
- {
- int tid = Thread.CurrentThread.GetHashCode();
-#if !SILVERLIGHT
- Console.ForegroundColor = (ConsoleColor) (tid % 8 + 8);
-#endif
- Console.Write("{0:000} PDOS ", tid);
- Console.WriteLine(format, varParams);
-#if !SILVERLIGHT
- Console.ResetColor();
-#endif
- }
- }
- }
-
- // used only when Trace is defined
- [Flags]
- enum TraceBits : uint
- {
- None = 0,
- NotUsed1 = 1,
- EmitLock = 2,
- EmitEnter = 4, // enter _EmitPending
- EmitBegin = 8, // begin to write out
- EmitDone = 16, // done writing out
- EmitSkip = 32, // writer skipping a workitem
- EmitAll = 58, // All Emit flags
- Flush = 64,
- Lifecycle = 128, // constructor/disposer
- Session = 256, // Close/Reset
- Synch = 512, // thread synchronization
- Instance = 1024, // instance settings
- Compress = 2048, // compress task
- Write = 4096, // filling buffers, when caller invokes Write()
- WriteEnter = 8192, // upon entry to Write()
- WriteTake = 16384, // on _toFill.Take()
- All = 0xffffffff,
- }
-
- ///
- /// Indicates whether the stream supports Seek operations.
- ///
- ///
- /// Always returns false.
- ///
- public override bool CanSeek
- {
- get { return false; }
- }
-
- ///
- /// Indicates whether the stream supports Read operations.
- ///
- ///
- /// Always returns false.
- ///
- public override bool CanRead
- {
- get {return false;}
- }
-
- ///
- /// Indicates whether the stream supports Write operations.
- ///
- ///
- /// Returns true if the provided stream is writable.
- ///
- public override bool CanWrite
- {
- get { return _outStream.CanWrite; }
- }
-
- ///
- /// Reading this property always throws a NotSupportedException.
- ///
- public override long Length
- {
- get { throw new NotSupportedException(); }
- }
-
- ///
- /// Returns the current position of the output stream.
- ///
- ///
- ///
- /// Because the output gets written by a background thread,
- /// the value may change asynchronously. Setting this
- /// property always throws a NotSupportedException.
- ///
- ///
- public override long Position
- {
- get { return _outStream.Position; }
- set { throw new NotSupportedException(); }
- }
-
- ///
- /// This method always throws a NotSupportedException.
- ///
- ///
- /// The buffer into which data would be read, IF THIS METHOD
- /// ACTUALLY DID ANYTHING.
- ///
- ///
- /// The offset within that data array at which to insert the
- /// data that is read, IF THIS METHOD ACTUALLY DID
- /// ANYTHING.
- ///
- ///
- /// The number of bytes to write, IF THIS METHOD ACTUALLY DID
- /// ANYTHING.
- ///
- /// nothing.
- public override int Read(byte[] buffer, int offset, int count)
- {
- throw new NotSupportedException();
- }
-
- ///
- /// This method always throws a NotSupportedException.
- ///
- ///
- /// The offset to seek to....
- /// IF THIS METHOD ACTUALLY DID ANYTHING.
- ///
- ///
- /// The reference specifying how to apply the offset.... IF
- /// THIS METHOD ACTUALLY DID ANYTHING.
- ///
- /// nothing. It always throws.
- public override long Seek(long offset, System.IO.SeekOrigin origin)
- {
- throw new NotSupportedException();
- }
-
- ///
- /// This method always throws a NotSupportedException.
- ///
- ///
- /// The new value for the stream length.... IF
- /// THIS METHOD ACTUALLY DID ANYTHING.
- ///
- public override void SetLength(long value)
- {
- throw new NotSupportedException();
- }
- }
-}
diff --git a/SabreTools.Library/External/Zlib/Tree.cs b/SabreTools.Library/External/Zlib/Tree.cs
deleted file mode 100644
index 40e796c2..00000000
--- a/SabreTools.Library/External/Zlib/Tree.cs
+++ /dev/null
@@ -1,437 +0,0 @@
-// Tree.cs
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
-// All rights reserved.
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-//
-// last saved (in emacs):
-// Time-stamp: <2009-October-28 13:29:50>
-//
-// ------------------------------------------------------------------
-//
-// This module defines classes for zlib compression and
-// decompression. This code is derived from the jzlib implementation of
-// zlib. In keeping with the license for jzlib, the copyright to that
-// code is below.
-//
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in
-// the documentation and/or other materials provided with the distribution.
-//
-// 3. The names of the authors may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
-// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
-// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
-// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// -----------------------------------------------------------------------
-//
-// This program is based on zlib-1.1.3; credit to authors
-// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
-// and contributors of zlib.
-//
-// -----------------------------------------------------------------------
-
-namespace Ionic.Zlib
-{
- sealed class Tree
- {
- private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1);
-
- // extra bits for each length code
- internal static readonly int[] ExtraLengthBits = new int[]
- {
- 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0
- };
-
- // extra bits for each distance code
- internal static readonly int[] ExtraDistanceBits = new int[]
- {
- 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
- 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
- };
-
- // extra bits for each bit length code
- internal static readonly int[] extra_blbits = new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7};
-
- internal static readonly sbyte[] bl_order = new sbyte[]{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
-
- // The lengths of the bit length codes are sent in order of decreasing
- // probability, to avoid transmitting the lengths for unused bit
- // length codes.
-
- internal const int Buf_size = 8 * 2;
-
- // see definition of array dist_code below
- //internal const int DIST_CODE_LEN = 512;
-
- private static readonly sbyte[] _dist_code = new sbyte[]
- {
- 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
- 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
- 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
- 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
- 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
- 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
- 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
- 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
- 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
- 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
- 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
- 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
- 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
- 0, 0, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
- 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
- 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
- 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
- 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
- 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
- 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
- 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
- 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
- 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
- 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
- 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
- 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
- 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
- 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
- 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
- };
-
- internal static readonly sbyte[] LengthCode = new sbyte[]
- {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11,
- 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15,
- 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
- 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19,
- 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
- 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
- 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
- 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
- 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
- 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
- 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
- 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
- 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
- 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
- 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
- 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28
- };
-
- internal static readonly int[] LengthBase = new int[]
- {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
- 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0
- };
-
- internal static readonly int[] DistanceBase = new int[]
- {
- 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
- 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576
- };
-
- ///
- /// Map from a distance to a distance code.
- ///
- ///
- /// No side effects. _dist_code[256] and _dist_code[257] are never used.
- ///
- internal static int DistanceCode(int dist)
- {
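- // Distances below 256 index the table directly; larger distances are
- // mapped through the second half of the table via dist >> 7, offset by 256.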
- return (dist < 256)
- ? _dist_code[dist]
- : _dist_code[256 + SharedUtils.URShift(dist, 7)];
- }
-
- internal short[] dyn_tree; // the dynamic tree
- internal int max_code; // largest code with non zero frequency
- internal StaticTree staticTree; // the corresponding static tree
-
- // Compute the optimal bit lengths for a tree and update the total bit length
- // for the current block.
- // IN assertion: the fields freq and dad are set, heap[heap_max] and
- // above are the tree nodes sorted by increasing frequency.
- // OUT assertions: the field len is set to the optimal bit length, the
- // array bl_count contains the frequencies for each bit length.
- // The length opt_len is updated; static_len is also updated if stree is
- // not null.
- internal void gen_bitlen(DeflateManager s)
- {
- short[] tree = dyn_tree;
- short[] stree = staticTree.treeCodes;
- int[] extra = staticTree.extraBits;
- int base_Renamed = staticTree.extraBase;
- int max_length = staticTree.maxLength;
- int h; // heap index
- int n, m; // iterate over the tree elements
- int bits; // bit length
- int xbits; // extra bits
- short f; // frequency
- int overflow = 0; // number of elements with bit length too large
-
- for (bits = 0; bits <= InternalConstants.MAX_BITS; bits++)
- {
- s.bl_count[bits] = 0;
- }
-
- // In a first pass, compute the optimal bit lengths (which may
- // overflow in the case of the bit length tree).
- tree[s.heap[s.heap_max] * 2 + 1] = 0; // root of the heap
-
- for (h = s.heap_max + 1; h < HEAP_SIZE; h++)
- {
- n = s.heap[h];
- bits = tree[tree[n * 2 + 1] * 2 + 1] + 1;
- if (bits > max_length)
- {
- bits = max_length; overflow++;
- }
- tree[n * 2 + 1] = (short) bits;
- // We overwrite tree[n*2+1] which is no longer needed
-
- if (n > max_code)
- {
- continue; // not a leaf node
- }
-
- s.bl_count[bits]++;
- xbits = 0;
- if (n >= base_Renamed)
- {
- xbits = extra[n - base_Renamed];
- }
- f = tree[n * 2];
- s.opt_len += f * (bits + xbits);
- if (stree != null)
- {
- s.static_len += f * (stree[n * 2 + 1] + xbits);
- }
- }
- if (overflow == 0)
- {
- return;
- }
-
- // This happens for example on obj2 and pic of the Calgary corpus
- // Find the first bit length which could increase:
- do
- {
- bits = max_length - 1;
- while (s.bl_count[bits] == 0)
- {
- bits--;
- }
- s.bl_count[bits]--; // move one leaf down the tree
- s.bl_count[bits + 1] = (short) (s.bl_count[bits + 1] + 2); // move one overflow item as its brother
- s.bl_count[max_length]--;
- // The brother of the overflow item also moves one step up,
- // but this does not affect bl_count[max_length]
- overflow -= 2;
- }
- while (overflow > 0);
-
- for (bits = max_length; bits != 0; bits--)
- {
- n = s.bl_count[bits];
- while (n != 0)
- {
- m = s.heap[--h];
- if (m > max_code)
- {
- continue;
- }
- if (tree[m * 2 + 1] != bits)
- {
- s.opt_len = (int) (s.opt_len + ((long) bits - (long) tree[m * 2 + 1]) * (long) tree[m * 2]);
- tree[m * 2 + 1] = (short) bits;
- }
- n--;
- }
- }
- }
-
- // Construct one Huffman tree and assign the code bit strings and lengths.
- // Update the total bit length for the current block.
- // IN assertion: the field freq is set for all tree elements.
- // OUT assertions: the fields len and code are set to the optimal bit length
- // and corresponding code. The length opt_len is updated; static_len is
- // also updated if stree is not null. The field max_code is set.
- internal void build_tree(DeflateManager s)
- {
- short[] tree = dyn_tree;
- short[] stree = staticTree.treeCodes;
- int elems = staticTree.elems;
- int n, m; // iterate over heap elements
- int max_code = -1; // largest code with non zero frequency
- int node; // new node being created
-
- // Construct the initial heap, with least frequent element in
- // heap[1]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
- // heap[0] is not used.
- s.heap_len = 0;
- s.heap_max = HEAP_SIZE;
-
- for (n = 0; n < elems; n++)
- {
- if (tree[n * 2] != 0)
- {
- s.heap[++s.heap_len] = max_code = n;
- s.depth[n] = 0;
- }
- else
- {
- tree[n * 2 + 1] = 0;
- }
- }
-
- // The pkzip format requires that at least one distance code exists,
- // and that at least one bit should be sent even if there is only one
- // possible code. So to avoid special checks later on we force at least
- // two codes of non zero frequency.
- while (s.heap_len < 2)
- {
- node = s.heap[++s.heap_len] = (max_code < 2?++max_code:0);
- tree[node * 2] = 1;
- s.depth[node] = 0;
- s.opt_len--;
- if (stree != null)
- {
- s.static_len -= stree[node * 2 + 1];
- }
- // node is 0 or 1 so it does not have extra bits
- }
- this.max_code = max_code;
-
- // The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
- // establish sub-heaps of increasing lengths:
-
- for (n = s.heap_len / 2; n >= 1; n--)
- {
- s.pqdownheap(tree, n);
- }
-
- // Construct the Huffman tree by repeatedly combining the least two
- // frequent nodes.
-
- node = elems; // next internal node of the tree
- do
- {
- // n = node of least frequency
- n = s.heap[1];
- s.heap[1] = s.heap[s.heap_len--];
- s.pqdownheap(tree, 1);
- m = s.heap[1]; // m = node of next least frequency
-
- s.heap[--s.heap_max] = n; // keep the nodes sorted by frequency
- s.heap[--s.heap_max] = m;
-
- // Create a new node father of n and m
- tree[node * 2] = unchecked((short) (tree[n * 2] + tree[m * 2]));
- s.depth[node] = (sbyte) (System.Math.Max((byte) s.depth[n], (byte) s.depth[m]) + 1);
- tree[n * 2 + 1] = tree[m * 2 + 1] = (short) node;
-
- // and insert the new node in the heap
- s.heap[1] = node++;
- s.pqdownheap(tree, 1);
- }
- while (s.heap_len >= 2);
-
- s.heap[--s.heap_max] = s.heap[1];
-
- // At this point, the fields freq and dad are set. We can now
- // generate the bit lengths.
-
- gen_bitlen(s);
-
- // The field len is now set, we can generate the bit codes
- gen_codes(tree, max_code, s.bl_count);
- }
-
- // Generate the codes for a given tree and bit counts (which need not be
- // optimal).
- // IN assertion: the array bl_count contains the bit length statistics for
- // the given tree and the field len is set for all tree elements.
- // OUT assertion: the field code is set for all tree elements of non
- // zero code length.
- internal static void gen_codes(short[] tree, int max_code, short[] bl_count)
- {
- short[] next_code = new short[InternalConstants.MAX_BITS + 1]; // next code value for each bit length
- short code = 0; // running code value
- int bits; // bit index
- int n; // code index
-
- // The distribution counts are first used to generate the code values
- // without bit reversal.
- for (bits = 1; bits <= InternalConstants.MAX_BITS; bits++)
- {
- unchecked
- {
- next_code[bits] = code = (short)((code + bl_count[bits - 1]) << 1);
- }
- }
-
- // Check that the bit counts in bl_count are consistent. The last code
- // must be all ones.
- //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
- // "inconsistent bit counts");
-
- for (n = 0; n <= max_code; n++)
- {
- int len = tree[n * 2 + 1];
- if (len == 0)
- {
- continue;
- }
-
- // Now reverse the bits
- tree[n * 2] = unchecked((short)(bi_reverse(next_code[len]++, len)));
- }
- }
-
- // Reverse the first len bits of a code, using straightforward code (a
- // faster method would use a table)
- // IN assertion: 1 <= len <= 15
- internal static int bi_reverse(int code, int len)
- {
- int res = 0;
- do
- {
- res |= code & 1;
- code >>= 1; //SharedUtils.URShift(code, 1);
- res <<= 1;
- }
- while (--len > 0);
- return res >> 1;
- }
- }
-}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Zlib/Zlib.cs b/SabreTools.Library/External/Zlib/Zlib.cs
deleted file mode 100644
index a301f072..00000000
--- a/SabreTools.Library/External/Zlib/Zlib.cs
+++ /dev/null
@@ -1,538 +0,0 @@
-// Zlib.cs
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2009-2011 Dino Chiesa and Microsoft Corporation.
-// All rights reserved.
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-//
-// Last Saved: <2011-August-03 19:52:28>
-//
-// ------------------------------------------------------------------
-//
-// This module defines classes for ZLIB compression and
-// decompression. This code is derived from the jzlib implementation of
-// zlib, but significantly modified. The object model is not the same,
-// and many of the behaviors are new or different. Nonetheless, in
-// keeping with the license for jzlib, the copyright to that code is
-// included below.
-//
-// ------------------------------------------------------------------
-//
-// The following notice applies to jzlib:
-//
-// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in
-// the documentation and/or other materials provided with the distribution.
-//
-// 3. The names of the authors may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
-// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
-// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
-// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// -----------------------------------------------------------------------
-//
-// jzlib is based on zlib-1.1.3.
-//
-// The following notice applies to zlib:
-//
-// -----------------------------------------------------------------------
-//
-// Copyright (C) 1995-2004 Jean-loup Gailly and Mark Adler
-//
-// The ZLIB software is provided 'as-is', without any express or implied
-// warranty. In no event will the authors be held liable for any damages
-// arising from the use of this software.
-//
-// Permission is granted to anyone to use this software for any purpose,
-// including commercial applications, and to alter it and redistribute it
-// freely, subject to the following restrictions:
-//
-// 1. The origin of this software must not be misrepresented; you must not
-// claim that you wrote the original software. If you use this software
-// in a product, an acknowledgment in the product documentation would be
-// appreciated but is not required.
-// 2. Altered source versions must be plainly marked as such, and must not be
-// misrepresented as being the original software.
-// 3. This notice may not be removed or altered from any source distribution.
-//
-// Jean-loup Gailly jloup@gzip.org
-// Mark Adler madler@alumni.caltech.edu
-//
-// -----------------------------------------------------------------------
-
-using System.Runtime.InteropServices;
-
-namespace Ionic.Zlib
-{
- ///
- /// Describes how to flush the current deflate operation.
- ///
- ///
- /// The different FlushType values are useful when using a Deflate in a streaming application.
- ///
- public enum FlushType
- {
- /// No flush at all.
- None = 0,
-
- /// Closes the current block, but doesn't flush it to
- /// the output. Used internally only in hypothetical
- /// scenarios. This was supposed to be removed by Zlib, but it is
- /// still in use in some edge cases.
- ///
- Partial,
-
- ///
- /// Use this during compression to specify that all pending output should be
- /// flushed to the output buffer and the output should be aligned on a byte
- /// boundary. You might use this in a streaming communication scenario, so that
- /// the decompressor can get all input data available so far. When using this
- /// with a ZlibCodec, AvailableBytesIn will be zero after the call if
- /// enough output space has been provided before the call. Flushing will
- /// degrade compression and so it should be used only when necessary.
- ///
- Sync,
-
- ///
- /// Use this during compression to specify that all output should be flushed, as
- /// with FlushType.Sync, but also, the compression state should be reset
- /// so that decompression can restart from this point if previous compressed
- /// data has been damaged or if random access is desired. Using
- /// FlushType.Full too often can significantly degrade the compression.
- ///
- Full,
-
- /// Signals the end of the compression/decompression stream.
- Finish,
- }
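-
- // Illustrative sketch (not part of the original source): a typical
- // compression loop feeds chunks with FlushType.None and terminates the
- // stream with Finish, much as the deflate streams in this library do:
- //
- // var codec = new ZlibCodec();
- // codec.InitializeDeflate(CompressionLevel.Default, true);
- // // ... set InputBuffer/NextIn/AvailableBytesIn and
- // // OutputBuffer/NextOut/AvailableBytesOut, then call
- // // codec.Deflate(FlushType.None) until the input is consumed ...
- // codec.Deflate(FlushType.Finish); // emit the final block
- // codec.EndDeflate();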
-
- ///
- /// The compression level to be used when using a DeflateStream or ZlibStream with CompressionMode.Compress.
- ///
- public enum CompressionLevel
- {
- ///
- /// None means that the data will be simply stored, with no change at all.
- /// If you are producing ZIPs for use on Mac OSX, be aware that archives produced with CompressionLevel.None
- /// cannot be opened with the default zip reader. Use a different CompressionLevel.
- ///
- None= 0,
- ///
- /// Same as None.
- ///
- Level0 = 0,
-
- ///
- /// The fastest but least effective compression.
- ///
- BestSpeed = 1,
-
- ///
- /// A synonym for BestSpeed.
- ///
- Level1 = 1,
-
- ///
- /// A little slower, but better, than level 1.
- ///
- Level2 = 2,
-
- ///
- /// A little slower, but better, than level 2.
- ///
- Level3 = 3,
-
- ///
- /// A little slower, but better, than level 3.
- ///
- Level4 = 4,
-
- ///
- /// A little slower than level 4, but with better compression.
- ///
- Level5 = 5,
-
- ///
- /// The default compression level, with a good balance of speed and compression efficiency.
- ///
- Default = 6,
- ///
- /// A synonym for Default.
- ///
- Level6 = 6,
-
- ///
- /// Pretty good compression!
- ///
- Level7 = 7,
-
- ///
- /// Better compression than Level7!
- ///
- Level8 = 8,
-
- ///
- /// The "best" compression, where best means greatest reduction in size of the input data stream.
- /// This is also the slowest compression.
- ///
- BestCompression = 9,
-
- ///
- /// A synonym for BestCompression.
- ///
- Level9 = 9,
- }
-
- ///
- /// Describes options for how the compression algorithm is executed. Different strategies
- /// work better on different sorts of data. The strategy parameter can affect the compression
- /// ratio and the speed of compression but not the correctness of the compression.
- ///
- public enum CompressionStrategy
- {
- ///
- /// The default strategy is probably the best for normal data.
- ///
- Default = 0,
-
- ///
- /// The Filtered strategy is intended to be used most effectively with data produced by a
- /// filter or predictor. By this definition, filtered data consists mostly of small
- /// values with a somewhat random distribution. In this case, the compression algorithm
- /// is tuned to compress them better. The effect of Filtered is to force more Huffman
- /// coding and less string matching; it is a half-step between Default and HuffmanOnly.
- ///
- Filtered = 1,
-
- ///
- /// Using HuffmanOnly will force the compressor to do Huffman encoding only, with no
- /// string matching.
- ///
- HuffmanOnly = 2,
- }
-
- ///
- /// An enum to specify the direction of transcoding - whether to compress or decompress.
- ///
- public enum CompressionMode
- {
- ///
- /// Used to specify that the stream should compress the data.
- ///
- Compress= 0,
- ///
- /// Used to specify that the stream should decompress the data.
- ///
- Decompress = 1,
- }
-
- ///
- /// A general purpose exception class for exceptions in the Zlib library.
- ///
- [Guid("ebc25cf6-9120-4283-b972-0e5520d0000E")]
- public class ZlibException : System.Exception
- {
- ///
- /// The ZlibException class captures exception information generated
- /// by the Zlib library.
- ///
- public ZlibException()
- : base()
- {
- }
-
- ///
- /// This ctor collects a message attached to the exception.
- ///
- /// the message for the exception.
- public ZlibException(System.String s)
- : base(s)
- {
- }
- }
-
- internal class SharedUtils
- {
- ///
- /// Performs an unsigned bitwise right shift with the specified number
- ///
- /// Number to operate on
- /// Number of bits to shift
- /// The resulting number from the shift operation
- public static int URShift(int number, int bits)
- {
- return (int)((uint)number >> bits);
- }
-
-#if NOT
- ///
- /// Performs an unsigned bitwise right shift with the specified number
- ///
- /// Number to operate on
- /// Number of bits to shift
- /// The resulting number from the shift operation
- public static long URShift(long number, int bits)
- {
- return (long) ((UInt64)number >> bits);
- }
-#endif
-
- ///
- /// Reads a number of characters from the current source TextReader and writes
- /// the data to the target array at the specified index.
- ///
- ///
- /// The source TextReader to read from
- /// Contains the array of characters read from the source TextReader.
- /// The starting index of the target array.
- /// The maximum number of characters to read from the source TextReader.
- ///
- ///
- /// The number of characters read. The number will be less than or equal to
- /// count depending on the data available in the source TextReader. Returns -1
- /// if the end of the stream is reached.
- ///
- public static System.Int32 ReadInput(System.IO.TextReader sourceTextReader, byte[] target, int start, int count)
- {
- // Returns 0 bytes if not enough space in target
- if (target.Length == 0)
- {
- return 0;
- }
-
- char[] charArray = new char[target.Length];
- int bytesRead = sourceTextReader.Read(charArray, start, count);
-
- // Returns -1 if EOF
- if (bytesRead == 0) return -1;
-
- for (int index = start; index < start + bytesRead; index++)
- {
- target[index] = (byte)charArray[index];
- }
-
- return bytesRead;
- }
-
- internal static byte[] ToByteArray(System.String sourceString)
- {
- return System.Text.UTF8Encoding.UTF8.GetBytes(sourceString);
- }
-
- internal static char[] ToCharArray(byte[] byteArray)
- {
- return System.Text.UTF8Encoding.UTF8.GetChars(byteArray);
- }
- }
-
- internal static class InternalConstants
- {
- internal static readonly int MAX_BITS = 15;
- internal static readonly int BL_CODES = 19;
- internal static readonly int D_CODES = 30;
- internal static readonly int LITERALS = 256;
- internal static readonly int LENGTH_CODES = 29;
- internal static readonly int L_CODES = (LITERALS + 1 + LENGTH_CODES);
-
- // Bit length codes must not exceed MAX_BL_BITS bits
- internal static readonly int MAX_BL_BITS = 7;
-
- // repeat previous bit length 3-6 times (2 bits of repeat count)
- internal static readonly int REP_3_6 = 16;
-
- // repeat a zero length 3-10 times (3 bits of repeat count)
- internal static readonly int REPZ_3_10 = 17;
-
- // repeat a zero length 11-138 times (7 bits of repeat count)
- internal static readonly int REPZ_11_138 = 18;
- }
-
- internal sealed class StaticTree
- {
- internal static readonly short[] lengthAndLiteralsTreeCodes = new short[] {
- 12, 8, 140, 8, 76, 8, 204, 8, 44, 8, 172, 8, 108, 8, 236, 8,
- 28, 8, 156, 8, 92, 8, 220, 8, 60, 8, 188, 8, 124, 8, 252, 8,
- 2, 8, 130, 8, 66, 8, 194, 8, 34, 8, 162, 8, 98, 8, 226, 8,
- 18, 8, 146, 8, 82, 8, 210, 8, 50, 8, 178, 8, 114, 8, 242, 8,
- 10, 8, 138, 8, 74, 8, 202, 8, 42, 8, 170, 8, 106, 8, 234, 8,
- 26, 8, 154, 8, 90, 8, 218, 8, 58, 8, 186, 8, 122, 8, 250, 8,
- 6, 8, 134, 8, 70, 8, 198, 8, 38, 8, 166, 8, 102, 8, 230, 8,
- 22, 8, 150, 8, 86, 8, 214, 8, 54, 8, 182, 8, 118, 8, 246, 8,
- 14, 8, 142, 8, 78, 8, 206, 8, 46, 8, 174, 8, 110, 8, 238, 8,
- 30, 8, 158, 8, 94, 8, 222, 8, 62, 8, 190, 8, 126, 8, 254, 8,
- 1, 8, 129, 8, 65, 8, 193, 8, 33, 8, 161, 8, 97, 8, 225, 8,
- 17, 8, 145, 8, 81, 8, 209, 8, 49, 8, 177, 8, 113, 8, 241, 8,
- 9, 8, 137, 8, 73, 8, 201, 8, 41, 8, 169, 8, 105, 8, 233, 8,
- 25, 8, 153, 8, 89, 8, 217, 8, 57, 8, 185, 8, 121, 8, 249, 8,
- 5, 8, 133, 8, 69, 8, 197, 8, 37, 8, 165, 8, 101, 8, 229, 8,
- 21, 8, 149, 8, 85, 8, 213, 8, 53, 8, 181, 8, 117, 8, 245, 8,
- 13, 8, 141, 8, 77, 8, 205, 8, 45, 8, 173, 8, 109, 8, 237, 8,
- 29, 8, 157, 8, 93, 8, 221, 8, 61, 8, 189, 8, 125, 8, 253, 8,
- 19, 9, 275, 9, 147, 9, 403, 9, 83, 9, 339, 9, 211, 9, 467, 9,
- 51, 9, 307, 9, 179, 9, 435, 9, 115, 9, 371, 9, 243, 9, 499, 9,
- 11, 9, 267, 9, 139, 9, 395, 9, 75, 9, 331, 9, 203, 9, 459, 9,
- 43, 9, 299, 9, 171, 9, 427, 9, 107, 9, 363, 9, 235, 9, 491, 9,
- 27, 9, 283, 9, 155, 9, 411, 9, 91, 9, 347, 9, 219, 9, 475, 9,
- 59, 9, 315, 9, 187, 9, 443, 9, 123, 9, 379, 9, 251, 9, 507, 9,
- 7, 9, 263, 9, 135, 9, 391, 9, 71, 9, 327, 9, 199, 9, 455, 9,
- 39, 9, 295, 9, 167, 9, 423, 9, 103, 9, 359, 9, 231, 9, 487, 9,
- 23, 9, 279, 9, 151, 9, 407, 9, 87, 9, 343, 9, 215, 9, 471, 9,
- 55, 9, 311, 9, 183, 9, 439, 9, 119, 9, 375, 9, 247, 9, 503, 9,
- 15, 9, 271, 9, 143, 9, 399, 9, 79, 9, 335, 9, 207, 9, 463, 9,
- 47, 9, 303, 9, 175, 9, 431, 9, 111, 9, 367, 9, 239, 9, 495, 9,
- 31, 9, 287, 9, 159, 9, 415, 9, 95, 9, 351, 9, 223, 9, 479, 9,
- 63, 9, 319, 9, 191, 9, 447, 9, 127, 9, 383, 9, 255, 9, 511, 9,
- 0, 7, 64, 7, 32, 7, 96, 7, 16, 7, 80, 7, 48, 7, 112, 7,
- 8, 7, 72, 7, 40, 7, 104, 7, 24, 7, 88, 7, 56, 7, 120, 7,
- 4, 7, 68, 7, 36, 7, 100, 7, 20, 7, 84, 7, 52, 7, 116, 7,
- 3, 8, 131, 8, 67, 8, 195, 8, 35, 8, 163, 8, 99, 8, 227, 8
- };
-
- internal static readonly short[] distTreeCodes = new short[] {
- 0, 5, 16, 5, 8, 5, 24, 5, 4, 5, 20, 5, 12, 5, 28, 5,
- 2, 5, 18, 5, 10, 5, 26, 5, 6, 5, 22, 5, 14, 5, 30, 5,
- 1, 5, 17, 5, 9, 5, 25, 5, 5, 5, 21, 5, 13, 5, 29, 5,
- 3, 5, 19, 5, 11, 5, 27, 5, 7, 5, 23, 5 };
-
- internal static readonly StaticTree Literals;
- internal static readonly StaticTree Distances;
- internal static readonly StaticTree BitLengths;
-
- internal short[] treeCodes; // static tree or null
- internal int[] extraBits; // extra bits for each code or null
- internal int extraBase; // base index for extra_bits
- internal int elems; // max number of elements in the tree
- internal int maxLength; // max bit length for the codes
-
- private StaticTree(short[] treeCodes, int[] extraBits, int extraBase, int elems, int maxLength)
- {
- this.treeCodes = treeCodes;
- this.extraBits = extraBits;
- this.extraBase = extraBase;
- this.elems = elems;
- this.maxLength = maxLength;
- }
-
- static StaticTree()
- {
- Literals = new StaticTree(lengthAndLiteralsTreeCodes, Tree.ExtraLengthBits, InternalConstants.LITERALS + 1, InternalConstants.L_CODES, InternalConstants.MAX_BITS);
- Distances = new StaticTree(distTreeCodes, Tree.ExtraDistanceBits, 0, InternalConstants.D_CODES, InternalConstants.MAX_BITS);
- BitLengths = new StaticTree(null, Tree.extra_blbits, 0, InternalConstants.BL_CODES, InternalConstants.MAX_BL_BITS);
- }
- }
-
- ///
- /// Computes an Adler-32 checksum.
- ///
- ///
- /// The Adler checksum is similar to a CRC checksum, but faster to compute, though less
- /// reliable. It is used in producing RFC1950 compressed streams. The Adler checksum
- /// is a required part of the "ZLIB" standard. Applications will almost never need to
- /// use this class directly.
- ///
- ///
- ///
- public sealed class Adler
- {
- // largest prime smaller than 65536
- private static readonly uint BASE = 65521;
- // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
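- // Deferring the "% BASE" reduction until NMAX bytes have been summed keeps
- // s1 and s2 within 32 bits while avoiding a modulo operation on every byte.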
- private static readonly int NMAX = 5552;
-
-#pragma warning disable 3001
-#pragma warning disable 3002
-
- ///
- /// Calculates the Adler32 checksum.
- ///
- ///
- ///
- /// This is used within ZLIB. You probably don't need to use this directly.
- ///
- ///
- ///
- /// To compute an Adler32 checksum on a byte array:
- ///
- /// var adler = Adler.Adler32(0, null, 0, 0);
- /// adler = Adler.Adler32(adler, buffer, index, length);
- ///
- ///
- public static uint Adler32(uint adler, byte[] buf, int index, int len)
- {
- if (buf == null)
- {
- return 1;
- }
-
- uint s1 = (uint)(adler & 0xffff);
- uint s2 = (uint)((adler >> 16) & 0xffff);
-
- while (len > 0)
- {
- int k = len < NMAX ? len : NMAX;
- len -= k;
- while (k >= 16)
- {
- //s1 += (buf[index++] & 0xff); s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- s1 += buf[index++]; s2 += s1;
- k -= 16;
- }
- if (k != 0)
- {
- do
- {
- s1 += buf[index++];
- s2 += s1;
- }
- while (--k != 0);
- }
- s1 %= BASE;
- s2 %= BASE;
- }
- return (uint)((s2 << 16) | s1);
- }
-#pragma warning restore 3001
-#pragma warning restore 3002
- }
-}
\ No newline at end of file
diff --git a/SabreTools.Library/External/Zlib/ZlibBaseStream.cs b/SabreTools.Library/External/Zlib/ZlibBaseStream.cs
deleted file mode 100644
index 512672a5..00000000
--- a/SabreTools.Library/External/Zlib/ZlibBaseStream.cs
+++ /dev/null
@@ -1,690 +0,0 @@
-// ZlibBaseStream.cs
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
-// All rights reserved.
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-//
-// last saved (in emacs):
-// Time-stamp: <2011-August-06 21:22:38>
-//
-// ------------------------------------------------------------------
-//
- // This module defines the ZlibBaseStream class, which is an internal
-// base class for DeflateStream, ZlibStream and GZipStream.
-//
-// ------------------------------------------------------------------
-
-using System;
-using System.IO;
-
-namespace Ionic.Zlib
-{
- internal enum ZlibStreamFlavor { ZLIB = 1950, DEFLATE = 1951, GZIP = 1952 }
-
- internal class ZlibBaseStream : System.IO.Stream
- {
- protected internal ZlibCodec _z = null; // deferred init... new ZlibCodec();
-
- protected internal StreamMode _streamMode = StreamMode.Undefined;
- protected internal FlushType _flushMode;
- protected internal ZlibStreamFlavor _flavor;
- protected internal CompressionMode _compressionMode;
- protected internal CompressionLevel _level;
- protected internal bool _leaveOpen;
- protected internal byte[] _workingBuffer;
- protected internal int _bufferSize = ZlibConstants.WorkingBufferSizeDefault;
- protected internal byte[] _buf1 = new byte[1];
-
- protected internal System.IO.Stream _stream;
- protected internal CompressionStrategy Strategy = CompressionStrategy.Default;
-
- // workitem 7159
- CRC32 crc;
- protected internal string _GzipFileName;
- protected internal string _GzipComment;
- protected internal DateTime _GzipMtime;
- protected internal int _gzipHeaderByteCount;
-
- internal int Crc32 { get { if (crc == null) return 0; return crc.Crc32Result; } }
-
- public ZlibBaseStream(System.IO.Stream stream,
- CompressionMode compressionMode,
- CompressionLevel level,
- ZlibStreamFlavor flavor,
- bool leaveOpen)
- : base()
- {
- this._flushMode = FlushType.None;
- //this._workingBuffer = new byte[WORKING_BUFFER_SIZE_DEFAULT];
- this._stream = stream;
- this._leaveOpen = leaveOpen;
- this._compressionMode = compressionMode;
- this._flavor = flavor;
- this._level = level;
- // workitem 7159
- if (flavor == ZlibStreamFlavor.GZIP)
- {
- this.crc = new CRC32();
- }
- }
-
-
- protected internal bool _wantCompress
- {
- get
- {
- return (this._compressionMode == CompressionMode.Compress);
- }
- }
-
- private ZlibCodec z
- {
- get
- {
- if (_z == null)
- {
- bool wantRfc1950Header = (this._flavor == ZlibStreamFlavor.ZLIB);
- _z = new ZlibCodec();
- if (this._compressionMode == CompressionMode.Decompress)
- {
- _z.InitializeInflate(wantRfc1950Header);
- }
- else
- {
- _z.Strategy = Strategy;
- _z.InitializeDeflate(this._level, wantRfc1950Header);
- }
- }
- return _z;
- }
- }
-
- private byte[] workingBuffer
- {
- get
- {
- if (_workingBuffer == null)
- {
- _workingBuffer = new byte[_bufferSize];
- }
- return _workingBuffer;
- }
- }
-
- public override void Write(System.Byte[] buffer, int offset, int count)
- {
- // workitem 7159
- // calculate the CRC on the uncompressed data (before writing)
- if (crc != null)
- {
- crc.SlurpBlock(buffer, offset, count);
- }
-
- if (_streamMode == StreamMode.Undefined)
- {
- _streamMode = StreamMode.Writer;
- }
- else if (_streamMode != StreamMode.Writer)
- {
- throw new ZlibException("Cannot Write after Reading.");
- }
-
- if (count == 0)
- {
- return;
- }
-
- // first reference of z property will initialize the private var _z
- z.InputBuffer = buffer;
- _z.NextIn = offset;
- _z.AvailableBytesIn = count;
- bool done = false;
- do
- {
- _z.OutputBuffer = workingBuffer;
- _z.NextOut = 0;
- _z.AvailableBytesOut = _workingBuffer.Length;
- int rc = (_wantCompress)
- ? _z.Deflate(_flushMode)
- : _z.Inflate(_flushMode);
- if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
- {
- throw new ZlibException((_wantCompress ? "de" : "in") + "flating: " + _z.Message);
- }
-
- //if (_workingBuffer.Length - _z.AvailableBytesOut > 0)
- _stream.Write(_workingBuffer, 0, _workingBuffer.Length - _z.AvailableBytesOut);
-
- done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0;
-
- // If GZIP and de-compress, we're done when 8 bytes remain.
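- // (Those 8 bytes are the GZIP trailer: the CRC32 and the uncompressed
- // size mod 2^32.)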
- if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress)
- {
- done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0);
- }
- }
- while (!done);
- }
-
- private void finish()
- {
- if (_z == null)
- {
- return;
- }
-
- if (_streamMode == StreamMode.Writer)
- {
- bool done = false;
- do
- {
- _z.OutputBuffer = workingBuffer;
- _z.NextOut = 0;
- _z.AvailableBytesOut = _workingBuffer.Length;
- int rc = (_wantCompress)
- ? _z.Deflate(FlushType.Finish)
- : _z.Inflate(FlushType.Finish);
-
- if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
- {
- string verb = (_wantCompress ? "de" : "in") + "flating";
- if (_z.Message == null)
- {
- throw new ZlibException(String.Format("{0}: (rc = {1})", verb, rc));
- }
- else
- {
- throw new ZlibException(verb + ": " + _z.Message);
- }
- }
-
- if (_workingBuffer.Length - _z.AvailableBytesOut > 0)
- {
- _stream.Write(_workingBuffer, 0, _workingBuffer.Length - _z.AvailableBytesOut);
- }
-
- done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0;
- // If GZIP and de-compress, we're done when 8 bytes remain.
- if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress)
- done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0);
-
- }
- while (!done);
-
- Flush();
-
- // workitem 7159
- if (_flavor == ZlibStreamFlavor.GZIP)
- {
- if (_wantCompress)
- {
- // Emit the GZIP trailer: CRC32 and size mod 2^32
- int c1 = crc.Crc32Result;
- _stream.Write(BitConverter.GetBytes(c1), 0, 4);
- int c2 = (Int32)(crc.TotalBytesRead & 0x00000000FFFFFFFF);
- _stream.Write(BitConverter.GetBytes(c2), 0, 4);
- }
- else
- {
- throw new ZlibException("Writing with decompression is not supported.");
- }
- }
- }
- // workitem 7159
- else if (_streamMode == StreamMode.Reader)
- {
- if (_flavor == ZlibStreamFlavor.GZIP)
- {
- if (!_wantCompress)
- {
- // workitem 8501: handle edge case (decompress empty stream)
- if (_z.TotalBytesOut == 0L)
- {
- return;
- }
-
- // Read and potentially verify the GZIP trailer:
- // CRC32 and size mod 2^32
- byte[] trailer = new byte[8];
-
- // workitems 8679 & 12554
- if (_z.AvailableBytesIn < 8)
- {
- // Make sure we have read to the end of the stream
- Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, _z.AvailableBytesIn);
- int bytesNeeded = 8 - _z.AvailableBytesIn;
- int bytesRead = _stream.Read(trailer,
- _z.AvailableBytesIn,
- bytesNeeded);
- if (bytesNeeded != bytesRead)
- {
- throw new ZlibException(String.Format("Missing or incomplete GZIP trailer. Expected 8 bytes, got {0}.",
- _z.AvailableBytesIn + bytesRead));
- }
- }
- else
- {
- Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, trailer.Length);
- }
-
- Int32 crc32_expected = BitConverter.ToInt32(trailer, 0);
- Int32 crc32_actual = crc.Crc32Result;
- Int32 isize_expected = BitConverter.ToInt32(trailer, 4);
- Int32 isize_actual = (Int32)(_z.TotalBytesOut & 0x00000000FFFFFFFF);
-
- if (crc32_actual != crc32_expected)
- {
- throw new ZlibException(String.Format("Bad CRC32 in GZIP trailer. (actual({0:X8})!=expected({1:X8}))", crc32_actual, crc32_expected));
- }
-
- if (isize_actual != isize_expected)
- {
- throw new ZlibException(String.Format("Bad size in GZIP trailer. (actual({0})!=expected({1}))", isize_actual, isize_expected));
- }
- }
- else
- {
- throw new ZlibException("Reading with compression is not supported.");
- }
- }
- }
- }
-
- private void end()
- {
- if (z == null)
- {
- return;
- }
- if (_wantCompress)
- {
- _z.EndDeflate();
- }
- else
- {
- _z.EndInflate();
- }
- _z = null;
- }
-
-
- public override void Close()
- {
- if (_stream == null)
- {
- return;
- }
- try
- {
- finish();
- }
- finally
- {
- end();
- if (!_leaveOpen)
- {
- _stream.Close();
- }
- _stream = null;
- }
- }
-
- public override void Flush()
- {
- _stream.Flush();
- }
-
- public override System.Int64 Seek(System.Int64 offset, System.IO.SeekOrigin origin)
- {
- throw new NotImplementedException();
- //_outStream.Seek(offset, origin);
- }
- public override void SetLength(System.Int64 value)
- {
- _stream.SetLength(value);
- }
-
-#if NOT
- public int Read()
- {
- if (Read(_buf1, 0, 1) == 0)
- return 0;
- // calculate CRC after reading
- if (crc!=null)
- crc.SlurpBlock(_buf1,0,1);
- return (_buf1[0] & 0xFF);
- }
-#endif
-
- private bool nomoreinput = false;
-
- private string ReadZeroTerminatedString()
- {
- var list = new System.Collections.Generic.List<byte>();
- bool done = false;
- do
- {
- // workitem 7740
- int n = _stream.Read(_buf1, 0, 1);
- if (n != 1)
- throw new ZlibException("Unexpected EOF reading GZIP header.");
- else
- {
- if (_buf1[0] == 0)
- {
- done = true;
- }
- else
- {
- list.Add(_buf1[0]);
- }
- }
- } while (!done);
- byte[] a = list.ToArray();
- return GZipStream.iso8859dash1.GetString(a, 0, a.Length);
- }
-
- private int _ReadAndValidateGzipHeader()
- {
- int totalBytesRead = 0;
- // read the header on the first read
- byte[] header = new byte[10];
- int n = _stream.Read(header, 0, header.Length);
-
- // workitem 8501: handle edge case (decompress empty stream)
- if (n == 0)
- {
- return 0;
- }
-
- if (n != 10)
- {
- throw new ZlibException("Not a valid GZIP stream.");
- }
-
- if (header[0] != 0x1F || header[1] != 0x8B || header[2] != 8)
- {
- throw new ZlibException("Bad GZIP header.");
- }
-
- Int32 timet = BitConverter.ToInt32(header, 4);
- _GzipMtime = GZipStream._unixEpoch.AddSeconds(timet);
- totalBytesRead += n;
- if ((header[3] & 0x04) == 0x04)
- {
- // read and discard extra field
- n = _stream.Read(header, 0, 2); // 2-byte length field
- totalBytesRead += n;
-
- Int16 extraLength = (Int16)(header[0] + header[1] * 256);
- byte[] extra = new byte[extraLength];
- n = _stream.Read(extra, 0, extra.Length);
- if (n != extraLength)
- {
- throw new ZlibException("Unexpected end-of-file reading GZIP header.");
- }
- totalBytesRead += n;
- }
- if ((header[3] & 0x08) == 0x08)
- {
- _GzipFileName = ReadZeroTerminatedString();
- }
- if ((header[3] & 0x10) == 0x010)
- {
- _GzipComment = ReadZeroTerminatedString();
- }
- if ((header[3] & 0x02) == 0x02)
- {
- Read(_buf1, 0, 1); // CRC16, ignore
- }
-
- return totalBytesRead;
- }
-
- public override System.Int32 Read(System.Byte[] buffer, System.Int32 offset, System.Int32 count)
- {
- // According to MS documentation, any implementation of the IO.Stream.Read function must:
- // (a) throw an exception if offset & count reference an invalid part of the buffer,
- // or if count < 0, or if buffer is null
- // (b) return 0 only upon EOF, or if count = 0
- // (c) if not EOF, then return at least 1 byte, up to count bytes
-
- if (_streamMode == StreamMode.Undefined)
- {
- if (!this._stream.CanRead)
- {
- throw new ZlibException("The stream is not readable.");
- }
- // for the first read, set up some controls.
- _streamMode = StreamMode.Reader;
- // (The first reference to _z goes through the private accessor which
- // may initialize it.)
- z.AvailableBytesIn = 0;
- if (_flavor == ZlibStreamFlavor.GZIP)
- {
- _gzipHeaderByteCount = _ReadAndValidateGzipHeader();
- // workitem 8501: handle edge case (decompress empty stream)
- if (_gzipHeaderByteCount == 0)
- {
- return 0;
- }
- }
- }
-
- if (_streamMode != StreamMode.Reader)
- {
- throw new ZlibException("Cannot Read after Writing.");
- }
-
- if (count == 0)
- {
- return 0;
- }
- if (nomoreinput && _wantCompress)
- {
- return 0; // workitem 8557
- }
- if (buffer == null)
- {
- throw new ArgumentNullException("buffer");
- }
- if (count < 0)
- {
- throw new ArgumentOutOfRangeException("count");
- }
- if (offset < buffer.GetLowerBound(0))
- {
- throw new ArgumentOutOfRangeException("offset");
- }
- if ((offset + count) > buffer.GetLength(0))
- {
- throw new ArgumentOutOfRangeException("count");
- }
-
- int rc = 0;
-
- // set up the output of the deflate/inflate codec:
- _z.OutputBuffer = buffer;
- _z.NextOut = offset;
- _z.AvailableBytesOut = count;
-
- // This is necessary in case _workingBuffer has been resized. (new byte[])
- // (The first reference to _workingBuffer goes through the private accessor which
- // may initialize it.)
- _z.InputBuffer = workingBuffer;
-
- do
- {
- // need data in _workingBuffer in order to deflate/inflate. Here, we check if we have any.
- if ((_z.AvailableBytesIn == 0) && (!nomoreinput))
- {
- // No data available, so try to Read data from the captive stream.
- _z.NextIn = 0;
- _z.AvailableBytesIn = _stream.Read(_workingBuffer, 0, _workingBuffer.Length);
- if (_z.AvailableBytesIn == 0)
- {
- nomoreinput = true;
- }
- }
- // we have data in InputBuffer; now compress or decompress as appropriate
- rc = (_wantCompress)
- ? _z.Deflate(_flushMode)
- : _z.Inflate(_flushMode);
-
- if (nomoreinput && (rc == ZlibConstants.Z_BUF_ERROR))
- {
- return 0;
- }
-
- if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
- {
- throw new ZlibException(String.Format("{0}flating: rc={1} msg={2}", (_wantCompress ? "de" : "in"), rc, _z.Message));
- }
-
- if ((nomoreinput || rc == ZlibConstants.Z_STREAM_END) && (_z.AvailableBytesOut == count))
- {
- break; // nothing more to read
- }
- }
- //while (_z.AvailableBytesOut == count && rc == ZlibConstants.Z_OK);
- while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK);
-
- // workitem 8557
- // is there more room in output?
- if (_z.AvailableBytesOut > 0)
- {
- if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0)
- {
- // deferred
- }
-
- // are we completely done reading?
- if (nomoreinput)
- {
- // and in compression?
- if (_wantCompress)
- {
- // no more input data available; therefore we flush to
- // try to complete the read
- rc = _z.Deflate(FlushType.Finish);
-
- if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
- {
- throw new ZlibException(String.Format("Deflating: rc={0} msg={1}", rc, _z.Message));
- }
- }
- }
- }
-
- rc = (count - _z.AvailableBytesOut);
-
- // calculate CRC after reading
- if (crc != null)
- {
- crc.SlurpBlock(buffer, offset, rc);
- }
-
- return rc;
- }
-
- public override System.Boolean CanRead
- {
- get { return this._stream.CanRead; }
- }
-
- public override System.Boolean CanSeek
- {
- get { return this._stream.CanSeek; }
- }
-
- public override System.Boolean CanWrite
- {
- get { return this._stream.CanWrite; }
- }
-
- public override System.Int64 Length
- {
- get { return _stream.Length; }
- }
-
- public override long Position
- {
- get { throw new NotImplementedException(); }
- set { throw new NotImplementedException(); }
- }
-
- internal enum StreamMode
- {
- Writer,
- Reader,
- Undefined,
- }
-
- public static void CompressString(String s, Stream compressor)
- {
- byte[] uncompressed = System.Text.Encoding.UTF8.GetBytes(s);
- using (compressor)
- {
- compressor.Write(uncompressed, 0, uncompressed.Length);
- }
- }
-
- public static void CompressBuffer(byte[] b, Stream compressor)
- {
- // workitem 8460
- using (compressor)
- {
- compressor.Write(b, 0, b.Length);
- }
- }
-
- public static String UncompressString(byte[] compressed, Stream decompressor)
- {
- // workitem 8460
- byte[] working = new byte[1024];
- var encoding = System.Text.Encoding.UTF8;
- using (var output = new MemoryStream())
- {
- using (decompressor)
- {
- int n;
- while ((n = decompressor.Read(working, 0, working.Length)) != 0)
- {
- output.Write(working, 0, n);
- }
- }
-
- // reset to allow read from start
- output.Seek(0, SeekOrigin.Begin);
- var sr = new StreamReader(output, encoding);
- return sr.ReadToEnd();
- }
- }
-
- public static byte[] UncompressBuffer(byte[] compressed, Stream decompressor)
- {
- // workitem 8460
- byte[] working = new byte[1024];
- using (var output = new MemoryStream())
- {
- using (decompressor)
- {
- int n;
- while ((n = decompressor.Read(working, 0, working.Length)) != 0)
- {
- output.Write(working, 0, n);
- }
- }
- return output.ToArray();
- }
- }
- }
-}
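For context, the static helpers above are the engine behind the one-call API that ZlibStream (deleted further below) exposes. A round-trip through that public surface, as an illustrative sketch assuming `using Ionic.Zlib;`:

    // Compress and decompress a small buffer with the public static helpers.
    byte[] original = System.Text.Encoding.UTF8.GetBytes("hello zlib");
    byte[] packed = ZlibStream.CompressBuffer(original);
    byte[] unpacked = ZlibStream.UncompressBuffer(packed);
    // unpacked now holds the same bytes as original.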
diff --git a/SabreTools.Library/External/Zlib/ZlibCodec.cs b/SabreTools.Library/External/Zlib/ZlibCodec.cs
deleted file mode 100644
index 34206950..00000000
--- a/SabreTools.Library/External/Zlib/ZlibCodec.cs
+++ /dev/null
@@ -1,745 +0,0 @@
-// ZlibCodec.cs
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
-// All rights reserved.
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-//
-// last saved (in emacs):
-// Time-stamp: <2009-November-03 15:40:51>
-//
-// ------------------------------------------------------------------
-//
-// This module defines a Codec for ZLIB compression and
-// decompression. This code extends code that was based the jzlib
-// implementation of zlib, but this code is completely novel. The codec
-// class is new, and encapsulates some behaviors that are new, and some
-// that were present in other classes in the jzlib code base. In
-// keeping with the license for jzlib, the copyright to the jzlib code
-// is included below.
-//
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in
-// the documentation and/or other materials provided with the distribution.
-//
-// 3. The names of the authors may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
-// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
-// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
-// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// -----------------------------------------------------------------------
-//
-// This program is based on zlib-1.1.3; credit to authors
-// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
-// and contributors of zlib.
-//
-// -----------------------------------------------------------------------
-
-using System;
-using System.Runtime.InteropServices;
-
-namespace Ionic.Zlib
-{
- ///
- /// Encoder and Decoder for ZLIB and DEFLATE (IETF RFC1950 and RFC1951).
- ///
- ///
- ///
- /// This class compresses and decompresses data according to the Deflate algorithm
- /// and optionally, the ZLIB format, as documented in RFC 1950 - ZLIB and RFC 1951 - DEFLATE.
- ///
- [Guid("ebc25cf6-9120-4283-b972-0e5520d0000D")]
- [System.Runtime.InteropServices.ComVisible(true)]
-#if !NETCF
- [System.Runtime.InteropServices.ClassInterface(System.Runtime.InteropServices.ClassInterfaceType.AutoDispatch)]
-#endif
- sealed public class ZlibCodec
- {
- ///
- /// The buffer from which data is taken.
- ///
- public byte[] InputBuffer;
-
- ///
- /// An index into the InputBuffer array, indicating where to start reading.
- ///
- public int NextIn;
-
- ///
- /// The number of bytes available in the InputBuffer, starting at NextIn.
- ///
- ///
- /// Generally you should set this to InputBuffer.Length before the first Inflate() or Deflate() call.
- /// The class will update this number as calls to Inflate/Deflate are made.
- ///
- public int AvailableBytesIn;
-
- ///
- /// Total number of bytes read so far, through all calls to Inflate()/Deflate().
- ///
- public long TotalBytesIn;
-
- ///
- /// Buffer to store output data.
- ///
- public byte[] OutputBuffer;
-
- ///
- /// An index into the OutputBuffer array, indicating where to start writing.
- ///
- public int NextOut;
-
- ///
- /// The number of bytes available in the OutputBuffer, starting at NextOut.
- ///
- ///
- /// Generally you should set this to OutputBuffer.Length before the first Inflate() or Deflate() call.
- /// The class will update this number as calls to Inflate/Deflate are made.
- ///
- public int AvailableBytesOut;
-
- ///
- /// Total number of bytes written to the output so far, through all calls to Inflate()/Deflate().
- ///
- public long TotalBytesOut;
-
- ///
- /// used for diagnostics, when something goes wrong!
- ///
- public System.String Message;
-
- internal DeflateManager dstate;
- internal InflateManager istate;
-
- internal uint _Adler32;
-
- ///
- /// The compression level to use in this codec. Useful only in compression mode.
- ///
- public CompressionLevel CompressLevel = CompressionLevel.Default;
-
- ///
- /// The number of Window Bits to use.
- ///
- ///
- /// This gauges the size of the sliding window, and hence the
- /// compression effectiveness as well as memory consumption. It's best to just leave this
- /// setting alone if you don't know what it is. The maximum value is 15 bits, which implies
- /// a 32k window.
- ///
- public int WindowBits = ZlibConstants.WindowBitsDefault;
-
- ///
- /// The compression strategy to use.
- ///
- ///
- /// This is only effective in compression. The theory offered by ZLIB is that different
- /// strategies could potentially produce significant differences in compression behavior
- /// for different data sets. Unfortunately I don't have any good recommendations for how
- /// to set it differently. When I tested changing the strategy I got minimally different
- /// compression performance. It's best to leave this property alone if you don't have a
- /// good feel for it. Or, you may want to produce a test harness that runs through the
- /// different strategy options and evaluates them on different file types. If you do that,
- /// let me know your results.
- ///
- public CompressionStrategy Strategy = CompressionStrategy.Default;
-
- ///
- /// The Adler32 checksum on the data transferred through the codec so far. You probably don't need to look at this.
- ///
- public int Adler32 { get { return (int)_Adler32; } }
-
- ///
- /// Create a ZlibCodec.
- ///
- ///
- /// If you use this default constructor, you will later have to explicitly call
- /// InitializeInflate() or InitializeDeflate() before using the ZlibCodec to compress
- /// or decompress.
- ///
- public ZlibCodec() { }
-
- ///
- /// Create a ZlibCodec that either compresses or decompresses.
- ///
- ///
- /// Indicates whether the codec should compress (deflate) or decompress (inflate).
- ///
- public ZlibCodec(CompressionMode mode)
- {
- if (mode == CompressionMode.Compress)
- {
- int rc = InitializeDeflate();
- if (rc != ZlibConstants.Z_OK)
- {
- throw new ZlibException("Cannot initialize for deflate.");
- }
- }
- else if (mode == CompressionMode.Decompress)
- {
- int rc = InitializeInflate();
- if (rc != ZlibConstants.Z_OK)
- {
- throw new ZlibException("Cannot initialize for inflate.");
- }
- }
- else throw new ZlibException("Invalid ZlibStreamFlavor.");
- }
-
- ///
- /// Initialize the inflation state.
- ///
- ///
- /// It is not necessary to call this before using the ZlibCodec to inflate data;
- /// It is implicitly called when you call the constructor.
- ///
- /// Z_OK if everything goes well.
- public int InitializeInflate()
- {
- return InitializeInflate(this.WindowBits);
- }
-
- ///
- /// Initialize the inflation state with an explicit flag to
- /// govern the handling of RFC1950 header bytes.
- ///
- ///
- ///
- /// By default, the ZLIB header defined in RFC 1950 is expected. If
- /// you want to read a zlib stream you should specify true for
- /// expectRfc1950Header. If you have a deflate stream, you will want to specify
- /// false. It is only necessary to invoke this initializer explicitly if you
- /// want to specify false.
- ///
- ///
- /// whether to expect an RFC1950 header byte
- /// pair when reading the stream of data to be inflated.
- ///
- /// Z_OK if everything goes well.
- public int InitializeInflate(bool expectRfc1950Header)
- {
- return InitializeInflate(this.WindowBits, expectRfc1950Header);
- }
-
- ///
- /// Initialize the ZlibCodec for inflation, with the specified number of window bits.
- ///
- /// The number of window bits to use. If you need to ask what that is,
- /// then you shouldn't be calling this initializer.
- /// Z_OK if all goes well.
- public int InitializeInflate(int windowBits)
- {
- this.WindowBits = windowBits;
- return InitializeInflate(windowBits, true);
- }
-
- ///
- /// Initialize the inflation state with an explicit flag to govern the handling of
- /// RFC1950 header bytes.
- ///
- ///
- ///
- /// If you want to read a zlib stream you should specify true for
- /// expectRfc1950Header. In this case, the library will expect to find a ZLIB
- /// header, as defined in RFC
- /// 1950, in the compressed stream. If you will be reading a DEFLATE or
- /// GZIP stream, which does not have such a header, you will want to specify
- /// false.
- ///
- ///
- /// whether to expect an RFC1950 header byte pair when reading
- /// the stream of data to be inflated.
- /// The number of window bits to use. If you need to ask what that is,
- /// then you shouldn't be calling this initializer.
- /// Z_OK if everything goes well.
- public int InitializeInflate(int windowBits, bool expectRfc1950Header)
- {
- this.WindowBits = windowBits;
- if (dstate != null)
- {
- throw new ZlibException("You may not call InitializeInflate() after calling InitializeDeflate().");
- }
- istate = new InflateManager(expectRfc1950Header);
- return istate.Initialize(this, windowBits);
- }
-
- ///
- /// Inflate the data in the InputBuffer, placing the result in the OutputBuffer.
- ///
- ///
- /// You must have set InputBuffer and OutputBuffer, NextIn and NextOut, and AvailableBytesIn and
- /// AvailableBytesOut before calling this method.
- ///
- ///
- ///
- /// private void InflateBuffer()
- /// {
- /// int bufferSize = 1024;
- /// byte[] buffer = new byte[bufferSize];
- /// ZlibCodec decompressor = new ZlibCodec();
- ///
- /// Console.WriteLine("\n============================================");
- /// Console.WriteLine("Size of Buffer to Inflate: {0} bytes.", CompressedBytes.Length);
- /// MemoryStream ms = new MemoryStream(DecompressedBytes);
- ///
- /// int rc = decompressor.InitializeInflate();
- ///
- /// decompressor.InputBuffer = CompressedBytes;
- /// decompressor.NextIn = 0;
- /// decompressor.AvailableBytesIn = CompressedBytes.Length;
- ///
- /// decompressor.OutputBuffer = buffer;
- ///
- /// // pass 1: inflate
- /// do
- /// {
- /// decompressor.NextOut = 0;
- /// decompressor.AvailableBytesOut = buffer.Length;
- /// rc = decompressor.Inflate(FlushType.None);
- ///
- /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
- /// throw new Exception("inflating: " + decompressor.Message);
- ///
- /// ms.Write(decompressor.OutputBuffer, 0, buffer.Length - decompressor.AvailableBytesOut);
- /// }
- /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0);
- ///
- /// // pass 2: finish and flush
- /// do
- /// {
- /// decompressor.NextOut = 0;
- /// decompressor.AvailableBytesOut = buffer.Length;
- /// rc = decompressor.Inflate(FlushType.Finish);
- ///
- /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
- /// throw new Exception("inflating: " + decompressor.Message);
- ///
- /// if (buffer.Length - decompressor.AvailableBytesOut > 0)
- /// ms.Write(buffer, 0, buffer.Length - decompressor.AvailableBytesOut);
- /// }
- /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0);
- ///
- /// decompressor.EndInflate();
- /// }
- ///
- ///
- ///
- /// The flush to use when inflating.
- /// Z_OK if everything goes well.
- public int Inflate(FlushType flush)
- {
- if (istate == null)
- {
- throw new ZlibException("No Inflate State!");
- }
- return istate.Inflate(flush);
- }
-
- ///
- /// Ends an inflation session.
- ///
- ///
- /// Call this after successively calling Inflate(). This will cause all buffers to be flushed.
- /// After calling this you cannot call Inflate() without a intervening call to one of the
- /// InitializeInflate() overloads.
- ///
- /// Z_OK if everything goes well.
- public int EndInflate()
- {
- if (istate == null)
- {
- throw new ZlibException("No Inflate State!");
- }
- int ret = istate.End();
- istate = null;
- return ret;
- }
-
- ///
- /// I don't know what this does!
- ///
- /// Z_OK if everything goes well.
- public int SyncInflate()
- {
- if (istate == null)
- {
- throw new ZlibException("No Inflate State!");
- }
- return istate.Sync();
- }
-
- ///
- /// Initialize the ZlibCodec for deflation operation.
- ///
- ///
- /// The codec will use the MAX window bits and the default level of compression.
- ///
- ///
- ///
- /// int bufferSize = 40000;
- /// byte[] CompressedBytes = new byte[bufferSize];
- /// byte[] DecompressedBytes = new byte[bufferSize];
- ///
- /// ZlibCodec compressor = new ZlibCodec();
- ///
- /// compressor.InitializeDeflate(CompressionLevel.Default);
- ///
- /// compressor.InputBuffer = System.Text.ASCIIEncoding.ASCII.GetBytes(TextToCompress);
- /// compressor.NextIn = 0;
- /// compressor.AvailableBytesIn = compressor.InputBuffer.Length;
- ///
- /// compressor.OutputBuffer = CompressedBytes;
- /// compressor.NextOut = 0;
- /// compressor.AvailableBytesOut = CompressedBytes.Length;
- ///
- /// while (compressor.TotalBytesIn != TextToCompress.Length && compressor.TotalBytesOut < bufferSize)
- /// {
- /// compressor.Deflate(FlushType.None);
- /// }
- ///
- /// while (true)
- /// {
- /// int rc= compressor.Deflate(FlushType.Finish);
- /// if (rc == ZlibConstants.Z_STREAM_END) break;
- /// }
- ///
- /// compressor.EndDeflate();
- ///
- ///
- ///
- /// Z_OK if all goes well. You generally don't need to check the return code.
- public int InitializeDeflate()
- {
- return _InternalInitializeDeflate(true);
- }
-
- ///
- /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel.
- ///
- ///
- /// The codec will use the maximum window bits (15) and the specified
- /// CompressionLevel. It will emit a ZLIB stream as it compresses.
- ///
- /// The compression level for the codec.
- /// Z_OK if all goes well.
- public int InitializeDeflate(CompressionLevel level)
- {
- this.CompressLevel = level;
- return _InternalInitializeDeflate(true);
- }
-
- ///
- /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel,
- /// and the explicit flag governing whether to emit an RFC1950 header byte pair.
- ///
- ///
- /// The codec will use the maximum window bits (15) and the specified CompressionLevel.
- /// If you want to generate a zlib stream, you should specify true for
- /// wantRfc1950Header. In this case, the library will emit a ZLIB
- /// header, as defined in RFC
- /// 1950, in the compressed stream.
- ///
- /// The compression level for the codec.
- /// whether to emit an initial RFC1950 byte pair in the compressed stream.
- /// Z_OK if all goes well.
- public int InitializeDeflate(CompressionLevel level, bool wantRfc1950Header)
- {
- this.CompressLevel = level;
- return _InternalInitializeDeflate(wantRfc1950Header);
- }
-
- ///
- /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel,
- /// and the specified number of window bits.
- ///
- ///
- /// The codec will use the specified number of window bits and the specified CompressionLevel.
- ///
- /// The compression level for the codec.
- /// the number of window bits to use. If you don't know what this means, don't use this method.
- /// Z_OK if all goes well.
- public int InitializeDeflate(CompressionLevel level, int bits)
- {
- this.CompressLevel = level;
- this.WindowBits = bits;
- return _InternalInitializeDeflate(true);
- }
-
- ///
- /// Initialize the ZlibCodec for deflation operation, using the specified
- /// CompressionLevel, the specified number of window bits, and the explicit flag
- /// governing whether to emit an RFC1950 header byte pair.
- ///
- ///
- /// The compression level for the codec.
- /// whether to emit an initial RFC1950 byte pair in the compressed stream.
- /// the number of window bits to use. If you don't know what this means, don't use this method.
- /// Z_OK if all goes well.
- public int InitializeDeflate(CompressionLevel level, int bits, bool wantRfc1950Header)
- {
- this.CompressLevel = level;
- this.WindowBits = bits;
- return _InternalInitializeDeflate(wantRfc1950Header);
- }
-
- private int _InternalInitializeDeflate(bool wantRfc1950Header)
- {
- if (istate != null)
- {
- throw new ZlibException("You may not call InitializeDeflate() after calling InitializeInflate().");
- }
- dstate = new DeflateManager();
- dstate.WantRfc1950HeaderBytes = wantRfc1950Header;
-
- return dstate.Initialize(this, this.CompressLevel, this.WindowBits, this.Strategy);
- }
-
- ///
- /// Deflate one batch of data.
- ///
- ///
- /// You must have set InputBuffer and OutputBuffer before calling this method.
- ///
- ///
- ///
- /// private void DeflateBuffer(CompressionLevel level)
- /// {
- /// int bufferSize = 1024;
- /// byte[] buffer = new byte[bufferSize];
- /// ZlibCodec compressor = new ZlibCodec();
- ///
- /// Console.WriteLine("\n============================================");
- /// Console.WriteLine("Size of Buffer to Deflate: {0} bytes.", UncompressedBytes.Length);
- /// MemoryStream ms = new MemoryStream();
- ///
- /// int rc = compressor.InitializeDeflate(level);
- ///
- /// compressor.InputBuffer = UncompressedBytes;
- /// compressor.NextIn = 0;
- /// compressor.AvailableBytesIn = UncompressedBytes.Length;
- ///
- /// compressor.OutputBuffer = buffer;
- ///
- /// // pass 1: deflate
- /// do
- /// {
- /// compressor.NextOut = 0;
- /// compressor.AvailableBytesOut = buffer.Length;
- /// rc = compressor.Deflate(FlushType.None);
- ///
- /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
- /// throw new Exception("deflating: " + compressor.Message);
- ///
- /// ms.Write(compressor.OutputBuffer, 0, buffer.Length - compressor.AvailableBytesOut);
- /// }
- /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0);
- ///
- /// // pass 2: finish and flush
- /// do
- /// {
- /// compressor.NextOut = 0;
- /// compressor.AvailableBytesOut = buffer.Length;
- /// rc = compressor.Deflate(FlushType.Finish);
- ///
- /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
- /// throw new Exception("deflating: " + compressor.Message);
- ///
- /// if (buffer.Length - compressor.AvailableBytesOut > 0)
- /// ms.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut);
- /// }
- /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0);
- ///
- /// compressor.EndDeflate();
- ///
- /// ms.Seek(0, SeekOrigin.Begin);
- /// CompressedBytes = new byte[compressor.TotalBytesOut];
- /// ms.Read(CompressedBytes, 0, CompressedBytes.Length);
- /// }
- ///
- ///
- /// whether to flush all data as you deflate. Generally you will want to
- /// use Z_NO_FLUSH here, in a series of calls to Deflate(), and then call EndDeflate() to
- /// flush everything.
- ///
- /// Z_OK if all goes well.
- public int Deflate(FlushType flush)
- {
- if (dstate == null)
- {
- throw new ZlibException("No Deflate State!");
- }
- return dstate.Deflate(flush);
- }
-
- ///
- /// End a deflation session.
- ///
- ///
- /// Call this after making a series of one or more calls to Deflate(). All buffers are flushed.
- ///
- /// Z_OK if all goes well.
- public int EndDeflate()
- {
- if (dstate == null)
- {
- throw new ZlibException("No Deflate State!");
- }
- // TODO: dinoch Tue, 03 Nov 2009 15:39 (test this)
- //int ret = dstate.End();
- dstate = null;
- return ZlibConstants.Z_OK; //ret;
- }
-
- ///
- /// Reset a codec for another deflation session.
- ///
- ///
- /// Call this to reset the deflation state. For example if a thread is deflating
- /// non-consecutive blocks, you can call Reset() after the Deflate(Sync) of the first
- /// block and before the next Deflate(None) of the second block.
- ///
- /// Z_OK if all goes well.
- public void ResetDeflate()
- {
- if (dstate == null)
- {
- throw new ZlibException("No Deflate State!");
- }
- dstate.Reset();
- }
-
- ///
- /// Set the CompressionStrategy and CompressionLevel for a deflation session.
- ///
- /// the level of compression to use.
- /// the strategy to use for compression.
- /// Z_OK if all goes well.
- public int SetDeflateParams(CompressionLevel level, CompressionStrategy strategy)
- {
- if (dstate == null)
- {
- throw new ZlibException("No Deflate State!");
- }
- return dstate.SetParams(level, strategy);
- }
-
- ///
- /// Set the dictionary to be used for either Inflation or Deflation.
- ///
- /// The dictionary bytes to use.
- /// Z_OK if all goes well.
- public int SetDictionary(byte[] dictionary)
- {
- if (istate != null)
- {
- return istate.SetDictionary(dictionary);
- }
-
- if (dstate != null)
- {
- return dstate.SetDictionary(dictionary);
- }
-
- throw new ZlibException("No Inflate or Deflate state!");
- }
-
- // Flush as much pending output as possible. All deflate() output goes
- // through this function so some applications may wish to modify it
- // to avoid allocating a large strm->next_out buffer and copying into it.
- // (See also read_buf()).
- internal void flush_pending()
- {
- int len = dstate.pendingCount;
-
- if (len > AvailableBytesOut)
- {
- len = AvailableBytesOut;
- }
- if (len == 0)
- {
- return;
- }
-
- if (dstate.pending.Length <= dstate.nextPending ||
- OutputBuffer.Length <= NextOut ||
- dstate.pending.Length < (dstate.nextPending + len) ||
- OutputBuffer.Length < (NextOut + len))
- {
- throw new ZlibException(String.Format("Invalid State. (pending.Length={0}, pendingCount={1})",
- dstate.pending.Length, dstate.pendingCount));
- }
-
- Array.Copy(dstate.pending, dstate.nextPending, OutputBuffer, NextOut, len);
-
- NextOut += len;
- dstate.nextPending += len;
- TotalBytesOut += len;
- AvailableBytesOut -= len;
- dstate.pendingCount -= len;
- if (dstate.pendingCount == 0)
- {
- dstate.nextPending = 0;
- }
- }
-
- // Read a new buffer from the current input stream, update the adler32
- // and total number of bytes read. All deflate() input goes through
- // this function so some applications may wish to modify it to avoid
- // allocating a large strm->next_in buffer and copying from it.
- // (See also flush_pending()).
- internal int read_buf(byte[] buf, int start, int size)
- {
- int len = AvailableBytesIn;
-
- if (len > size)
- {
- len = size;
- }
- if (len == 0)
- {
- return 0;
- }
-
- AvailableBytesIn -= len;
-
- if (dstate.WantRfc1950HeaderBytes)
- {
- _Adler32 = Adler.Adler32(_Adler32, InputBuffer, NextIn, len);
- }
- Array.Copy(InputBuffer, NextIn, buf, start, len);
- NextIn += len;
- TotalBytesIn += len;
- return len;
- }
- }
-}
\ No newline at end of file
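The ZlibCodec workflow documented above reduces to a fixed sequence: initialize, point InputBuffer/OutputBuffer at the data, call Deflate or Inflate until Z_STREAM_END, then end the session. A single-pass sketch of the deflate side (illustrative only; `input` and `output` are assumed byte arrays, with `output` large enough to take the whole result in one call):

    var codec = new ZlibCodec();
    codec.InitializeDeflate(CompressionLevel.Default);

    codec.InputBuffer = input;
    codec.NextIn = 0;
    codec.AvailableBytesIn = input.Length;

    codec.OutputBuffer = output;
    codec.NextOut = 0;
    codec.AvailableBytesOut = output.Length;

    // Finish in one call; the looped form in the doc comments above handles
    // data larger than the output buffer.
    int rc = codec.Deflate(FlushType.Finish);
    if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
        throw new ZlibException("deflating: " + codec.Message);

    int compressedLength = output.Length - codec.AvailableBytesOut;
    codec.EndDeflate();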
diff --git a/SabreTools.Library/External/Zlib/ZlibStream.cs b/SabreTools.Library/External/Zlib/ZlibStream.cs
deleted file mode 100644
index a762fb68..00000000
--- a/SabreTools.Library/External/Zlib/ZlibStream.cs
+++ /dev/null
@@ -1,748 +0,0 @@
-// ZlibStream.cs
-// ------------------------------------------------------------------
-//
-// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
-// All rights reserved.
-//
-// This code module is part of DotNetZip, a zipfile class library.
-//
-// ------------------------------------------------------------------
-//
-// This code is licensed under the Microsoft Public License.
-// See the file License.txt for the license details.
-// More info on: http://dotnetzip.codeplex.com
-//
-// ------------------------------------------------------------------
-//
-// last saved (in emacs):
-// Time-stamp: <2011-July-31 14:53:33>
-//
-// ------------------------------------------------------------------
-//
-// This module defines the ZlibStream class, which is similar in idea to
-// the System.IO.Compression.DeflateStream and
-// System.IO.Compression.GZipStream classes in the .NET BCL.
-//
-// ------------------------------------------------------------------
-
-using System;
-using System.IO;
-
-namespace Ionic.Zlib
-{
- ///
- /// Represents a Zlib stream for compression or decompression.
- ///
- ///
- ///
- ///
- /// The ZlibStream is a Decorator on a Stream. It adds ZLIB compression or decompression to any
- /// stream.
- ///
- ///
- /// Using this stream, applications can compress or decompress data via
- /// stream Read() and Write() operations. Either compresssion or
- /// decompression can occur through either reading or writing. The compression
- /// format used is ZLIB, which is documented in IETF RFC 1950, "ZLIB Compressed
- /// Data Format Specification version 3.3". This implementation of ZLIB always uses
- /// DEFLATE as the compression method. (see IETF RFC 1951, "DEFLATE
- /// Compressed Data Format Specification version 1.3.")
- ///
- ///
- /// The ZLIB format allows for varying compression methods, window sizes, and dictionaries.
- /// This implementation always uses the DEFLATE compression method, a preset dictionary,
- /// and 15 window bits by default.
- ///
- ///
- ///
- /// This class is similar to DeflateStream, except that it adds the
- /// RFC1950 header and trailer bytes to a compressed stream when compressing, or expects
- /// the RFC1950 header and trailer bytes when decompressing. It is also similar to the
- /// GZipStream.
- ///
- ///
- ///
- ///
- public class ZlibStream : System.IO.Stream
- {
- internal ZlibBaseStream _baseStream;
- bool _disposed;
-
- ///
- /// Create a ZlibStream using the specified CompressionMode.
- ///
- ///
- ///
- ///
- /// When mode is CompressionMode.Compress, the ZlibStream
- /// will use the default compression level. The "captive" stream will be
- /// closed when the ZlibStream is closed.
- ///
- ///
- ///
- ///
- ///
- /// This example uses a ZlibStream to compress a file, and writes the
- /// compressed data to another file.
- ///
- /// using (System.IO.Stream input = System.IO.File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (var raw = System.IO.File.Create(fileToCompress + ".zlib"))
- /// {
- /// using (Stream compressor = new ZlibStream(raw, CompressionMode.Compress))
- /// {
- /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
- /// int n;
- /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
- /// {
- /// compressor.Write(buffer, 0, n);
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- /// Using input As Stream = File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using raw As FileStream = File.Create(fileToCompress & ".zlib")
- /// Using compressor As Stream = New ZlibStream(raw, CompressionMode.Compress)
- /// Dim buffer As Byte() = New Byte(4096) {}
- /// Dim n As Integer = -1
- /// Do While (n <> 0)
- /// If (n > 0) Then
- /// compressor.Write(buffer, 0, n)
- /// End If
- /// n = input.Read(buffer, 0, buffer.Length)
- /// Loop
- /// End Using
- /// End Using
- /// End Using
- ///
- ///
- ///
- /// The stream which will be read or written.
- /// Indicates whether the ZlibStream will compress or decompress.
- public ZlibStream(System.IO.Stream stream, CompressionMode mode)
- : this(stream, mode, CompressionLevel.Default, false)
- {
- }
-
- ///
- /// Create a ZlibStream using the specified CompressionMode and
- /// the specified CompressionLevel.
- ///
- ///
- ///
- ///
- ///
- /// When mode is CompressionMode.Decompress, the level parameter is ignored.
- /// The "captive" stream will be closed when the ZlibStream is closed.
- ///
- ///
- ///
- ///
- ///
- /// This example uses a ZlibStream to compress data from a file, and writes the
- /// compressed data to another file.
- ///
- ///
- /// using (System.IO.Stream input = System.IO.File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (var raw = System.IO.File.Create(fileToCompress + ".zlib"))
- /// {
- /// using (Stream compressor = new ZlibStream(raw,
- /// CompressionMode.Compress,
- /// CompressionLevel.BestCompression))
- /// {
- /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
- /// int n;
- /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
- /// {
- /// compressor.Write(buffer, 0, n);
- /// }
- /// }
- /// }
- /// }
- ///
- ///
- ///
- /// Using input As Stream = File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using raw As FileStream = File.Create(fileToCompress & ".zlib")
- /// Using compressor As Stream = New ZlibStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression)
- /// Dim buffer As Byte() = New Byte(4096) {}
- /// Dim n As Integer = -1
- /// Do While (n <> 0)
- /// If (n > 0) Then
- /// compressor.Write(buffer, 0, n)
- /// End If
- /// n = input.Read(buffer, 0, buffer.Length)
- /// Loop
- /// End Using
- /// End Using
- /// End Using
- ///
- ///
- ///
- /// The stream to be read or written while deflating or inflating.
- /// Indicates whether the ZlibStream will compress or decompress.
- /// A tuning knob to trade speed for effectiveness.
- public ZlibStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level)
- : this(stream, mode, level, false)
- {
- }
-
- ///
- /// Create a ZlibStream using the specified CompressionMode, and
- /// explicitly specify whether the captive stream should be left open after
- /// Deflation or Inflation.
- ///
- ///
- ///
- ///
- ///
- /// When mode is CompressionMode.Compress, the ZlibStream will use
- /// the default compression level.
- ///
- ///
- ///
- /// This constructor allows the application to request that the captive stream
- /// remain open after the deflation or inflation occurs. By default, after
- /// Close() is called on the stream, the captive stream is also
- /// closed. In some cases this is not desired, for example if the stream is a
- /// that will be re-read after
- /// compression. Specify true for the parameter to leave the stream
- /// open.
- ///
- ///
- ///
- /// See the other overloads of this constructor for example code.
- ///
- ///
- ///
- ///
- /// The stream which will be read or written. This is called the
- /// "captive" stream in other places in this documentation.
- /// Indicates whether the ZlibStream will compress or decompress.
- /// true if the application would like the stream to remain
- /// open after inflation/deflation.
- public ZlibStream(System.IO.Stream stream, CompressionMode mode, bool leaveOpen)
- : this(stream, mode, CompressionLevel.Default, leaveOpen)
- {
- }
-
- ///
- /// Create a ZlibStream using the specified CompressionMode
- /// and the specified CompressionLevel, and explicitly specify
- /// whether the stream should be left open after Deflation or Inflation.
- ///
- ///
- ///
- ///
- ///
- /// This constructor allows the application to request that the captive
- /// stream remain open after the deflation or inflation occurs. By
- /// default, after Close() is called on the stream, the captive
- /// stream is also closed. In some cases this is not desired, for example
- /// if the stream is a that will be
- /// re-read after compression. Specify true for the parameter to leave the stream open.
- ///
- ///
- ///
- /// When mode is CompressionMode.Decompress, the level parameter is
- /// ignored.
- ///
- ///
- ///
- ///
- ///
- ///
- /// This example shows how to use a ZlibStream to compress the data from a file,
- /// and store the result into another file. The filestream remains open to allow
- /// additional data to be written to it.
- ///
- ///
- /// using (var output = System.IO.File.Create(fileToCompress + ".zlib"))
- /// {
- /// using (System.IO.Stream input = System.IO.File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
- /// {
- /// using (Stream compressor = new ZlibStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, true))
- /// {
- /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
- /// int n;
- /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
- /// {
- /// compressor.Write(buffer, 0, n);
- /// }
- /// }
- /// }
- /// // can write additional data to the output stream here
- /// }
- ///
- ///
- /// Using output As FileStream = File.Create(fileToCompress & ".zlib")
- /// Using input As Stream = File.Open(fileToCompress, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)
- /// Using compressor As Stream = New ZlibStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, True)
- /// Dim buffer As Byte() = New Byte(4096) {}
- /// Dim n As Integer = -1
- /// Do While (n <> 0)
- /// If (n > 0) Then
- /// compressor.Write(buffer, 0, n)
- /// End If
- /// n = input.Read(buffer, 0, buffer.Length)
- /// Loop
- /// End Using
- /// End Using
- /// ' can write additional data to the output stream here.
- /// End Using
- ///
- ///
- ///
- /// The stream which will be read or written.
- ///
- /// Indicates whether the ZlibStream will compress or decompress.
- ///
- ///
- /// true if the application would like the stream to remain open after
- /// inflation/deflation.
- ///
- ///
- ///
- /// A tuning knob to trade speed for effectiveness. This parameter is
- /// effective only when mode is CompressionMode.Compress.
- ///
- public ZlibStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
- {
- _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen);
- }
-
- #region Zlib properties
-
- ///
- /// This property sets the flush behavior on the stream.
- /// Sorry, though, not sure exactly how to describe all the various settings.
- ///
- virtual public FlushType FlushMode
- {
- get { return (this._baseStream._flushMode); }
- set
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("ZlibStream");
- }
- this._baseStream._flushMode = value;
- }
- }
-
- ///
- /// The size of the working buffer for the compression codec.
- ///
- ///
- ///
- ///
- /// The working buffer is used for all stream operations. The default size is
- /// 1024 bytes. The minimum size is 128 bytes. You may get better performance
- /// with a larger buffer. Then again, you might not. You would have to test
- /// it.
- ///
- ///
- ///
- /// Set this before the first call to Read() or Write() on the
- /// stream. If you try to set it afterwards, it will throw.
- ///
- ///
- public int BufferSize
- {
- get
- {
- return this._baseStream._bufferSize;
- }
- set
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("ZlibStream");
- }
- if (this._baseStream._workingBuffer != null)
- {
- throw new ZlibException("The working buffer is already set.");
- }
- if (value < ZlibConstants.WorkingBufferSizeMin)
- {
- throw new ZlibException(String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, ZlibConstants.WorkingBufferSizeMin));
- }
- this._baseStream._bufferSize = value;
- }
- }
-
- /// Returns the total number of bytes input so far.
- virtual public long TotalIn
- {
- get { return this._baseStream._z.TotalBytesIn; }
- }
-
- /// Returns the total number of bytes output so far.
- virtual public long TotalOut
- {
- get { return this._baseStream._z.TotalBytesOut; }
- }
-
- #endregion
-
- #region System.IO.Stream methods
-
- ///
- /// Dispose the stream.
- ///
- ///
- ///
- /// This may or may not result in a Close() call on the captive
- /// stream. See the constructors that have a leaveOpen parameter
- /// for more information.
- ///
- ///
- /// This method may be invoked in two distinct scenarios. If disposing
- /// == true, the method has been called directly or indirectly by a
- /// user's code, for example via the public Dispose() method. In this
- /// case, both managed and unmanaged resources can be referenced and
- /// disposed. If disposing == false, the method has been called by the
- /// runtime from inside the object finalizer and this method should not
- /// reference other objects; in that case only unmanaged resources must
- /// be referenced or disposed.
- ///
- ///
- ///
- /// indicates whether the Dispose method was invoked by user code.
- ///
- protected override void Dispose(bool disposing)
- {
- try
- {
- if (!_disposed)
- {
- if (disposing && (this._baseStream != null))
- {
- this._baseStream.Close();
- }
- _disposed = true;
- }
- }
- finally
- {
- base.Dispose(disposing);
- }
- }
-
- ///
- /// Indicates whether the stream can be read.
- ///
- ///
- /// The return value depends on whether the captive stream supports reading.
- ///
- public override bool CanRead
- {
- get
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("ZlibStream");
- }
- return _baseStream._stream.CanRead;
- }
- }
-
- ///
- /// Indicates whether the stream supports Seek operations.
- ///
- ///
- /// Always returns false.
- ///
- public override bool CanSeek
- {
- get { return false; }
- }
-
- ///
- /// Indicates whether the stream can be written.
- ///
- ///
- /// The return value depends on whether the captive stream supports writing.
- ///
- public override bool CanWrite
- {
- get
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("ZlibStream");
- }
- return _baseStream._stream.CanWrite;
- }
- }
-
- ///
- /// Flush the stream.
- ///
- public override void Flush()
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("ZlibStream");
- }
- _baseStream.Flush();
- }
-
- ///
- /// Reading this property always throws a NotSupportedException.
- ///
- public override long Length
- {
- get { throw new NotSupportedException(); }
- }
-
- ///
- /// The position of the stream pointer.
- ///
- ///
- ///
- /// Setting this property always throws a NotSupportedException. Reading will return the total bytes
- /// written out, if used in writing, or the total bytes read in, if used in
- /// reading. The count may refer to compressed bytes or uncompressed bytes,
- /// depending on how you've used the stream.
- ///
- public override long Position
- {
- get
- {
- if (this._baseStream._streamMode == ZlibBaseStream.StreamMode.Writer)
- {
- return this._baseStream._z.TotalBytesOut;
- }
- if (this._baseStream._streamMode == ZlibBaseStream.StreamMode.Reader)
- {
- return this._baseStream._z.TotalBytesIn;
- }
- return 0;
- }
-
- set { throw new NotSupportedException(); }
- }
-
- ///
- /// Read data from the stream.
- ///
- ///
- ///
- ///
- ///
- /// If you wish to use the ZlibStream to compress data while reading,
- /// you can create a ZlibStream with CompressionMode.Compress,
- /// providing an uncompressed data stream. Then call Read() on that
- /// ZlibStream, and the data read will be compressed. If you wish to
- /// use the ZlibStream to decompress data while reading, you can create
- /// a ZlibStream with CompressionMode.Decompress, providing a
- /// readable compressed data stream. Then call Read() on that
- /// ZlibStream, and the data will be decompressed as it is read.
- ///
- ///
- ///
- /// A ZlibStream can be used for Read() or Write(), but
- /// not both.
- ///
- ///
- ///
- ///
- ///
- /// The buffer into which the read data should be placed.
- ///
- ///
- /// the offset within that data array to put the first byte read.
- ///
- /// the number of bytes to read.
- ///
- /// the number of bytes read
- public override int Read(byte[] buffer, int offset, int count)
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("ZlibStream");
- }
- return _baseStream.Read(buffer, offset, count);
- }
-
- ///
- /// Calling this method always throws a NotSupportedException.
- ///
- ///
- /// The offset to seek to....
- /// IF THIS METHOD ACTUALLY DID ANYTHING.
- ///
- ///
- /// The reference specifying how to apply the offset.... IF
- /// THIS METHOD ACTUALLY DID ANYTHING.
- ///
- ///
- /// nothing. This method always throws.
- public override long Seek(long offset, System.IO.SeekOrigin origin)
- {
- throw new NotSupportedException();
- }
-
- ///
- /// Calling this method always throws a NotSupportedException.
- ///
- ///
- /// The new value for the stream length.... IF
- /// THIS METHOD ACTUALLY DID ANYTHING.
- ///
- public override void SetLength(long value)
- {
- throw new NotSupportedException();
- }
-
- ///
- /// Write data to the stream.
- ///
- ///
- ///
- ///
- ///
- /// If you wish to use the ZlibStream to compress data while writing,
- /// you can create a ZlibStream with CompressionMode.Compress,
- /// and a writable output stream. Then call Write() on that
- /// ZlibStream, providing uncompressed data as input. The data sent to
- /// the output stream will be the compressed form of the data written. If you
- /// wish to use the ZlibStream to decompress data while writing, you
- /// can create a ZlibStream with CompressionMode.Decompress, and a
- /// writable output stream. Then call Write() on that stream,
- /// providing previously compressed data. The data sent to the output stream
- /// will be the decompressed form of the data written.
- ///
- ///
- ///
- /// A ZlibStream can be used for Read() or Write(), but not both.
- ///
- ///
- /// The buffer holding data to write to the stream.
- /// the offset within that data array to find the first byte to write.
- /// the number of bytes to write.
- public override void Write(byte[] buffer, int offset, int count)
- {
- if (_disposed)
- {
- throw new ObjectDisposedException("ZlibStream");
- }
- _baseStream.Write(buffer, offset, count);
- }
-
- #endregion
-
- ///
- /// Compress a string into a byte array using ZLIB.
- ///
- ///
- ///
- /// Uncompress it with UncompressString().
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- /// A string to compress. The string will first be encoded
- /// using UTF8, then compressed.
- ///
- ///
- /// The string in compressed form
- public static byte[] CompressString(String s)
- {
- using (var ms = new MemoryStream())
- {
- Stream compressor =
- new ZlibStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression);
- ZlibBaseStream.CompressString(s, compressor);
- return ms.ToArray();
- }
- }
-
- ///
- /// Compress a byte array into a new byte array using ZLIB.
- ///
- ///
- ///
- /// Uncompress it with UncompressBuffer().
- ///
- ///
- ///
- ///
- ///
- ///
- /// A buffer to compress.
- ///
- ///
- /// The data in compressed form
- public static byte[] CompressBuffer(byte[] b)
- {
- using (var ms = new MemoryStream())
- {
- Stream compressor =
- new ZlibStream( ms, CompressionMode.Compress, CompressionLevel.BestCompression );
-
- ZlibBaseStream.CompressBuffer(b, compressor);
- return ms.ToArray();
- }
- }
-
- ///
- /// Uncompress a ZLIB-compressed byte array into a single string.
- ///
- ///
- ///
- ///
- ///
- ///
- /// A buffer containing ZLIB-compressed data.
- ///
- ///
- /// The uncompressed string
- public static String UncompressString(byte[] compressed)
- {
- using (var input = new MemoryStream(compressed))
- {
- Stream decompressor =
- new ZlibStream(input, CompressionMode.Decompress);
-
- return ZlibBaseStream.UncompressString(compressed, decompressor);
- }
- }
-
- ///
- /// Uncompress a ZLIB-compressed byte array into a byte array.
- ///
- ///
- ///
- ///
- ///
- ///
- /// A buffer containing ZLIB-compressed data.
- ///
- ///
- /// The data in uncompressed form
- public static byte[] UncompressBuffer(byte[] compressed)
- {
- using (var input = new MemoryStream(compressed))
- {
- Stream decompressor =
- new ZlibStream( input, CompressionMode.Decompress );
-
- return ZlibBaseStream.UncompressBuffer(compressed, decompressor);
- }
- }
- }
-}
\ No newline at end of file
diff --git a/SabreTools.Library/FileTypes/GZipArchive.cs b/SabreTools.Library/FileTypes/GZipArchive.cs
index 0314ab23..732a4033 100644
--- a/SabreTools.Library/FileTypes/GZipArchive.cs
+++ b/SabreTools.Library/FileTypes/GZipArchive.cs
@@ -20,7 +20,9 @@ using MemoryStream = System.IO.MemoryStream;
using SeekOrigin = System.IO.SeekOrigin;
using Stream = System.IO.Stream;
#endif
-using Ionic.Zlib;
+using Compress;
+using Compress.gZip;
+using Compress.ZipFile.ZLib;
namespace SabreTools.Library.FileTypes
{
@@ -72,12 +74,15 @@ namespace SabreTools.Library.FileTypes
// Decompress the _filename stream
FileStream outstream = Utilities.TryCreate(Path.Combine(outDir, Path.GetFileNameWithoutExtension(this.Filename)));
- GZipStream gzstream = new GZipStream(Utilities.TryOpenRead(this.Filename), Ionic.Zlib.CompressionMode.Decompress);
+ var gz = new gZip();
+ ZipReturn ret = gz.ZipFileOpen(this.Filename);
+ ret = gz.ZipFileOpenReadStream(0, out Stream gzstream, out ulong streamSize);
gzstream.CopyTo(outstream);
// Dispose of the streams
outstream.Dispose();
- gzstream.Dispose();
+ ret = gz.ZipFileCloseReadStream();
+ gz.ZipFileClose();
encounteredErrors = false;
}
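The replacement read path in the hunk above follows the Compress library's open/stream/close sequence. Isolated into a sketch (`gzipPath` is an assumed path to a .gz file and `outstream` an assumed writable Stream; the return-value check is added here for illustration, the hunk itself ignores it):

    var gz = new gZip();
    ZipReturn ret = gz.ZipFileOpen(gzipPath);
    if (ret == ZipReturn.ZipGood)
    {
        // gzip holds a single member, so entry index 0 is the payload.
        ret = gz.ZipFileOpenReadStream(0, out Stream gzstream, out ulong streamSize);
        gzstream.CopyTo(outstream);
        gz.ZipFileCloseReadStream();
    }
    gz.ZipFileClose();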
@@ -159,7 +164,9 @@ namespace SabreTools.Library.FileTypes
{
// Decompress the _filename stream
realEntry = Path.GetFileNameWithoutExtension(this.Filename);
- GZipStream gzstream = new GZipStream(Utilities.TryOpenRead(this.Filename), Ionic.Zlib.CompressionMode.Decompress);
+ var gz = new gZip();
+ ZipReturn ret = gz.ZipFileOpen(this.Filename);
+ ret = gz.ZipFileOpenReadStream(0, out Stream gzstream, out ulong streamSize);
// Write the file out
byte[] gbuffer = new byte[_bufferSize];
@@ -233,11 +240,13 @@ namespace SabreTools.Library.FileTypes
// Otherwise, use the stream directly
else
{
- GZipStream gzstream = new GZipStream(Utilities.TryOpenRead(this.Filename), Ionic.Zlib.CompressionMode.Decompress);
+ var gz = new gZip();
+ ZipReturn ret = gz.ZipFileOpen(this.Filename);
+ ret = gz.ZipFileOpenReadStream(0, out Stream gzstream, out ulong streamSize);
BaseFile gzipEntryRom = Utilities.GetStreamInfo(gzstream, gzstream.Length, omitFromScan: omitFromScan);
- gzipEntryRom.Filename = gzstream.FileName;
+ gzipEntryRom.Filename = gz.Filename(0);
gzipEntryRom.Parent = gamename;
- gzipEntryRom.Date = (date && gzstream.LastModified != null ? gzstream.LastModified?.ToString("yyyy/MM/dd hh:mm:ss") : null);
+ gzipEntryRom.Date = (date && gz.TimeStamp > 0 ? gz.TimeStamp.ToString() : null);
_children.Add(gzipEntryRom);
gzstream.Dispose();
}
@@ -507,7 +516,7 @@ namespace SabreTools.Library.FileTypes
sw.Write((ulong)rom.Size); // Long size (Unsigned, Mirrored)
// Now create a deflatestream from the input file
- DeflateStream ds = new DeflateStream(outputStream, Ionic.Zlib.CompressionMode.Compress, Ionic.Zlib.CompressionLevel.BestCompression, true);
+ ZlibBaseStream ds = new ZlibBaseStream(outputStream, CompressionMode.Compress, CompressionLevel.BestCompression, ZlibStreamFlavor.DEFLATE, true);
// Copy the input stream to the output
byte[] ibuffer = new byte[_bufferSize];
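The hunk above swaps Ionic's DeflateStream for the Compress copy of ZlibBaseStream, using the raw DEFLATE flavor with leaveOpen set so the data already written around it stays intact. A sketch of that write path, assuming the Compress copy keeps the Close/leaveOpen behaviour of the deleted ZlibBaseStream above (`inputStream` and `outputStream` are assumed open streams):

    var ds = new ZlibBaseStream(outputStream, CompressionMode.Compress,
        CompressionLevel.BestCompression, ZlibStreamFlavor.DEFLATE, true);

    byte[] ibuffer = new byte[4096];
    int ilen;
    while ((ilen = inputStream.Read(ibuffer, 0, ibuffer.Length)) > 0)
    {
        ds.Write(ibuffer, 0, ilen);
    }

    // Close() finishes the final deflate block; leaveOpen keeps outputStream
    // open for whatever the caller writes after the compressed data.
    ds.Close();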
diff --git a/SabreTools.Library/FileTypes/SevenZipArchive.cs b/SabreTools.Library/FileTypes/SevenZipArchive.cs
index d9752ba8..da5350f7 100644
--- a/SabreTools.Library/FileTypes/SevenZipArchive.cs
+++ b/SabreTools.Library/FileTypes/SevenZipArchive.cs
@@ -18,7 +18,7 @@ using MemoryStream = System.IO.MemoryStream;
using SeekOrigin = System.IO.SeekOrigin;
using Stream = System.IO.Stream;
#endif
-using ROMVault2.SupportedFiles.Zip;
+using Compress.ZipFile;
using SevenZip; // TODO: Remove this when 7zip write is implemented in SharpCompress
using SharpCompress.Archives;
using SharpCompress.Archives.SevenZip;
@@ -447,7 +447,7 @@ namespace SabreTools.Library.FileTypes
// Get the order for the entries with the new file
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Copy over all files to the new archive
foreach (string key in keys)
@@ -606,7 +606,7 @@ namespace SabreTools.Library.FileTypes
// Sort the keys in TZIP order
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Create the temp directory
string tempPath = Path.Combine(outDir, Guid.NewGuid().ToString());
@@ -673,7 +673,7 @@ namespace SabreTools.Library.FileTypes
// Get the order for the entries with the new file
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Copy over all files to the new archive
foreach (string key in keys)
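Across the remaining FileTypes the only functional change is this comparer rename; entries are still sorted into deterministic TorrentZip order before being copied. The shared pattern after the rename:

    List<string> keys = inputIndexMap.Keys.ToList();
    keys.Sort(ZipFile.TrrntZipStringCompare);
    foreach (string key in keys)
    {
        // copy or write the entry for this key into the new archive...
    }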
diff --git a/SabreTools.Library/FileTypes/TapeArchive.cs b/SabreTools.Library/FileTypes/TapeArchive.cs
index d2793e94..e5e575ee 100644
--- a/SabreTools.Library/FileTypes/TapeArchive.cs
+++ b/SabreTools.Library/FileTypes/TapeArchive.cs
@@ -17,7 +17,7 @@ using MemoryStream = System.IO.MemoryStream;
using SeekOrigin = System.IO.SeekOrigin;
using Stream = System.IO.Stream;
#endif
-using ROMVault2.SupportedFiles.Zip;
+using Compress.ZipFile;
using SharpCompress.Archives;
using SharpCompress.Archives.Tar;
using SharpCompress.Common;
@@ -395,7 +395,7 @@ namespace SabreTools.Library.FileTypes
// Get the order for the entries with the new file
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Copy over all files to the new archive
foreach (string key in keys)
@@ -521,7 +521,7 @@ namespace SabreTools.Library.FileTypes
// Sort the keys in TZIP order
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Now add all of the files in order
foreach (string key in keys)
@@ -578,7 +578,7 @@ namespace SabreTools.Library.FileTypes
// Get the order for the entries with the new file
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Copy over all files to the new archive
foreach (string key in keys)
diff --git a/SabreTools.Library/FileTypes/XZArchive.cs b/SabreTools.Library/FileTypes/XZArchive.cs
index ce080002..57eb293b 100644
--- a/SabreTools.Library/FileTypes/XZArchive.cs
+++ b/SabreTools.Library/FileTypes/XZArchive.cs
@@ -18,7 +18,7 @@ using MemoryStream = System.IO.MemoryStream;
using SeekOrigin = System.IO.SeekOrigin;
using Stream = System.IO.Stream;
#endif
-using ROMVault2.SupportedFiles.Zip;
+using Compress.ZipFile;
using SevenZip;
using SharpCompress.Archives;
using SharpCompress.Archives.SevenZip;
@@ -341,7 +341,7 @@ namespace SabreTools.Library.FileTypes
// Get the order for the entries with the new file
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Copy over all files to the new archive
foreach (string key in keys)
@@ -500,7 +500,7 @@ namespace SabreTools.Library.FileTypes
// Sort the keys in TZIP order
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Create the temp directory
string tempPath = Path.Combine(outDir, Guid.NewGuid().ToString());
@@ -567,7 +567,7 @@ namespace SabreTools.Library.FileTypes
// Get the order for the entries with the new file
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Copy over all files to the new archive
foreach (string key in keys)
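
The TorrentZipStringCompare to TrrntZipStringCompare rename above is mechanical, but the surrounding pattern recurs in every archive rewrite in this changeset: map entry names (old archive plus new inputs) to indices, sort the keys with the Compress comparer, then rebuild in that order. A minimal sketch of that ordering step; only ZipFile.TrrntZipStringCompare is taken from the diff, the BuildWriteOrder helper is illustrative, and the negative-index marker for new inputs mirrors the rebuild code later in this diff.

    using System.Collections.Generic;
    using System.Linq;
    using Compress.ZipFile;

    internal static class TrrntZipOrderSketch
    {
        // Map entry names to indices (negative markers for new inputs, non-negative
        // for entries copied from the old archive), then return names in TZIP order.
        public static List<string> BuildWriteOrder(IList<string> newFiles, IList<string> oldEntries)
        {
            var inputIndexMap = new Dictionary<string, int>();

            for (int i = 0; i < newFiles.Count; i++)
            {
                string name = newFiles[i].Replace('\\', '/');
                if (!oldEntries.Contains(name))
                    inputIndexMap[name] = -(i + 1);   // negative => written from the new inputs
            }

            for (int i = 0; i < oldEntries.Count; i++)
                inputIndexMap[oldEntries[i]] = i;     // non-negative => copied from the old archive

            List<string> keys = inputIndexMap.Keys.ToList();
            keys.Sort(ZipFile.TrrntZipStringCompare); // TorrentZip ordering, as used throughout this changeset
            return keys;
        }
    }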
diff --git a/SabreTools.Library/FileTypes/ZipArchive.cs b/SabreTools.Library/FileTypes/ZipArchive.cs
index 699ec232..3e042250 100644
--- a/SabreTools.Library/FileTypes/ZipArchive.cs
+++ b/SabreTools.Library/FileTypes/ZipArchive.cs
@@ -17,7 +17,8 @@ using MemoryStream = System.IO.MemoryStream;
using SeekOrigin = System.IO.SeekOrigin;
using Stream = System.IO.Stream;
#endif
-using ROMVault2.SupportedFiles.Zip;
+using Compress;
+using Compress.ZipFile;
using SharpCompress.Readers;
namespace SabreTools.Library.FileTypes
@@ -70,32 +71,32 @@ namespace SabreTools.Library.FileTypes
// Extract all files to the temp directory
ZipFile zf = new ZipFile();
- ZipReturn zr = zf.Open(this.Filename, new FileInfo(this.Filename).LastWriteTime.Ticks, true);
+ ZipReturn zr = zf.ZipFileOpen(this.Filename, new FileInfo(this.Filename).LastWriteTime.Ticks, true);
if (zr != ZipReturn.ZipGood)
{
throw new Exception(ZipFile.ZipErrorMessageText(zr));
}
- for (int i = 0; i < zf.EntriesCount && zr == ZipReturn.ZipGood; i++)
+ for (int i = 0; i < zf.LocalFilesCount() && zr == ZipReturn.ZipGood; i++)
{
// Open the read stream
- zr = zf.OpenReadStream(i, false, out Stream readStream, out ulong streamsize, out SabreTools.Library.Data.CompressionMethod cm, out uint lastMod);
+ zr = zf.ZipFileOpenReadStream(i, false, out Stream readStream, out ulong streamsize, out ushort cm);
// Create the rest of the path, if needed
- if (!String.IsNullOrWhiteSpace(Path.GetDirectoryName(zf.Entries[i].FileName)))
+ if (!String.IsNullOrWhiteSpace(Path.GetDirectoryName(zf.Filename(i))))
{
- Directory.CreateDirectory(Path.Combine(outDir, Path.GetDirectoryName(zf.Entries[i].FileName)));
+ Directory.CreateDirectory(Path.Combine(outDir, Path.GetDirectoryName(zf.Filename(i))));
}
// If the entry ends with a directory separator, continue to the next item, if any
- if (zf.Entries[i].FileName.EndsWith(Path.DirectorySeparatorChar.ToString())
- || zf.Entries[i].FileName.EndsWith(Path.AltDirectorySeparatorChar.ToString())
- || zf.Entries[i].FileName.EndsWith(Path.PathSeparator.ToString()))
+ if (zf.Filename(i).EndsWith(Path.DirectorySeparatorChar.ToString())
+ || zf.Filename(i).EndsWith(Path.AltDirectorySeparatorChar.ToString())
+ || zf.Filename(i).EndsWith(Path.PathSeparator.ToString()))
{
continue;
}
- FileStream writeStream = Utilities.TryCreate(Path.Combine(outDir, zf.Entries[i].FileName));
+ FileStream writeStream = Utilities.TryCreate(Path.Combine(outDir, zf.Filename(i)));
// If the stream is smaller than the buffer, just run one loop through to avoid issues
if (streamsize < _bufferSize)
@@ -118,10 +119,11 @@ namespace SabreTools.Library.FileTypes
}
}
- zr = zf.CloseReadStream();
+ zr = zf.ZipFileCloseReadStream();
writeStream.Dispose();
}
- zf.Close();
+
+ zf.ZipFileClose();
encounteredErrors = false;
}
catch (EndOfStreamException)
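
For reference, the read-side API mapping used in this hunk and the ones that follow (Open to ZipFileOpen, EntriesCount to LocalFilesCount(), Entries[i].FileName to Filename(i), OpenReadStream to ZipFileOpenReadStream, CloseReadStream to ZipFileCloseReadStream, Close to ZipFileClose) collected into one self-contained loop. A sketch only; the ListEntries helper and the console output are illustrative, while the calls themselves mirror those visible in the diff.

    using System;
    using System.IO;
    using Compress;          // ZipReturn
    using Compress.ZipFile;  // ZipFile

    internal static class ZipReadSketch
    {
        // Enumerate a zip archive using the read-side calls adopted in this changeset.
        public static void ListEntries(string zipPath)
        {
            var zf = new ZipFile();
            ZipReturn zr = zf.ZipFileOpen(zipPath, new FileInfo(zipPath).LastWriteTime.Ticks, true);
            if (zr != ZipReturn.ZipGood)
                throw new Exception(ZipFile.ZipErrorMessageText(zr));

            for (int i = 0; i < zf.LocalFilesCount() && zr == ZipReturn.ZipGood; i++)
            {
                zr = zf.ZipFileOpenReadStream(i, false, out Stream readStream, out ulong streamSize, out ushort compressionMethod);
                if (zr != ZipReturn.ZipGood)
                    break; // stop on the first read error (the real code logs and continues)

                Console.WriteLine($"{zf.Filename(i)}: {streamSize} bytes, method {compressionMethod}");
                zr = zf.ZipFileCloseReadStream();
            }

            zf.ZipFileClose();
        }
    }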
@@ -201,19 +203,19 @@ namespace SabreTools.Library.FileTypes
try
{
ZipFile zf = new ZipFile();
- ZipReturn zr = zf.Open(this.Filename, new FileInfo(this.Filename).LastWriteTime.Ticks, true);
+ ZipReturn zr = zf.ZipFileOpen(this.Filename, new FileInfo(this.Filename).LastWriteTime.Ticks, true);
if (zr != ZipReturn.ZipGood)
{
throw new Exception(ZipFile.ZipErrorMessageText(zr));
}
- for (int i = 0; i < zf.EntriesCount && zr == ZipReturn.ZipGood; i++)
+ for (int i = 0; i < zf.LocalFilesCount() && zr == ZipReturn.ZipGood; i++)
{
- if (zf.Entries[i].FileName.Contains(entryName))
+ if (zf.Filename(i).Contains(entryName))
{
// Open the read stream
- realEntry = zf.Entries[i].FileName;
- zr = zf.OpenReadStream(i, false, out Stream readStream, out ulong streamsize, out SabreTools.Library.Data.CompressionMethod cm, out uint lastMod);
+ realEntry = zf.Filename(i);
+ zr = zf.ZipFileOpenReadStream(i, false, out Stream readStream, out ulong streamsize, out ushort cm);
// If the stream is smaller than the buffer, just run one loop through to avoid issues
if (streamsize < _bufferSize)
@@ -241,11 +243,11 @@ namespace SabreTools.Library.FileTypes
ms.Flush();
}
- zr = zf.CloseReadStream();
+ zr = zf.ZipFileCloseReadStream();
}
}
- zf.Dispose();
+ zf.ZipFileClose();
}
catch (Exception ex)
{
@@ -276,16 +278,16 @@ namespace SabreTools.Library.FileTypes
try
{
ZipFile zf = new ZipFile();
- ZipReturn zr = zf.Open(this.Filename, new FileInfo(this.Filename).LastWriteTime.Ticks, true);
+ ZipReturn zr = zf.ZipFileOpen(this.Filename, new FileInfo(this.Filename).LastWriteTime.Ticks, true);
if (zr != ZipReturn.ZipGood)
{
throw new Exception(ZipFile.ZipErrorMessageText(zr));
}
- for (int i = 0; i < zf.EntriesCount; i++)
+ for (int i = 0; i < zf.LocalFilesCount(); i++)
{
// Open the read stream
- zr = zf.OpenReadStream(i, false, out Stream readStream, out ulong streamsize, out SabreTools.Library.Data.CompressionMethod cm, out uint lastMod);
+ zr = zf.ZipFileOpenReadStream(i, false, out Stream readStream, out ulong streamsize, out ushort cm);
// If we get a read error, log it and continue
if (zr != ZipReturn.ZipGood)
@@ -295,9 +297,9 @@ namespace SabreTools.Library.FileTypes
}
// If the entry ends with a directory separator, continue to the next item, if any
- if (zf.Entries[i].FileName.EndsWith(Path.DirectorySeparatorChar.ToString())
- || zf.Entries[i].FileName.EndsWith(Path.AltDirectorySeparatorChar.ToString())
- || zf.Entries[i].FileName.EndsWith(Path.PathSeparator.ToString()))
+ if (zf.Filename(i).EndsWith(Path.DirectorySeparatorChar.ToString())
+ || zf.Filename(i).EndsWith(Path.AltDirectorySeparatorChar.ToString())
+ || zf.Filename(i).EndsWith(Path.PathSeparator.ToString()))
{
continue;
}
@@ -305,10 +307,10 @@ namespace SabreTools.Library.FileTypes
// If secure hashes are disabled, do a quickscan
if (omitFromScan == Hash.SecureHashes)
{
- string newname = zf.Entries[i].FileName;
- long newsize = (long)zf.Entries[i].UncompressedSize;
- byte[] newcrc = zf.Entries[i].CRC.Reverse().ToArray();
- string convertedDate = Utilities.ConvertMsDosTimeFormatToDateTime(zf.Entries[i].LastMod).ToString("yyyy/MM/dd hh:mm:ss");
+ string newname = zf.Filename(i);
+ long newsize = (long)zf.UncompressedSize(i);
+ byte[] newcrc = zf.CRC32(i);
+ string convertedDate = zf.LastModified(i).ToString("yyyy/MM/dd hh:mm:ss");
found.Add(new BaseFile
{
@@ -323,18 +325,18 @@ namespace SabreTools.Library.FileTypes
// Otherwise, use the stream directly
else
{
- BaseFile zipEntryRom = Utilities.GetStreamInfo(readStream, (long)zf.Entries[i].UncompressedSize, omitFromScan: omitFromScan, keepReadOpen: true);
- zipEntryRom.Filename = zf.Entries[i].FileName;
+ BaseFile zipEntryRom = Utilities.GetStreamInfo(readStream, (long)zf.UncompressedSize(i), omitFromScan: omitFromScan, keepReadOpen: true);
+ zipEntryRom.Filename = zf.Filename(i);
zipEntryRom.Parent = gamename;
- string convertedDate = Utilities.ConvertMsDosTimeFormatToDateTime(zf.Entries[i].LastMod).ToString("yyyy/MM/dd hh:mm:ss");
+ string convertedDate = zf.LastModified(i).ToString("yyyy/MM/dd hh:mm:ss");
zipEntryRom.Date = (date ? convertedDate : null);
found.Add(zipEntryRom);
}
}
// Dispose of the archive
- zr = zf.CloseReadStream();
- zf.Close();
+ zr = zf.ZipFileCloseReadStream();
+ zf.ZipFileClose();
}
catch (Exception ex)
{
@@ -465,7 +467,7 @@ namespace SabreTools.Library.FileTypes
if (!File.Exists(archiveFileName))
{
inputStream.Seek(0, SeekOrigin.Begin);
- zipReturn = zipFile.Create(tempFile);
+ zipReturn = zipFile.ZipFileCreate(tempFile);
// Open the input file for reading
ulong istreamSize = (ulong)(inputStream.Length);
@@ -474,12 +476,11 @@ namespace SabreTools.Library.FileTypes
if (date && !String.IsNullOrWhiteSpace(rom.Date) && DateTime.TryParse(rom.Date.Replace('\\', '/'), out dt))
{
uint msDosDateTime = Utilities.ConvertDateTimeToMsDosTimeFormat(dt);
- zipFile.OpenWriteStream(false, false, rom.Name.Replace('\\', '/'), istreamSize,
- SabreTools.Library.Data.CompressionMethod.Deflated, out writeStream, lastMod: msDosDateTime);
+ zipFile.ZipFileOpenWriteStream(false, false, rom.Name.Replace('\\', '/'), istreamSize, (ushort)CompressionMethod.Deflated, out writeStream);
}
else
{
- zipFile.OpenWriteStream(false, true, rom.Name.Replace('\\', '/'), istreamSize, SabreTools.Library.Data.CompressionMethod.Deflated, out writeStream);
+ zipFile.ZipFileOpenWriteStream(false, true, rom.Name.Replace('\\', '/'), istreamSize, (ushort)CompressionMethod.Deflated, out writeStream);
}
// Copy the input stream to the output
@@ -491,43 +492,48 @@ namespace SabreTools.Library.FileTypes
writeStream.Flush();
}
inputStream.Dispose();
- zipFile.CloseWriteStream(Convert.ToUInt32(rom.CRC, 16));
+ zipFile.ZipFileCloseWriteStream(Utilities.StringToByteArray(rom.CRC));
}
// Otherwise, sort the input files and write out in the correct order
else
{
// Open the old archive for reading
- oldZipFile.Open(archiveFileName, new FileInfo(archiveFileName).LastWriteTime.Ticks, true);
+ oldZipFile.ZipFileOpen(archiveFileName, new FileInfo(archiveFileName).LastWriteTime.Ticks, true);
// Map all inputs to index
Dictionary<string, int> inputIndexMap = new Dictionary<string, int>();
+ var oldZipFileContents = new List<string>();
+ for (int i = 0; i < oldZipFile.LocalFilesCount(); i++)
+ {
+ oldZipFileContents.Add(oldZipFile.Filename(i));
+ }
// If the old one doesn't contain the new file, then add it
- if (!oldZipFile.Contains(rom.Name.Replace('\\', '/')))
+ if (!oldZipFileContents.Contains(rom.Name.Replace('\\', '/')))
{
inputIndexMap.Add(rom.Name.Replace('\\', '/'), -1);
}
// Then add all of the old entries to it too
- for (int i = 0; i < oldZipFile.EntriesCount; i++)
+ for (int i = 0; i < oldZipFile.LocalFilesCount(); i++)
{
inputIndexMap.Add(oldZipFile.Filename(i), i);
}
// If the number of entries is the same as the old archive, skip out
- if (inputIndexMap.Keys.Count <= oldZipFile.EntriesCount)
+ if (inputIndexMap.Keys.Count <= oldZipFile.LocalFilesCount())
{
success = true;
return success;
}
// Otherwise, process the old zipfile
- zipFile.Create(tempFile);
+ zipFile.ZipFileCreate(tempFile);
// Get the order for the entries with the new file
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Copy over all files to the new archive
foreach (string key in keys)
@@ -545,12 +551,11 @@ namespace SabreTools.Library.FileTypes
if (date && !String.IsNullOrWhiteSpace(rom.Date) && DateTime.TryParse(rom.Date.Replace('\\', '/'), out dt))
{
uint msDosDateTime = Utilities.ConvertDateTimeToMsDosTimeFormat(dt);
- zipFile.OpenWriteStream(false, false, rom.Name.Replace('\\', '/'), istreamSize,
- SabreTools.Library.Data.CompressionMethod.Deflated, out writeStream, lastMod: msDosDateTime);
+ zipFile.ZipFileOpenWriteStream(false, false, rom.Name.Replace('\\', '/'), istreamSize, (ushort)CompressionMethod.Deflated, out writeStream);
}
else
{
- zipFile.OpenWriteStream(false, true, rom.Name.Replace('\\', '/'), istreamSize, SabreTools.Library.Data.CompressionMethod.Deflated, out writeStream);
+ zipFile.ZipFileOpenWriteStream(false, true, rom.Name.Replace('\\', '/'), istreamSize, (ushort)CompressionMethod.Deflated, out writeStream);
}
// Copy the input stream to the output
@@ -561,17 +566,17 @@ namespace SabreTools.Library.FileTypes
writeStream.Write(ibuffer, 0, ilen);
writeStream.Flush();
}
+
inputStream.Dispose();
- zipFile.CloseWriteStream(Convert.ToUInt32(rom.CRC, 16));
+ zipFile.ZipFileCloseWriteStream(Utilities.StringToByteArray(rom.CRC));
}
// Otherwise, copy the file from the old archive
else
{
// Instantiate the streams
- oldZipFile.OpenReadStream(index, false, out Stream zreadStream, out ulong istreamSize, out SabreTools.Library.Data.CompressionMethod icompressionMethod, out uint lastMod);
- zipFile.OpenWriteStream(false, lastMod == Constants.TorrentZipFileDateTime, oldZipFile.Filename(index),
- istreamSize, SabreTools.Library.Data.CompressionMethod.Deflated, out writeStream, lastMod: lastMod);
+ oldZipFile.ZipFileOpenReadStream(index, false, out Stream zreadStream, out ulong istreamSize, out ushort icompressionMethod);
+ zipFile.ZipFileOpenWriteStream(false, true, oldZipFile.Filename(index), istreamSize, (ushort)CompressionMethod.Deflated, out writeStream);
// Copy the input stream to the output
byte[] ibuffer = new byte[_bufferSize];
@@ -581,13 +586,16 @@ namespace SabreTools.Library.FileTypes
writeStream.Write(ibuffer, 0, ilen);
writeStream.Flush();
}
- zipFile.CloseWriteStream(BitConverter.ToUInt32(oldZipFile.CRC32(index), 0));
+
+ oldZipFile.ZipFileCloseReadStream();
+ zipFile.ZipFileCloseWriteStream(oldZipFile.CRC32(index));
}
}
}
// Close the output zip file
- zipFile.Close();
+ zipFile.ZipFileClose();
+ oldZipFile.ZipFileClose();
success = true;
}
@@ -599,8 +607,6 @@ namespace SabreTools.Library.FileTypes
finally
{
inputStream?.Dispose();
- zipFile.Dispose();
- oldZipFile.Dispose();
}
// If the old file exists, delete it and replace
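
The write side follows the same renaming: Create becomes ZipFileCreate, OpenWriteStream becomes ZipFileOpenWriteStream (taking a ushort compression method and no lastMod argument), CloseWriteStream becomes ZipFileCloseWriteStream (taking the CRC as a byte array instead of a uint), and Close becomes ZipFileClose. A compact sketch under those assumptions; the AddFile helper name and the local Deflated constant (0x08, the standard deflate method id) are illustrative rather than taken from the Compress sources.

    using System.IO;
    using Compress;          // ZipReturn
    using Compress.ZipFile;  // ZipFile

    internal static class ZipWriteSketch
    {
        private const ushort Deflated = 0x08; // standard zip "deflate" method id

        // Write a single entry into a new zip, mirroring the ZipFileCreate /
        // ZipFileOpenWriteStream / ZipFileCloseWriteStream sequence used above.
        public static ZipReturn AddFile(string zipPath, string entryName, Stream input, byte[] crc32, int bufferSize = 4 * 1024 * 1024)
        {
            var zipFile = new ZipFile();
            ZipReturn zr = zipFile.ZipFileCreate(zipPath);
            if (zr != ZipReturn.ZipGood)
                return zr;

            ulong streamSize = (ulong)input.Length;
            // The two leading flags mirror the (false, true, ...) usage above; their exact meaning is not documented in this diff.
            zipFile.ZipFileOpenWriteStream(false, true, entryName.Replace('\\', '/'), streamSize, Deflated, out Stream writeStream);

            byte[] buffer = new byte[bufferSize];
            int len;
            while ((len = input.Read(buffer, 0, buffer.Length)) > 0)
            {
                writeStream.Write(buffer, 0, len);
            }
            writeStream.Flush();

            zipFile.ZipFileCloseWriteStream(crc32); // CRC as a byte array, as produced by Utilities.StringToByteArray(rom.CRC) in the hunks above
            zipFile.ZipFileClose();
            return ZipReturn.ZipGood;
        }
    }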
@@ -668,7 +674,7 @@ namespace SabreTools.Library.FileTypes
// If the archive doesn't exist, create it and put the single file
if (!File.Exists(archiveFileName))
{
- zipReturn = zipFile.Create(tempFile);
+ zipReturn = zipFile.ZipFileCreate(tempFile);
// Map all inputs to index
Dictionary<string, int> inputIndexMap = new Dictionary<string, int>();
@@ -679,7 +685,7 @@ namespace SabreTools.Library.FileTypes
// Sort the keys in TZIP order
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Now add all of the files in order
foreach (string key in keys)
@@ -695,12 +701,11 @@ namespace SabreTools.Library.FileTypes
if (date && !String.IsNullOrWhiteSpace(roms[index].Date) && DateTime.TryParse(roms[index].Date.Replace('\\', '/'), out dt))
{
uint msDosDateTime = Utilities.ConvertDateTimeToMsDosTimeFormat(dt);
- zipFile.OpenWriteStream(false, false, roms[index].Name.Replace('\\', '/'), istreamSize,
- SabreTools.Library.Data.CompressionMethod.Deflated, out writeStream, lastMod: msDosDateTime);
+ zipFile.ZipFileOpenWriteStream(false, false, roms[index].Name.Replace('\\', '/'), istreamSize, (ushort)CompressionMethod.Deflated, out writeStream);
}
else
{
- zipFile.OpenWriteStream(false, true, roms[index].Name.Replace('\\', '/'), istreamSize, SabreTools.Library.Data.CompressionMethod.Deflated, out writeStream);
+ zipFile.ZipFileOpenWriteStream(false, true, roms[index].Name.Replace('\\', '/'), istreamSize, (ushort)CompressionMethod.Deflated, out writeStream);
}
// Copy the input stream to the output
@@ -711,8 +716,9 @@ namespace SabreTools.Library.FileTypes
writeStream.Write(ibuffer, 0, ilen);
writeStream.Flush();
}
+
freadStream.Dispose();
- zipFile.CloseWriteStream(Convert.ToUInt32(roms[index].CRC, 16));
+ zipFile.ZipFileCloseWriteStream(Utilities.StringToByteArray(roms[index].CRC));
}
}
@@ -720,14 +726,20 @@ namespace SabreTools.Library.FileTypes
else
{
// Open the old archive for reading
- oldZipFile.Open(archiveFileName, new FileInfo(archiveFileName).LastWriteTime.Ticks, true);
+ oldZipFile.ZipFileOpen(archiveFileName, new FileInfo(archiveFileName).LastWriteTime.Ticks, true);
// Map all inputs to index
Dictionary<string, int> inputIndexMap = new Dictionary<string, int>();
for (int i = 0; i < inputFiles.Count; i++)
{
+ var oldZipFileContents = new List<string>();
+ for (int j = 0; j < oldZipFile.LocalFilesCount(); j++)
+ {
+ oldZipFileContents.Add(oldZipFile.Filename(j));
+ }
+
// If the old one contains the new file, then just skip out
- if (oldZipFile.Contains(roms[i].Name.Replace('\\', '/')))
+ if (oldZipFileContents.Contains(roms[i].Name.Replace('\\', '/')))
{
continue;
}
@@ -736,24 +748,24 @@ namespace SabreTools.Library.FileTypes
}
// Then add all of the old entries to it too
- for (int i = 0; i < oldZipFile.EntriesCount; i++)
+ for (int i = 0; i < oldZipFile.LocalFilesCount(); i++)
{
inputIndexMap.Add(oldZipFile.Filename(i), i);
}
// If the number of entries is the same as the old archive, skip out
- if (inputIndexMap.Keys.Count <= oldZipFile.EntriesCount)
+ if (inputIndexMap.Keys.Count <= oldZipFile.LocalFilesCount())
{
success = true;
return success;
}
// Otherwise, process the old zipfile
- zipFile.Create(tempFile);
+ zipFile.ZipFileCreate(tempFile);
// Get the order for the entries with the new file
List<string> keys = inputIndexMap.Keys.ToList();
- keys.Sort(ZipFile.TorrentZipStringCompare);
+ keys.Sort(ZipFile.TrrntZipStringCompare);
// Copy over all files to the new archive
foreach (string key in keys)
@@ -772,12 +784,11 @@ namespace SabreTools.Library.FileTypes
if (date && !String.IsNullOrWhiteSpace(roms[-index - 1].Date) && DateTime.TryParse(roms[-index - 1].Date.Replace('\\', '/'), out dt))
{
uint msDosDateTime = Utilities.ConvertDateTimeToMsDosTimeFormat(dt);
- zipFile.OpenWriteStream(false, false, roms[-index - 1].Name.Replace('\\', '/'), istreamSize,
- SabreTools.Library.Data.CompressionMethod.Deflated, out writeStream, lastMod: msDosDateTime);
+ zipFile.ZipFileOpenWriteStream(false, false, roms[-index - 1].Name.Replace('\\', '/'), istreamSize, (ushort)CompressionMethod.Deflated, out writeStream);
}
else
{
- zipFile.OpenWriteStream(false, true, roms[-index - 1].Name.Replace('\\', '/'), istreamSize, SabreTools.Library.Data.CompressionMethod.Deflated, out writeStream);
+ zipFile.ZipFileOpenWriteStream(false, true, roms[-index - 1].Name.Replace('\\', '/'), istreamSize, (ushort)CompressionMethod.Deflated, out writeStream);
}
// Copy the input stream to the output
@@ -789,16 +800,15 @@ namespace SabreTools.Library.FileTypes
writeStream.Flush();
}
freadStream.Dispose();
- zipFile.CloseWriteStream(Convert.ToUInt32(roms[-index - 1].CRC, 16));
+ zipFile.ZipFileCloseWriteStream(Utilities.StringToByteArray(roms[-index - 1].CRC));
}
// Otherwise, copy the file from the old archive
else
{
// Instantiate the streams
- oldZipFile.OpenReadStream(index, false, out Stream zreadStream, out ulong istreamSize, out SabreTools.Library.Data.CompressionMethod icompressionMethod, out uint lastMod);
- zipFile.OpenWriteStream(false, lastMod == Constants.TorrentZipFileDateTime, oldZipFile.Filename(index),
- istreamSize, SabreTools.Library.Data.CompressionMethod.Deflated, out writeStream, lastMod: lastMod);
+ oldZipFile.ZipFileOpenReadStream(index, false, out Stream zreadStream, out ulong istreamSize, out ushort icompressionMethod);
+ zipFile.ZipFileOpenWriteStream(false, true, oldZipFile.Filename(index), istreamSize, (ushort)CompressionMethod.Deflated, out writeStream);
// Copy the input stream to the output
byte[] ibuffer = new byte[_bufferSize];
@@ -808,13 +818,15 @@ namespace SabreTools.Library.FileTypes
writeStream.Write(ibuffer, 0, ilen);
writeStream.Flush();
}
- zipFile.CloseWriteStream(BitConverter.ToUInt32(oldZipFile.CRC32(index), 0));
+
+ zipFile.ZipFileCloseWriteStream(oldZipFile.CRC32(index));
}
}
}
// Close the output zip file
- zipFile.Close();
+ zipFile.ZipFileClose();
+ oldZipFile.ZipFileClose();
success = true;
}
@@ -823,11 +835,6 @@ namespace SabreTools.Library.FileTypes
Console.WriteLine(ex);
success = false;
}
- finally
- {
- zipFile.Dispose();
- oldZipFile.Dispose();
- }
// If the old file exists, delete it and replace
if (File.Exists(archiveFileName))