Mirror of https://github.com/adamhathcock/sharpcompress.git (synced 2026-02-04 13:34:59 +00:00)

Compare commits: 27 commits, tar_redux ... zip_encryp
| SHA1 |
| --- |
| 703e4bd49e |
| 30d4251332 |
| 3983db08ff |
| 72114bceea |
| c303f96682 |
| 0e785968c4 |
| 15110e18e2 |
| 5465af041b |
| 310d56fc16 |
| 231258ef69 |
| 16b7e3ffc8 |
| 513e59f830 |
| b10a1cf2bd |
| 1656edaa29 |
| cff49aacba |
| 19c32aff6c |
| db3ec8337f |
| e7bfc40461 |
| 3d3ca254ba |
| b45bc859a4 |
| 912d7a8775 |
| 16885da1b5 |
| 26714052eb |
| 3df763a783 |
| 3f24a744c0 |
| 9270d7cabf |
| 69fc74e376 |
@@ -1,5 +1,8 @@
 dist: trusty
 language: csharp
+cache:
+  directories:
+  - $HOME/.dotnet
 solution: SharpCompress.sln
 matrix:
   include:
FORMATS.md (44 lines changed)
@@ -1,10 +1,10 @@
-# Archive Formats
+# Formats
 
 ## Accessing Archives
 
-Archive classes allow random access to a seekable stream.
-Reader classes allow forward-only reading
-Writer classes allow forward-only Writing
+- Archive classes allow random access to a seekable stream.
+- Reader classes allow forward-only reading on a stream.
+- Writer classes allow forward-only writing on a stream.
 
 ## Supported Format Table
 
@@ -12,18 +12,24 @@ Writer classes allow forward-only Writing
 | --- | --- | --- | --- | --- | --- |
 | Rar | Rar | Decompress (1) | RarArchive | RarReader | N/A |
 | Zip (2) | None, DEFLATE, BZip2, LZMA/LZMA2, PPMd | Both | ZipArchive | ZipReader | ZipWriter |
-| Tar | None, BZip2, GZip, LZip | Both | TarArchive | TarReader | TarWriter (3) |
-| GZip (single file) | GZip | Both | GZipArchive | GZipReader | GZipWriter |
+| Tar | None | Both | TarArchive | TarReader | TarWriter (3) |
+| Tar.GZip | DEFLATE | Both | TarArchive | TarReader | TarWriter (3) |
+| Tar.BZip2 | BZip2 | Both | TarArchive | TarReader | TarWriter (3) |
+| Tar.LZip | LZMA | Both | TarArchive | TarReader | TarWriter (3) |
+| Tar.XZ | LZMA2 | Decompress | TarArchive | TarReader | TarWriter (3) |
+| GZip (single file) | DEFLATE | Both | GZipArchive | GZipReader | GZipWriter |
 | 7Zip (4) | LZMA, LZMA2, BZip2, PPMd, BCJ, BCJ2, Deflate | Decompress | SevenZipArchive | N/A | N/A |
 | LZip (single file) (5) | LZip (LZMA) | Both | LZipArchive | LZipReader | LZipWriter |
 
 1. SOLID Rars are only supported in the RarReader API.
-2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading is supported.
+2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported, but only with seekable streams, as the Zip spec doesn't support Zip64 data in post-data descriptors.
 3. The Tar format requires a file size in the header. If no size is specified to the TarWriter and the stream is not seekable, then an exception will be thrown.
 4. The 7Zip format doesn't allow reading as a forward-only stream, so 7Zip is only supported through the Archive API.
 5. LZip has no support for extra data like the file name or timestamp. A default filename is used when looking at the entry Key on the archive.
 
-## Compressors
+## Compression Streams
 
-For those who want to directly compress/decompress bits
+For those who want to directly compress/decompress bits. The single-file formats are represented here as well. However, BZip2, LZip and XZ have no metadata (GZip has a little), so using them without something like a Tar file makes little sense.
 
 | Compressor | Compress/Decompress |
 | --- | --- |
@@ -33,4 +39,22 @@ For those who want to directly compress/decompress bits
 | LZMAStream | Both |
 | PPMdStream | Both |
 | ADCStream | Decompress |
-| LZipStream | Decompress |
+| LZipStream | Both |
+| XZStream | Decompress |
+
+## Archive Formats vs Compression
+
+Sometimes the terminology gets mixed.
+
+### Compression
+
+DEFLATE and LZMA are pure compression algorithms.
+
+### Formats
+
+Formats like Zip, 7Zip and Rar are archive formats only. They use other compression methods (e.g. DEFLATE, LZMA, etc.) or proprietary ones (e.g. RAR).
+
+### Overlap
+
+GZip, BZip2 and LZip are single-file archival formats. The overlap in the API happens because Tar uses the single-file formats as "compression" methods and the API tries to hide this a bit.
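To make the Archive/Reader split described above concrete, here is a minimal C# sketch of both access styles. The type and member names follow the table (ZipArchive, ZipReader, entry Key); the exact file name and extraction helpers are illustrative assumptions and may differ between SharpCompress versions.

```csharp
using System;
using System.IO;
using SharpCompress.Archives.Zip;
using SharpCompress.Readers;

class FormatsExample
{
    static void Main()
    {
        // Archive API: random access over a seekable stream or file path.
        using (var archive = ZipArchive.Open("example.zip"))
        {
            foreach (var entry in archive.Entries)
            {
                Console.WriteLine($"{entry.Key}: {entry.Size} bytes");
            }
        }

        // Reader API: forward-only, suitable for non-seekable streams.
        using (Stream stream = File.OpenRead("example.zip"))
        using (var reader = ReaderFactory.Open(stream))
        {
            while (reader.MoveToNextEntry())
            {
                Console.WriteLine(reader.Entry.Key);
            }
        }
    }
}
```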
README.md (32 lines changed)
@@ -15,6 +15,16 @@ Post Issues on Github!
 
 Check the [Supported Formats](FORMATS.md) and [Basic Usage](USAGE.md).
 
+## Recommended Formats
+
+In general, I recommend GZip (Deflate)/BZip2 (BZip)/LZip (LZMA), as the simplicity of the formats lends itself to better long-term archival as well as streamability. Tar is often used in conjunction with them to put multiple files in a single archive (e.g. `.tar.gz`).
+
+Zip is okay, but it's a very haphazard format, and the variation in headers and implementations makes it hard to get correct. It uses Deflate by default but supports a lot of compression methods.
+
+RAR is not recommended as it's a proprietary format and the compression is closed source. Use Tar/LZip for LZMA.
+
+7Zip and XZ are both overly complicated. 7Zip does not support streaming. XZ has known holes, explained here: http://www.nongnu.org/lzip/xz_inadequate.html. Use Tar/LZip for LZMA compression instead.
+
 ## A Simple Request
 
 Hi everyone. I hope you're using SharpCompress and finding it useful. Please give me feedback on what you'd like to see changed, especially as far as usability goes. New feature suggestions are always welcome as well. I would also like to know what projects SharpCompress is being used in. I like seeing how it is used to give me ideas for future versions. Thanks!
 
@@ -34,6 +44,26 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
 
 ## Version Log
 
+### Version 0.18
+
+* [Now on Github releases](https://github.com/adamhathcock/sharpcompress/releases/tag/0.18)
+
+### Version 0.17.1
+
+* Fix - [Bug Fix for .NET Core on Windows](https://github.com/adamhathcock/sharpcompress/pull/257)
+
+### Version 0.17.0
+
+* New - Full LZip support! Can read and write LZip files and Tars inside LZip files. [Make LZip a first class citizen. #241](https://github.com/adamhathcock/sharpcompress/issues/241)
+* New - XZ read support! Can read XZ files and Tars inside XZ files. [XZ in SharpCompress #91](https://github.com/adamhathcock/sharpcompress/issues/94)
+* Fix - [Regression - zip file writing on seekable streams always assumed stream start was 0. Introduced with Zip64 writing.](https://github.com/adamhathcock/sharpcompress/issues/244)
+* Fix - [Zip files with post-data descriptors can be properly skipped via decompression](https://github.com/adamhathcock/sharpcompress/issues/162)
+
+### Version 0.16.2
+
+* Fix [.NET 3.5 should support files and cryptography (was a regression from 0.16.0)](https://github.com/adamhathcock/sharpcompress/pull/251)
+* Fix [Zip per entry compression customization wrote the wrong method into the zip archive](https://github.com/adamhathcock/sharpcompress/pull/249)
+
 ### Version 0.16.1
 
 * Fix [Preserve compression method when getting a compressed stream](https://github.com/adamhathcock/sharpcompress/pull/235)
@@ -152,6 +182,8 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
 * Embedded some BouncyCastle crypto classes to allow RAR Decryption and Winzip AES Decryption in Portable and Windows Store DLLs
 * Built in Release (I think)
 
+XZ implementation based on: https://github.com/sambott/XZ.NET by @sambott
+
 7Zip implementation based on: https://code.google.com/p/managed-lzma/
LICENSE
@@ -6,6 +6,7 @@ using SharpCompress.Archives.SevenZip;
 using SharpCompress.Archives.Tar;
 using SharpCompress.Archives.Zip;
 using SharpCompress.Common;
+using SharpCompress.Compressors.LZMA;
 using SharpCompress.Readers;
 
 namespace SharpCompress.Archives
@@ -55,7 +56,7 @@ namespace SharpCompress.Archives
                 stream.Seek(0, SeekOrigin.Begin);
                 return TarArchive.Open(stream, readerOptions);
             }
-            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip");
+            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip");
         }
 
         public static IWritableArchive Create(ArchiveType type)
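The hunk above extends the auto-detection fallback message to mention LZip. The file name was not captured in this view, but the code matches the archive auto-detection helper; assuming the usual ArchiveFactory entry point, detection can be exercised like this (a hedged sketch, not taken from this diff):

```csharp
using System;
using System.IO;
using SharpCompress.Archives;

class AutoDetectExample
{
    static void Main()
    {
        // The factory sniffs the stream and returns the matching IArchive;
        // an unrecognized stream triggers the InvalidOperationException above.
        using (Stream stream = File.OpenRead("unknown-format.bin"))
        using (var archive = ArchiveFactory.Open(stream))
        {
            Console.WriteLine(archive.Type);
        }
    }
}
```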
@@ -14,6 +14,7 @@ namespace SharpCompress.Archives.GZip
|
||||
public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
{
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -36,6 +37,7 @@ namespace SharpCompress.Archives.GZip
|
||||
return new GZipArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -54,11 +56,11 @@ namespace SharpCompress.Archives.GZip
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="options"></param>
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="options"></param>
|
||||
internal GZipArchive(FileInfo fileInfo, ReaderOptions options)
|
||||
: base(ArchiveType.GZip, fileInfo, options)
|
||||
{
|
||||
@@ -104,15 +106,9 @@ namespace SharpCompress.Archives.GZip
|
||||
{
|
||||
// read the header on the first read
|
||||
byte[] header = new byte[10];
|
||||
int n = stream.Read(header, 0, header.Length);
|
||||
|
||||
// workitem 8501: handle edge case (decompress empty stream)
|
||||
if (n == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (n != 10)
|
||||
if (!stream.ReadFully(header))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
@@ -158,7 +154,7 @@ namespace SharpCompress.Archives.GZip
|
||||
{
|
||||
throw new InvalidOperationException("Only one entry is allowed in a GZip Archive");
|
||||
}
|
||||
using (var writer = new GZipWriter(stream))
|
||||
using (var writer = new GZipWriter(stream, new GZipWriterOptions(options)))
|
||||
{
|
||||
foreach (var entry in oldEntries.Concat(newEntries)
|
||||
.Where(x => !x.IsDirectory))
|
||||
@@ -179,7 +175,7 @@ namespace SharpCompress.Archives.GZip
|
||||
protected override IEnumerable<GZipArchiveEntry> LoadEntries(IEnumerable<GZipVolume> volumes)
|
||||
{
|
||||
Stream stream = volumes.Single().Stream;
|
||||
yield return new GZipArchiveEntry(this, new GZipFilePart(stream));
|
||||
yield return new GZipArchiveEntry(this, new GZipFilePart(stream, ReaderOptions.ArchiveEncoding));
|
||||
}
|
||||
|
||||
protected override IReader CreateReaderForSolidExtraction()
|
||||
|
||||
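The GZip hunks above replace bare `stream.Read` calls with a `ReadFully` helper so that short reads from the underlying stream can't silently truncate the 10-byte GZip header. The library has its own extension for this; the following standalone sketch (hypothetical names, not the library's code) shows the usual shape of such a helper:

```csharp
using System.IO;

internal static class StreamExtensionsSketch
{
    // Keep reading until the buffer is full or the stream ends.
    // Returns false if end-of-stream was hit before the buffer was filled.
    public static bool ReadFullySketch(this Stream stream, byte[] buffer)
    {
        int total = 0;
        while (total < buffer.Length)
        {
            int read = stream.Read(buffer, total, buffer.Length - total);
            if (read <= 0)
            {
                return false;
            }
            total += read;
        }
        return true;
    }
}
```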
@@ -106,7 +106,7 @@ namespace SharpCompress.Archives.SevenZip
             for (int i = 0; i < database.Files.Count; i++)
             {
                 var file = database.Files[i];
-                yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file));
+                yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file, ReaderOptions.ArchiveEncoding));
             }
         }
 
@@ -4,6 +4,7 @@ using System.IO;
|
||||
using System.Linq;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Tar;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
using SharpCompress.Readers;
|
||||
using SharpCompress.Readers.Tar;
|
||||
@@ -15,7 +16,7 @@ namespace SharpCompress.Archives.Tar
|
||||
public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
{
|
||||
#if !NO_FILE
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -38,7 +39,7 @@ namespace SharpCompress.Archives.Tar
|
||||
return new TarArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -51,6 +52,7 @@ namespace SharpCompress.Archives.Tar
|
||||
}
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
public static bool IsTarFile(string filePath)
|
||||
{
|
||||
return IsTarFile(new FileInfo(filePath));
|
||||
@@ -73,9 +75,9 @@ namespace SharpCompress.Archives.Tar
|
||||
{
|
||||
try
|
||||
{
|
||||
var input = new TarInputStream(stream);
|
||||
var header = input.GetNextEntry();
|
||||
return header.Name.Length > 0;
|
||||
TarHeader tar = new TarHeader(new ArchiveEncoding());
|
||||
tar.Read(new BinaryReader(stream));
|
||||
return tar.Name.Length > 0 && Enum.IsDefined(typeof(EntryType), tar.EntryType);
|
||||
}
|
||||
catch
|
||||
{
|
||||
@@ -97,7 +99,6 @@ namespace SharpCompress.Archives.Tar
|
||||
|
||||
protected override IEnumerable<TarVolume> LoadVolumes(FileInfo file)
|
||||
{
|
||||
|
||||
return new TarVolume(file.OpenRead(), ReaderOptions).AsEnumerable();
|
||||
}
|
||||
#endif
|
||||
@@ -126,11 +127,11 @@ namespace SharpCompress.Archives.Tar
|
||||
{
|
||||
Stream stream = volumes.Single().Stream;
|
||||
TarHeader previousHeader = null;
|
||||
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream))
|
||||
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream, ReaderOptions.ArchiveEncoding))
|
||||
{
|
||||
if (header != null)
|
||||
{
|
||||
if (header.TypeFlag == TarHeader.LF_GNU_LONGNAME)
|
||||
if (header.EntryType == EntryType.LongName)
|
||||
{
|
||||
previousHeader = header;
|
||||
}
|
||||
@@ -151,7 +152,7 @@ namespace SharpCompress.Archives.Tar
|
||||
memoryStream.Position = 0;
|
||||
var bytes = memoryStream.ToArray();
|
||||
|
||||
header.Name = ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length).TrimNulls();
|
||||
header.Name = ReaderOptions.ArchiveEncoding.Decode(bytes).TrimNulls();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ namespace SharpCompress.Archives.Zip
|
||||
public CompressionLevel DeflateCompressionLevel { get; set; }
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -46,6 +47,7 @@ namespace SharpCompress.Archives.Zip
|
||||
return new ZipArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -58,6 +60,7 @@ namespace SharpCompress.Archives.Zip
|
||||
}
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
public static bool IsZipFile(string filePath, string password = null)
|
||||
{
|
||||
return IsZipFile(new FileInfo(filePath), password);
|
||||
@@ -78,7 +81,7 @@ namespace SharpCompress.Archives.Zip
|
||||
|
||||
public static bool IsZipFile(Stream stream, string password = null)
|
||||
{
|
||||
StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password);
|
||||
StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password, new ArchiveEncoding());
|
||||
try
|
||||
{
|
||||
ZipHeader header =
|
||||
@@ -109,7 +112,7 @@ namespace SharpCompress.Archives.Zip
|
||||
internal ZipArchive(FileInfo fileInfo, ReaderOptions readerOptions)
|
||||
: base(ArchiveType.Zip, fileInfo, readerOptions)
|
||||
{
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password);
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding);
|
||||
}
|
||||
|
||||
protected override IEnumerable<ZipVolume> LoadVolumes(FileInfo file)
|
||||
@@ -131,7 +134,7 @@ namespace SharpCompress.Archives.Zip
|
||||
internal ZipArchive(Stream stream, ReaderOptions readerOptions)
|
||||
: base(ArchiveType.Zip, stream, readerOptions)
|
||||
{
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password);
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding);
|
||||
}
|
||||
|
||||
protected override IEnumerable<ZipVolume> LoadVolumes(IEnumerable<Stream> streams)
|
||||
@@ -150,19 +153,19 @@ namespace SharpCompress.Archives.Zip
|
||||
switch (h.ZipHeaderType)
|
||||
{
|
||||
case ZipHeaderType.DirectoryEntry:
|
||||
{
|
||||
yield return new ZipArchiveEntry(this,
|
||||
new SeekableZipFilePart(headerFactory,
|
||||
h as DirectoryEntryHeader,
|
||||
stream));
|
||||
}
|
||||
{
|
||||
yield return new ZipArchiveEntry(this,
|
||||
new SeekableZipFilePart(headerFactory,
|
||||
h as DirectoryEntryHeader,
|
||||
stream));
|
||||
}
|
||||
break;
|
||||
case ZipHeaderType.DirectoryEnd:
|
||||
{
|
||||
byte[] bytes = (h as DirectoryEndHeader).Comment;
|
||||
volume.Comment = ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length);
|
||||
yield break;
|
||||
}
|
||||
{
|
||||
byte[] bytes = (h as DirectoryEndHeader).Comment;
|
||||
volume.Comment = ReaderOptions.ArchiveEncoding.Decode(bytes);
|
||||
yield break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
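The ZipArchive changes above thread the configured ArchiveEncoding into both the streaming and seekable header factories; the public surface is unchanged. For completeness, a minimal sketch of the probe-then-open pattern that the members shown here support (the file name and password are placeholders):

```csharp
using SharpCompress.Archives.Zip;
using SharpCompress.Readers;

class ZipProbeExample
{
    static void Main()
    {
        const string path = "maybe-a-zip.dat";
        if (ZipArchive.IsZipFile(path))
        {
            using (var archive = ZipArchive.Open(path, new ReaderOptions { Password = "secret" }))
            {
                System.Console.WriteLine(archive.Entries.Count);
            }
        }
    }
}
```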
@@ -1,23 +1,60 @@
-using System.Text;
+using System;
+using System.Text;
 
 namespace SharpCompress.Common
 {
-    public static class ArchiveEncoding
+    public class ArchiveEncoding
     {
         /// <summary>
         /// Default encoding to use when archive format doesn't specify one.
         /// </summary>
-        public static Encoding Default { get; set; }
+        public Encoding Default { get; set; }
 
         /// <summary>
-        /// Encoding used by encryption schemes which don't comply with RFC 2898.
+        /// ArchiveEncoding used by encryption schemes which don't comply with RFC 2898.
         /// </summary>
-        public static Encoding Password { get; set; }
+        public Encoding Password { get; set; }
 
-        static ArchiveEncoding()
+        /// <summary>
+        /// Set this encoding when you want to force it for all encoding operations.
+        /// </summary>
+        public Encoding Forced { get; set; }
+
+        /// <summary>
+        /// Set this when you want to use a custom method for all decoding operations.
+        /// </summary>
+        /// <returns>string Func(bytes, index, length)</returns>
+        public Func<byte[], int, int, string> CustomDecoder { get; set; }
+
+        public ArchiveEncoding()
         {
             Default = Encoding.UTF8;
             Password = Encoding.UTF8;
         }
+
+        public string Decode(byte[] bytes)
+        {
+            return Decode(bytes, 0, bytes.Length);
+        }
+
+        public string Decode(byte[] bytes, int start, int length)
+        {
+            return GetDecoder().Invoke(bytes, start, length);
+        }
+
+        public byte[] Encode(string str)
+        {
+            return GetEncoding().GetBytes(str);
+        }
+
+        public Encoding GetEncoding()
+        {
+            return Forced ?? Default ?? Encoding.UTF8;
+        }
+
+        public Func<byte[], int, int, string> GetDecoder()
+        {
+            return CustomDecoder ?? ((bytes, index, count) => (Default ?? Encoding.UTF8).GetString(bytes, index, count));
+        }
     }
 }
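A small standalone example of the new instance-based ArchiveEncoding as defined above. Note the asymmetry visible in the class: Decode goes through GetDecoder (CustomDecoder, then Default), while Encode goes through GetEncoding (Forced, then Default).

```csharp
using System;
using System.Text;
using SharpCompress.Common;

class ArchiveEncodingExample
{
    static void Main()
    {
        var encoding = new ArchiveEncoding
        {
            // Used by Decode() via GetDecoder() when no CustomDecoder is set.
            Default = Encoding.UTF8
            // Forced        - consulted only by GetEncoding()/Encode().
            // CustomDecoder - takes over Decode() entirely when set.
        };

        byte[] raw = { 0x66, 0x69, 0x6C, 0x65, 0x2E, 0x74, 0x78, 0x74 }; // "file.txt"
        Console.WriteLine(encoding.Decode(raw));                // -> file.txt
        Console.WriteLine(encoding.Encode("file.txt").Length);  // -> 8
    }
}
```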
@@ -12,6 +12,7 @@
         BCJ,
         BCJ2,
         LZip,
+        Xz,
         Unknown
     }
 }
@@ -4,9 +4,17 @@ namespace SharpCompress.Common
 {
     public abstract class FilePart
     {
+        protected FilePart(ArchiveEncoding archiveEncoding)
+        {
+            ArchiveEncoding = archiveEncoding;
+        }
+
+        internal ArchiveEncoding ArchiveEncoding { get; }
+
         internal abstract string FilePartName { get; }
 
         internal abstract Stream GetCompressedStream();
         internal abstract Stream GetRawStream();
         internal bool Skipped { get; set; }
     }
 }
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.IO;
+using System.Text;
 
 namespace SharpCompress.Common.GZip
 {
@@ -39,9 +40,9 @@ namespace SharpCompress.Common.GZip
 
         internal override IEnumerable<FilePart> Parts => filePart.AsEnumerable<FilePart>();
 
-        internal static IEnumerable<GZipEntry> GetEntries(Stream stream)
+        internal static IEnumerable<GZipEntry> GetEntries(Stream stream, OptionsBase options)
         {
-            yield return new GZipEntry(new GZipFilePart(stream));
+            yield return new GZipEntry(new GZipFilePart(stream, options.ArchiveEncoding));
         }
     }
 }
@@ -1,38 +1,41 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.Compressors;
|
||||
using SharpCompress.Compressors.Deflate;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.GZip
|
||||
{
|
||||
internal class GZipFilePart : FilePart
|
||||
{
|
||||
private string name;
|
||||
private readonly Stream stream;
|
||||
private string _name;
|
||||
private readonly Stream _stream;
|
||||
|
||||
internal GZipFilePart(Stream stream)
|
||||
internal GZipFilePart(Stream stream, ArchiveEncoding archiveEncoding)
|
||||
: base(archiveEncoding)
|
||||
{
|
||||
ReadAndValidateGzipHeader(stream);
|
||||
EntryStartPosition = stream.Position;
|
||||
this.stream = stream;
|
||||
this._stream = stream;
|
||||
}
|
||||
|
||||
internal long EntryStartPosition { get; }
|
||||
|
||||
internal DateTime? DateModified { get; private set; }
|
||||
|
||||
internal override string FilePartName => name;
|
||||
internal override string FilePartName => _name;
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
return new DeflateStream(stream, CompressionMode.Decompress, CompressionLevel.Default, false);
|
||||
return new DeflateStream(_stream, CompressionMode.Decompress, CompressionLevel.Default, false);
|
||||
}
|
||||
|
||||
internal override Stream GetRawStream()
|
||||
{
|
||||
return stream;
|
||||
return _stream;
|
||||
}
|
||||
|
||||
private void ReadAndValidateGzipHeader(Stream stream)
|
||||
@@ -66,15 +69,16 @@ namespace SharpCompress.Common.GZip
|
||||
|
||||
Int16 extraLength = (Int16)(header[0] + header[1] * 256);
|
||||
byte[] extra = new byte[extraLength];
|
||||
n = stream.Read(extra, 0, extra.Length);
|
||||
if (n != extraLength)
|
||||
|
||||
if (!stream.ReadFully(extra))
|
||||
{
|
||||
throw new ZlibException("Unexpected end-of-file reading GZIP header.");
|
||||
}
|
||||
n = extraLength;
|
||||
}
|
||||
if ((header[3] & 0x08) == 0x08)
|
||||
{
|
||||
name = ReadZeroTerminatedString(stream);
|
||||
_name = ReadZeroTerminatedString(stream);
|
||||
}
|
||||
if ((header[3] & 0x10) == 0x010)
|
||||
{
|
||||
@@ -86,7 +90,7 @@ namespace SharpCompress.Common.GZip
|
||||
}
|
||||
}
|
||||
|
||||
private static string ReadZeroTerminatedString(Stream stream)
|
||||
private string ReadZeroTerminatedString(Stream stream)
|
||||
{
|
||||
byte[] buf1 = new byte[1];
|
||||
var list = new List<byte>();
|
||||
@@ -109,8 +113,8 @@ namespace SharpCompress.Common.GZip
|
||||
}
|
||||
}
|
||||
while (!done);
|
||||
byte[] a = list.ToArray();
|
||||
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
|
||||
byte[] buffer = list.ToArray();
|
||||
return ArchiveEncoding.Decode(buffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,5 @@
-namespace SharpCompress.Common
+
+namespace SharpCompress.Common
 {
     public class OptionsBase
     {
@@ -6,5 +7,7 @@
         /// SharpCompress will keep the supplied streams open. Default is true.
         /// </summary>
         public bool LeaveStreamOpen { get; set; } = true;
+
+        public ArchiveEncoding ArchiveEncoding { get; set; } = new ArchiveEncoding();
     }
 }
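Since OptionsBase now carries an ArchiveEncoding instance, the encoding can be configured per open call. The sketch below assumes ReaderOptions derives from OptionsBase (consistent with the readerOptions.ArchiveEncoding usages elsewhere in this diff); the file name is a placeholder, and legacy code pages such as 866 may require the System.Text.Encoding.CodePages package on .NET Core.

```csharp
using System.Text;
using SharpCompress.Archives.Zip;
using SharpCompress.Common;
using SharpCompress.Readers;

class EncodingOptionsExample
{
    static void Main()
    {
        var options = new ReaderOptions
        {
            ArchiveEncoding = new ArchiveEncoding
            {
                Default = Encoding.GetEncoding(866) // e.g. DOS Cyrillic entry names
            }
        };

        using (var archive = ZipArchive.Open("legacy-names.zip", options))
        {
            foreach (var entry in archive.Entries)
            {
                System.Console.WriteLine(entry.Key);
            }
        }
    }
}
```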
@@ -1,6 +1,6 @@
|
||||
using SharpCompress.IO;
|
||||
using System;
|
||||
using System.IO;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
@@ -52,50 +52,50 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
switch (HeaderType)
|
||||
{
|
||||
case HeaderType.FileHeader:
|
||||
{
|
||||
if (FileFlags.HasFlag(FileFlags.UNICODE))
|
||||
{
|
||||
int length = 0;
|
||||
while (length < fileNameBytes.Length
|
||||
&& fileNameBytes[length] != 0)
|
||||
if (FileFlags.HasFlag(FileFlags.UNICODE))
|
||||
{
|
||||
length++;
|
||||
}
|
||||
if (length != nameSize)
|
||||
{
|
||||
length++;
|
||||
FileName = FileNameDecoder.Decode(fileNameBytes, length);
|
||||
int length = 0;
|
||||
while (length < fileNameBytes.Length
|
||||
&& fileNameBytes[length] != 0)
|
||||
{
|
||||
length++;
|
||||
}
|
||||
if (length != nameSize)
|
||||
{
|
||||
length++;
|
||||
FileName = FileNameDecoder.Decode(fileNameBytes, length);
|
||||
}
|
||||
else
|
||||
{
|
||||
FileName = ArchiveEncoding.Decode(fileNameBytes);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
FileName = DecodeDefault(fileNameBytes);
|
||||
FileName = ArchiveEncoding.Decode(fileNameBytes);
|
||||
}
|
||||
FileName = ConvertPath(FileName, HostOS);
|
||||
}
|
||||
else
|
||||
{
|
||||
FileName = DecodeDefault(fileNameBytes);
|
||||
}
|
||||
FileName = ConvertPath(FileName, HostOS);
|
||||
}
|
||||
break;
|
||||
case HeaderType.NewSubHeader:
|
||||
{
|
||||
int datasize = HeaderSize - NEWLHD_SIZE - nameSize;
|
||||
if (FileFlags.HasFlag(FileFlags.SALT))
|
||||
{
|
||||
datasize -= SALT_SIZE;
|
||||
}
|
||||
if (datasize > 0)
|
||||
{
|
||||
SubData = reader.ReadBytes(datasize);
|
||||
}
|
||||
int datasize = HeaderSize - NEWLHD_SIZE - nameSize;
|
||||
if (FileFlags.HasFlag(FileFlags.SALT))
|
||||
{
|
||||
datasize -= SALT_SIZE;
|
||||
}
|
||||
if (datasize > 0)
|
||||
{
|
||||
SubData = reader.ReadBytes(datasize);
|
||||
}
|
||||
|
||||
if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes))
|
||||
{
|
||||
RecoverySectors = SubData[8] + (SubData[9] << 8)
|
||||
+ (SubData[10] << 16) + (SubData[11] << 24);
|
||||
if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes))
|
||||
{
|
||||
RecoverySectors = SubData[8] + (SubData[9] << 8)
|
||||
+ (SubData[10] << 16) + (SubData[11] << 24);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -118,12 +118,6 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
}
|
||||
}
|
||||
|
||||
//only the full .net framework will do other code pages than unicode/utf8
|
||||
private string DecodeDefault(byte[] bytes)
|
||||
{
|
||||
return ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length);
|
||||
}
|
||||
|
||||
private long UInt32To64(uint x, uint y)
|
||||
{
|
||||
long l = x;
|
||||
@@ -178,6 +172,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
}
|
||||
|
||||
internal long DataStartPosition { get; set; }
|
||||
|
||||
internal HostOS HostOS { get; private set; }
|
||||
|
||||
internal uint FileCRC { get; private set; }
|
||||
@@ -199,6 +194,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
internal FileFlags FileFlags => (FileFlags)Flags;
|
||||
|
||||
internal long CompressedSize { get; private set; }
|
||||
|
||||
internal long UncompressedSize { get; private set; }
|
||||
|
||||
internal string FileName { get; private set; }
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
@@ -17,14 +18,16 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
HeaderSize = baseHeader.HeaderSize;
|
||||
AdditionalSize = baseHeader.AdditionalSize;
|
||||
ReadBytes = baseHeader.ReadBytes;
|
||||
ArchiveEncoding = baseHeader.ArchiveEncoding;
|
||||
}
|
||||
|
||||
internal static RarHeader Create(RarCrcBinaryReader reader)
|
||||
internal static RarHeader Create(RarCrcBinaryReader reader, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
try
|
||||
{
|
||||
RarHeader header = new RarHeader();
|
||||
|
||||
header.ArchiveEncoding = archiveEncoding;
|
||||
reader.Mark();
|
||||
header.ReadStartFromReader(reader);
|
||||
header.ReadBytes += reader.CurrentReadByteCount;
|
||||
@@ -50,7 +53,8 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
}
|
||||
}
|
||||
|
||||
protected virtual void ReadFromReader(MarkingBinaryReader reader) {
|
||||
protected virtual void ReadFromReader(MarkingBinaryReader reader)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
@@ -76,10 +80,11 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
return header;
|
||||
}
|
||||
|
||||
private void VerifyHeaderCrc(ushort crc) {
|
||||
if (HeaderType != HeaderType.MarkHeader)
|
||||
private void VerifyHeaderCrc(ushort crc)
|
||||
{
|
||||
if (HeaderType != HeaderType.MarkHeader)
|
||||
{
|
||||
if (crc != HeadCRC)
|
||||
if (crc != HeadCRC)
|
||||
{
|
||||
throw new InvalidFormatException("rar header crc mismatch");
|
||||
}
|
||||
@@ -106,6 +111,8 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
|
||||
protected short HeaderSize { get; private set; }
|
||||
|
||||
internal ArchiveEncoding ArchiveEncoding { get; private set; }
|
||||
|
||||
/// <summary>
|
||||
/// This additional size of the header could be file data
|
||||
/// </summary>
|
||||
|
||||
@@ -117,7 +117,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
#if !NO_CRYPTO
|
||||
var reader = new RarCryptoBinaryReader(stream, Options.Password);
|
||||
|
||||
|
||||
if (IsEncrypted)
|
||||
{
|
||||
if (Options.Password == null)
|
||||
@@ -133,7 +133,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
|
||||
#endif
|
||||
|
||||
RarHeader header = RarHeader.Create(reader);
|
||||
RarHeader header = RarHeader.Create(reader, Options.ArchiveEncoding);
|
||||
if (header == null)
|
||||
{
|
||||
return null;
|
||||
@@ -141,110 +141,110 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
switch (header.HeaderType)
|
||||
{
|
||||
case HeaderType.ArchiveHeader:
|
||||
{
|
||||
var ah = header.PromoteHeader<ArchiveHeader>(reader);
|
||||
IsEncrypted = ah.HasPassword;
|
||||
return ah;
|
||||
}
|
||||
{
|
||||
var ah = header.PromoteHeader<ArchiveHeader>(reader);
|
||||
IsEncrypted = ah.HasPassword;
|
||||
return ah;
|
||||
}
|
||||
case HeaderType.MarkHeader:
|
||||
{
|
||||
return header.PromoteHeader<MarkHeader>(reader);
|
||||
}
|
||||
{
|
||||
return header.PromoteHeader<MarkHeader>(reader);
|
||||
}
|
||||
|
||||
case HeaderType.ProtectHeader:
|
||||
{
|
||||
ProtectHeader ph = header.PromoteHeader<ProtectHeader>(reader);
|
||||
|
||||
// skip the recovery record data, we do not use it.
|
||||
switch (StreamingMode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
reader.BaseStream.Position += ph.DataSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
reader.BaseStream.Skip(ph.DataSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
ProtectHeader ph = header.PromoteHeader<ProtectHeader>(reader);
|
||||
|
||||
return ph;
|
||||
}
|
||||
// skip the recovery record data, we do not use it.
|
||||
switch (StreamingMode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
reader.BaseStream.Position += ph.DataSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
reader.BaseStream.Skip(ph.DataSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
|
||||
return ph;
|
||||
}
|
||||
|
||||
case HeaderType.NewSubHeader:
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
//skip the data because it's useless?
|
||||
reader.BaseStream.Skip(fh.CompressedSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
//skip the data because it's useless?
|
||||
reader.BaseStream.Skip(fh.CompressedSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
case HeaderType.FileHeader:
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize);
|
||||
if (fh.Salt == null)
|
||||
{
|
||||
fh.PackedStream = ms;
|
||||
}
|
||||
else
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize);
|
||||
if (fh.Salt == null)
|
||||
{
|
||||
fh.PackedStream = ms;
|
||||
}
|
||||
else
|
||||
{
|
||||
#if !NO_CRYPTO
|
||||
fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.Salt);
|
||||
fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.Salt);
|
||||
#else
|
||||
throw new NotSupportedException("RarCrypto not supported");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
case HeaderType.EndArchiveHeader:
|
||||
{
|
||||
return header.PromoteHeader<EndArchiveHeader>(reader);
|
||||
}
|
||||
{
|
||||
return header.PromoteHeader<EndArchiveHeader>(reader);
|
||||
}
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid Rar Header: " + header.HeaderType);
|
||||
}
|
||||
{
|
||||
throw new InvalidFormatException("Invalid Rar Header: " + header.HeaderType);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -9,6 +9,7 @@ namespace SharpCompress.Common.Rar
     internal abstract class RarFilePart : FilePart
     {
         internal RarFilePart(MarkHeader mh, FileHeader fh)
+            : base(fh.ArchiveEncoding)
         {
             MarkHeader = mh;
             FileHeader = fh;
@@ -7,14 +7,15 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
internal class SevenZipFilePart : FilePart
|
||||
{
|
||||
private CompressionType? type;
|
||||
private readonly Stream stream;
|
||||
private readonly ArchiveDatabase database;
|
||||
private CompressionType? _type;
|
||||
private readonly Stream _stream;
|
||||
private readonly ArchiveDatabase _database;
|
||||
|
||||
internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry)
|
||||
internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry, ArchiveEncoding archiveEncoding)
|
||||
: base(archiveEncoding)
|
||||
{
|
||||
this.stream = stream;
|
||||
this.database = database;
|
||||
this._stream = stream;
|
||||
this._database = database;
|
||||
Index = index;
|
||||
Header = fileEntry;
|
||||
if (Header.HasStream)
|
||||
@@ -41,14 +42,14 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
return null;
|
||||
}
|
||||
var folderStream = database.GetFolderStream(stream, Folder, null);
|
||||
var folderStream = _database.GetFolderStream(_stream, Folder, null);
|
||||
|
||||
int firstFileIndex = database.FolderStartFileIndex[database.Folders.IndexOf(Folder)];
|
||||
int firstFileIndex = _database.FolderStartFileIndex[_database.Folders.IndexOf(Folder)];
|
||||
int skipCount = Index - firstFileIndex;
|
||||
long skipSize = 0;
|
||||
for (int i = 0; i < skipCount; i++)
|
||||
{
|
||||
skipSize += database.Files[firstFileIndex + i].Size;
|
||||
skipSize += _database.Files[firstFileIndex + i].Size;
|
||||
}
|
||||
if (skipSize > 0)
|
||||
{
|
||||
@@ -61,11 +62,11 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
get
|
||||
{
|
||||
if (type == null)
|
||||
if (_type == null)
|
||||
{
|
||||
type = GetCompression();
|
||||
_type = GetCompression();
|
||||
}
|
||||
return type.Value;
|
||||
return _type.Value;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,7 +85,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
var coder = Folder.Coders.First();
|
||||
switch (coder.MethodId.Id)
|
||||
{
|
||||
{
|
||||
case k_LZMA:
|
||||
case k_LZMA2:
|
||||
{
|
||||
|
||||
src/SharpCompress/Common/Tar/Headers/EntryType.cs (new file, 19 lines)
@@ -0,0 +1,19 @@
+namespace SharpCompress.Common.Tar.Headers
+{
+    internal enum EntryType : byte
+    {
+        File = 0,
+        OldFile = (byte)'0',
+        HardLink = (byte)'1',
+        SymLink = (byte)'2',
+        CharDevice = (byte)'3',
+        BlockDevice = (byte)'4',
+        Directory = (byte)'5',
+        Fifo = (byte)'6',
+        LongLink = (byte)'K',
+        LongName = (byte)'L',
+        SparseFile = (byte)'S',
+        VolumeHeader = (byte)'V',
+        GlobalExtendedHeader = (byte)'g'
+    }
+}
src/SharpCompress/Common/Tar/Headers/TarHeader.cs (new file, 275 lines)
@@ -0,0 +1,275 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
using SharpCompress.Converters;
|
||||
|
||||
namespace SharpCompress.Common.Tar.Headers
|
||||
{
|
||||
internal class TarHeader
|
||||
{
|
||||
internal static readonly DateTime Epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
|
||||
public TarHeader(ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
internal string Name { get; set; }
|
||||
|
||||
//internal int Mode { get; set; }
|
||||
//internal int UserId { get; set; }
|
||||
//internal string UserName { get; set; }
|
||||
//internal int GroupId { get; set; }
|
||||
//internal string GroupName { get; set; }
|
||||
internal long Size { get; set; }
|
||||
internal DateTime LastModifiedTime { get; set; }
|
||||
internal EntryType EntryType { get; set; }
|
||||
internal Stream PackedStream { get; set; }
|
||||
internal ArchiveEncoding ArchiveEncoding { get; }
|
||||
|
||||
internal const int BlockSize = 512;
|
||||
|
||||
internal void Write(Stream output)
|
||||
{
|
||||
byte[] buffer = new byte[BlockSize];
|
||||
|
||||
WriteOctalBytes(511, buffer, 100, 8); // file mode
|
||||
WriteOctalBytes(0, buffer, 108, 8); // owner ID
|
||||
WriteOctalBytes(0, buffer, 116, 8); // group ID
|
||||
|
||||
//ArchiveEncoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
|
||||
if (Name.Length > 100)
|
||||
{
|
||||
// Set mock filename and filetype to indicate the next block is the actual name of the file
|
||||
WriteStringBytes("././@LongLink", buffer, 0, 100);
|
||||
buffer[156] = (byte)EntryType.LongName;
|
||||
WriteOctalBytes(Name.Length + 1, buffer, 124, 12);
|
||||
}
|
||||
else
|
||||
{
|
||||
WriteStringBytes(Name, buffer, 0, 100);
|
||||
WriteOctalBytes(Size, buffer, 124, 12);
|
||||
var time = (long)(LastModifiedTime.ToUniversalTime() - Epoch).TotalSeconds;
|
||||
WriteOctalBytes(time, buffer, 136, 12);
|
||||
buffer[156] = (byte)EntryType;
|
||||
|
||||
if (Size >= 0x1FFFFFFFF)
|
||||
{
|
||||
byte[] bytes = DataConverter.BigEndian.GetBytes(Size);
|
||||
var bytes12 = new byte[12];
|
||||
bytes.CopyTo(bytes12, 12 - bytes.Length);
|
||||
bytes12[0] |= 0x80;
|
||||
bytes12.CopyTo(buffer, 124);
|
||||
}
|
||||
}
|
||||
|
||||
int crc = RecalculateChecksum(buffer);
|
||||
WriteOctalBytes(crc, buffer, 148, 8);
|
||||
|
||||
output.Write(buffer, 0, buffer.Length);
|
||||
|
||||
if (Name.Length > 100)
|
||||
{
|
||||
WriteLongFilenameHeader(output);
|
||||
Name = Name.Substring(0, 100);
|
||||
Write(output);
|
||||
}
|
||||
}
|
||||
|
||||
private void WriteLongFilenameHeader(Stream output)
|
||||
{
|
||||
byte[] nameBytes = ArchiveEncoding.Encode(Name);
|
||||
output.Write(nameBytes, 0, nameBytes.Length);
|
||||
|
||||
// pad to multiple of BlockSize bytes, and make sure a terminating null is added
|
||||
int numPaddingBytes = BlockSize - (nameBytes.Length % BlockSize);
|
||||
if (numPaddingBytes == 0)
|
||||
{
|
||||
numPaddingBytes = BlockSize;
|
||||
}
|
||||
output.Write(new byte[numPaddingBytes], 0, numPaddingBytes);
|
||||
}
|
||||
|
||||
internal bool Read(BinaryReader reader)
|
||||
{
|
||||
var buffer = ReadBlock(reader);
|
||||
if (buffer.Length == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (ReadEntryType(buffer) == EntryType.LongName)
|
||||
{
|
||||
Name = ReadLongName(reader, buffer);
|
||||
buffer = ReadBlock(reader);
|
||||
}
|
||||
else
|
||||
{
|
||||
Name = ArchiveEncoding.Decode(buffer, 0, 100).TrimNulls();
|
||||
}
|
||||
|
||||
EntryType = ReadEntryType(buffer);
|
||||
Size = ReadSize(buffer);
|
||||
|
||||
//Mode = ReadASCIIInt32Base8(buffer, 100, 7);
|
||||
//UserId = ReadASCIIInt32Base8(buffer, 108, 7);
|
||||
//GroupId = ReadASCIIInt32Base8(buffer, 116, 7);
|
||||
long unixTimeStamp = ReadASCIIInt64Base8(buffer, 136, 11);
|
||||
LastModifiedTime = Epoch.AddSeconds(unixTimeStamp).ToLocalTime();
|
||||
|
||||
Magic = ArchiveEncoding.Decode(buffer, 257, 6).TrimNulls();
|
||||
|
||||
if (!string.IsNullOrEmpty(Magic)
|
||||
&& "ustar".Equals(Magic))
|
||||
{
|
||||
string namePrefix = ArchiveEncoding.Decode(buffer, 345, 157);
|
||||
namePrefix = namePrefix.TrimNulls();
|
||||
if (!string.IsNullOrEmpty(namePrefix))
|
||||
{
|
||||
Name = namePrefix + "/" + Name;
|
||||
}
|
||||
}
|
||||
if (EntryType != EntryType.LongName
|
||||
&& Name.Length == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private string ReadLongName(BinaryReader reader, byte[] buffer)
|
||||
{
|
||||
var size = ReadSize(buffer);
|
||||
var nameLength = (int)size;
|
||||
var nameBytes = reader.ReadBytes(nameLength);
|
||||
var remainingBytesToRead = BlockSize - (nameLength % BlockSize);
|
||||
|
||||
// Read the rest of the block and discard the data
|
||||
if (remainingBytesToRead < BlockSize)
|
||||
{
|
||||
reader.ReadBytes(remainingBytesToRead);
|
||||
}
|
||||
return ArchiveEncoding.Decode(nameBytes, 0, nameBytes.Length).TrimNulls();
|
||||
}
|
||||
|
||||
private static EntryType ReadEntryType(byte[] buffer)
|
||||
{
|
||||
return (EntryType)buffer[156];
|
||||
}
|
||||
|
||||
private long ReadSize(byte[] buffer)
|
||||
{
|
||||
if ((buffer[124] & 0x80) == 0x80) // if size in binary
|
||||
{
|
||||
return DataConverter.BigEndian.GetInt64(buffer, 0x80);
|
||||
}
|
||||
return ReadASCIIInt64Base8(buffer, 124, 11);
|
||||
}
|
||||
|
||||
private static byte[] ReadBlock(BinaryReader reader)
|
||||
{
|
||||
byte[] buffer = reader.ReadBytes(BlockSize);
|
||||
|
||||
if (buffer.Length != 0 && buffer.Length < BlockSize)
|
||||
{
|
||||
throw new InvalidOperationException("Buffer is invalid size");
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
|
||||
private static void WriteStringBytes(string name, byte[] buffer, int offset, int length)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < length - 1 && i < name.Length; ++i)
|
||||
{
|
||||
buffer[offset + i] = (byte)name[i];
|
||||
}
|
||||
|
||||
for (; i < length; ++i)
|
||||
{
|
||||
buffer[offset + i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
private static void WriteOctalBytes(long value, byte[] buffer, int offset, int length)
|
||||
{
|
||||
string val = Convert.ToString(value, 8);
|
||||
int shift = length - val.Length - 1;
|
||||
for (int i = 0; i < shift; i++)
|
||||
{
|
||||
buffer[offset + i] = (byte)' ';
|
||||
}
|
||||
for (int i = 0; i < val.Length; i++)
|
||||
{
|
||||
buffer[offset + i + shift] = (byte)val[i];
|
||||
}
|
||||
}
|
||||
|
||||
private static int ReadASCIIInt32Base8(byte[] buffer, int offset, int count)
|
||||
{
|
||||
string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
|
||||
if (string.IsNullOrEmpty(s))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
return Convert.ToInt32(s, 8);
|
||||
}
|
||||
|
||||
private static long ReadASCIIInt64Base8(byte[] buffer, int offset, int count)
|
||||
{
|
||||
string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
|
||||
if (string.IsNullOrEmpty(s))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
return Convert.ToInt64(s, 8);
|
||||
}
|
||||
|
||||
private static long ReadASCIIInt64(byte[] buffer, int offset, int count)
|
||||
{
|
||||
string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
|
||||
if (string.IsNullOrEmpty(s))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
return Convert.ToInt64(s);
|
||||
}
|
||||
|
||||
internal static int RecalculateChecksum(byte[] buf)
|
||||
{
|
||||
// Set default value for checksum. That is 8 spaces.
|
||||
Encoding.UTF8.GetBytes(" ").CopyTo(buf, 148);
|
||||
|
||||
// Calculate checksum
|
||||
int headerChecksum = 0;
|
||||
foreach (byte b in buf)
|
||||
{
|
||||
headerChecksum += b;
|
||||
}
|
||||
return headerChecksum;
|
||||
}
|
||||
|
||||
internal static int RecalculateAltChecksum(byte[] buf)
|
||||
{
|
||||
Encoding.UTF8.GetBytes(" ").CopyTo(buf, 148);
|
||||
int headerChecksum = 0;
|
||||
foreach (byte b in buf)
|
||||
{
|
||||
if ((b & 0x80) == 0x80)
|
||||
{
|
||||
headerChecksum -= b ^ 0x80;
|
||||
}
|
||||
else
|
||||
{
|
||||
headerChecksum += b;
|
||||
}
|
||||
}
|
||||
return headerChecksum;
|
||||
}
|
||||
|
||||
public long? DataStartPosition { get; set; }
|
||||
|
||||
public string Magic { get; set; }
|
||||
}
|
||||
}
|
||||
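As a concrete illustration of the header fields that TarHeader.Write produces above (a standalone sketch using only the conventions shown in that code, not the library's internals): the size field at offset 124 is space-padded ASCII octal with a trailing NUL, and the checksum at offset 148 is the byte sum of the 512-byte block with the checksum field itself treated as eight spaces.

```csharp
using System;
using System.Text;

class TarOctalExample
{
    static void Main()
    {
        // A 1 MiB entry: 1048576 decimal == 4000000 octal.
        long size = 1048576;
        string octal = Convert.ToString(size, 8);
        // The 12-byte field: leading spaces, octal digits, trailing NUL,
        // mirroring the layout WriteOctalBytes produces in a zeroed buffer.
        string field = octal.PadLeft(11, ' ') + "\0";
        Console.WriteLine($"size field: \"{field}\" ({field.Length} bytes)");

        // Checksum: fill bytes 148..155 with spaces, then sum every byte.
        byte[] block = new byte[512];
        Encoding.ASCII.GetBytes("        ").CopyTo(block, 148);
        int checksum = 0;
        foreach (byte b in block)
        {
            checksum += b;
        }
        Console.WriteLine($"checksum of an otherwise empty block: {checksum}"); // 8 * 32 = 256
    }
}
```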
@@ -1,541 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
/// <summary>
|
||||
/// The TarBuffer class implements the tar archive concept
|
||||
/// of a buffered input stream. This concept goes back to the
|
||||
/// days of blocked tape drives and special io devices. In the
|
||||
/// C# universe, the only real function that this class
|
||||
/// performs is to ensure that files have the correct "record"
|
||||
/// size, or other tars will complain.
|
||||
/// <p>
|
||||
/// You should never have a need to access this class directly.
|
||||
/// TarBuffers are created by Tar IO Streams.
|
||||
/// </p>
|
||||
/// </summary>
|
||||
public class TarBuffer
|
||||
{
|
||||
|
||||
/* A quote from GNU tar man file on blocking and records
|
||||
A `tar' archive file contains a series of blocks. Each block
|
||||
contains `BLOCKSIZE' bytes. Although this format may be thought of as
|
||||
being on magnetic tape, other media are often used.
|
||||
|
||||
Each file archived is represented by a header block which describes
|
||||
the file, followed by zero or more blocks which give the contents of
|
||||
the file. At the end of the archive file there may be a block filled
|
||||
with binary zeros as an end-of-file marker. A reasonable system should
|
||||
write a block of zeros at the end, but must not assume that such a
|
||||
block exists when reading an archive.
|
||||
|
||||
The blocks may be "blocked" for physical I/O operations. Each
|
||||
record of N blocks is written with a single 'write ()'
|
||||
operation. On magnetic tapes, the result of such a write is a single
|
||||
record. When writing an archive, the last record of blocks should be
|
||||
written at the full size, with blocks after the zero block containing
|
||||
all zeros. When reading an archive, a reasonable system should
|
||||
properly handle an archive whose last record is shorter than the rest,
|
||||
or which contains garbage records after a zero block.
|
||||
*/
|
||||
|
||||
#region Constants
|
||||
/// <summary>
|
||||
/// The size of a block in a tar archive in bytes.
|
||||
/// </summary>
|
||||
/// <remarks>This is 512 bytes.</remarks>
|
||||
public const int BlockSize = 512;
|
||||
|
||||
/// <summary>
|
||||
/// The number of blocks in a default record.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// The default value is 20 blocks per record.
|
||||
/// </remarks>
|
||||
public const int DefaultBlockFactor = 20;
|
||||
|
||||
/// <summary>
|
||||
/// The size in bytes of a default record.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// The default size is 10KB.
|
||||
/// </remarks>
|
||||
public const int DefaultRecordSize = BlockSize * DefaultBlockFactor;
|
||||
#endregion
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size for this buffer
|
||||
/// </summary>
|
||||
/// <value>The record size in bytes.
|
||||
/// This is equal to the <see cref="BlockFactor"/> multiplied by the <see cref="BlockSize"/></value>
|
||||
public int RecordSize => recordSize;
|
||||
|
||||
/// <summary>
|
||||
/// Get the TAR Buffer's record size.
|
||||
/// </summary>
|
||||
/// <returns>The record size in bytes.
|
||||
/// This is equal to the <see cref="BlockFactor"/> multiplied by the <see cref="BlockSize"/></returns>
|
||||
[Obsolete("Use RecordSize property instead")]
|
||||
public int GetRecordSize()
|
||||
{
|
||||
return recordSize;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the Blocking factor for the buffer
|
||||
/// </summary>
|
||||
/// <value>This is the number of blocks in each record.</value>
|
||||
public int BlockFactor => blockFactor;
|
||||
|
||||
/// <summary>
|
||||
/// Get the TAR Buffer's block factor
|
||||
/// </summary>
|
||||
/// <returns>The block factor; the number of blocks per record.</returns>
|
||||
[Obsolete("Use BlockFactor property instead")]
|
||||
public int GetBlockFactor()
|
||||
{
|
||||
return blockFactor;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct a default TarBuffer
|
||||
/// </summary>
|
||||
protected TarBuffer()
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Create TarBuffer for reading with default BlockFactor
|
||||
/// </summary>
|
||||
/// <param name="inputStream">Stream to buffer</param>
|
||||
/// <returns>A new <see cref="TarBuffer"/> suitable for input.</returns>
|
||||
public static TarBuffer CreateInputTarBuffer(Stream inputStream)
|
||||
{
|
||||
if (inputStream == null) {
|
||||
throw new ArgumentNullException(nameof(inputStream));
|
||||
}
|
||||
|
||||
return CreateInputTarBuffer(inputStream, DefaultBlockFactor);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct TarBuffer for reading inputStream setting BlockFactor
|
||||
/// </summary>
|
||||
/// <param name="inputStream">Stream to buffer</param>
|
||||
/// <param name="blockFactor">Blocking factor to apply</param>
|
||||
/// <returns>A new <see cref="TarBuffer"/> suitable for input.</returns>
|
||||
public static TarBuffer CreateInputTarBuffer(Stream inputStream, int blockFactor)
|
||||
{
|
||||
if (inputStream == null) {
|
||||
throw new ArgumentNullException(nameof(inputStream));
|
||||
}
|
||||
|
||||
if (blockFactor <= 0) {
|
||||
throw new ArgumentOutOfRangeException(nameof(blockFactor), "Factor cannot be negative");
|
||||
}
|
||||
|
||||
var tarBuffer = new TarBuffer
|
||||
{
|
||||
inputStream = inputStream,
|
||||
outputStream = null
|
||||
};
|
||||
tarBuffer.Initialize(blockFactor);
|
||||
|
||||
return tarBuffer;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct TarBuffer for writing with default BlockFactor
|
||||
/// </summary>
|
||||
/// <param name="outputStream">output stream for buffer</param>
|
||||
/// <returns>A new <see cref="TarBuffer"/> suitable for output.</returns>
|
||||
public static TarBuffer CreateOutputTarBuffer(Stream outputStream)
|
||||
{
|
||||
if (outputStream == null) {
|
||||
throw new ArgumentNullException(nameof(outputStream));
|
||||
}
|
||||
|
||||
return CreateOutputTarBuffer(outputStream, DefaultBlockFactor);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct TarBuffer for writing Tar output to streams.
|
||||
/// </summary>
|
||||
/// <param name="outputStream">Output stream to write to.</param>
|
||||
/// <param name="blockFactor">Blocking factor to apply</param>
|
||||
/// <returns>A new <see cref="TarBuffer"/> suitable for output.</returns>
|
||||
public static TarBuffer CreateOutputTarBuffer(Stream outputStream, int blockFactor)
|
||||
{
|
||||
if (outputStream == null) {
|
||||
throw new ArgumentNullException(nameof(outputStream));
|
||||
}
|
||||
|
||||
if (blockFactor <= 0) {
|
||||
throw new ArgumentOutOfRangeException(nameof(blockFactor), "Factor cannot be negative");
|
||||
}
|
||||
|
||||
var tarBuffer = new TarBuffer();
|
||||
tarBuffer.inputStream = null;
|
||||
tarBuffer.outputStream = outputStream;
|
||||
tarBuffer.Initialize(blockFactor);
|
||||
|
||||
return tarBuffer;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Initialization common to all constructors.
|
||||
/// </summary>
|
||||
void Initialize(int archiveBlockFactor)
|
||||
{
|
||||
blockFactor = archiveBlockFactor;
|
||||
recordSize = archiveBlockFactor * BlockSize;
|
||||
recordBuffer = new byte[RecordSize];
|
||||
|
||||
if (inputStream != null) {
|
||||
currentRecordIndex = -1;
|
||||
currentBlockIndex = BlockFactor;
|
||||
} else {
|
||||
currentRecordIndex = 0;
|
||||
currentBlockIndex = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Determine if an archive block indicates End of Archive. End of
|
||||
/// archive is indicated by a block that consists entirely of null bytes.
|
||||
/// All remaining blocks for the record should also be null's
|
||||
/// However some older tars only do a couple of null blocks (Old GNU tar for one)
|
||||
/// and also partial records
|
||||
/// </summary>
|
||||
/// <param name = "block">The data block to check.</param>
|
||||
/// <returns>Returns true if the block is an EOF block; false otherwise.</returns>
|
||||
[Obsolete("Use IsEndOfArchiveBlock instead")]
|
||||
public bool IsEOFBlock(byte[] block)
|
||||
{
|
||||
if (block == null) {
|
||||
throw new ArgumentNullException(nameof(block));
|
||||
}
|
||||
|
||||
if (block.Length != BlockSize) {
|
||||
throw new ArgumentException("block length is invalid");
|
||||
}
|
||||
|
||||
for (int i = 0; i < BlockSize; ++i) {
|
||||
if (block[i] != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Determine if an archive block indicates the End of an Archive has been reached.
|
||||
/// End of archive is indicated by a block that consists entirely of null bytes.
|
||||
/// All remaining blocks for the record should also be null's
|
||||
/// However some older tars only do a couple of null blocks (Old GNU tar for one)
|
||||
/// and also partial records
|
||||
/// </summary>
|
||||
/// <param name = "block">The data block to check.</param>
|
||||
/// <returns>Returns true if the block is an EOF block; false otherwise.</returns>
|
||||
public static bool IsEndOfArchiveBlock(byte[] block)
|
||||
{
|
||||
if (block == null) {
|
||||
throw new ArgumentNullException(nameof(block));
|
||||
}
|
||||
|
||||
if (block.Length != BlockSize) {
|
||||
throw new ArgumentException("block length is invalid");
|
||||
}
|
||||
|
||||
for (int i = 0; i < BlockSize; ++i) {
|
||||
if (block[i] != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Skip over a block on the input stream.
|
||||
/// </summary>
|
||||
public void SkipBlock()
|
||||
{
|
||||
if (inputStream == null) {
|
||||
throw new TarException("no input stream defined");
|
||||
}
|
||||
|
||||
if (currentBlockIndex >= BlockFactor) {
|
||||
if (!ReadRecord()) {
|
||||
throw new TarException("Failed to read a record");
|
||||
}
|
||||
}
|
||||
|
||||
currentBlockIndex++;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Read a block from the input stream.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The block of data read.
|
||||
/// </returns>
|
||||
public byte[] ReadBlock()
|
||||
{
|
||||
if (inputStream == null) {
|
||||
throw new TarException("TarBuffer.ReadBlock - no input stream defined");
|
||||
}
|
||||
|
||||
if (currentBlockIndex >= BlockFactor) {
|
||||
if (!ReadRecord()) {
|
||||
throw new TarException("Failed to read a record");
|
||||
}
|
||||
}
|
||||
|
||||
byte[] result = new byte[BlockSize];
|
||||
|
||||
Array.Copy(recordBuffer, (currentBlockIndex * BlockSize), result, 0, BlockSize);
|
||||
currentBlockIndex++;
|
||||
return result;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Read a record from data stream.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// false if End-Of-File, else true.
|
||||
/// </returns>
|
||||
bool ReadRecord()
|
||||
{
|
||||
if (inputStream == null) {
|
||||
throw new TarException("no input stream stream defined");
|
||||
}
|
||||
|
||||
currentBlockIndex = 0;
|
||||
|
||||
int offset = 0;
|
||||
int bytesNeeded = RecordSize;
|
||||
|
||||
while (bytesNeeded > 0) {
|
||||
long numBytes = inputStream.Read(recordBuffer, offset, bytesNeeded);
|
||||
|
||||
//
|
||||
// NOTE
|
||||
// We have found EOF, and the record is not full!
|
||||
//
|
||||
// This is a broken archive. It does not follow the standard
|
||||
// blocking algorithm. However, because we are generous, and
|
||||
// it requires little effort, we will simply ignore the error
|
||||
// and continue as if the entire record were read. This does
|
||||
// not appear to break anything upstream. We used to return
|
||||
// false in this case.
|
||||
//
|
||||
// Thanks to 'Yohann.Roussel@alcatel.fr' for this fix.
|
||||
//
|
||||
if (numBytes <= 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
offset += (int)numBytes;
|
||||
bytesNeeded -= (int)numBytes;
|
||||
}
|
||||
|
||||
currentRecordIndex++;
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the current block number, within the current record, zero based.
|
||||
/// </summary>
|
||||
/// <remarks>Block numbers are zero based values</remarks>
|
||||
/// <seealso cref="RecordSize"/>
|
||||
public int CurrentBlock => currentBlockIndex;
|
||||
|
||||
/// <summary>
|
||||
/// Get/set flag indicating ownership of the underlying stream.
|
||||
/// When the flag is true <see cref="Close"></see> will close the underlying stream also.
|
||||
/// </summary>
|
||||
public bool IsStreamOwner {
|
||||
get => isStreamOwner_;
|
||||
set => isStreamOwner_ = value;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the current block number, within the current record, zero based.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The current zero based block number.
|
||||
/// </returns>
|
||||
/// <remarks>
|
||||
/// The absolute block number = (<see cref="GetCurrentRecordNum">record number</see> * <see cref="BlockFactor">block factor</see>) + <see cref="GetCurrentBlockNum">block number</see>.
|
||||
/// </remarks>
|
||||
[Obsolete("Use CurrentBlock property instead")]
|
||||
public int GetCurrentBlockNum()
|
||||
{
|
||||
return currentBlockIndex;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the current record number.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The current zero based record number.
|
||||
/// </returns>
|
||||
public int CurrentRecord => currentRecordIndex;
|
||||
|
||||
/// <summary>
|
||||
/// Get the current record number.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The current zero based record number.
|
||||
/// </returns>
|
||||
[Obsolete("Use CurrentRecord property instead")]
|
||||
public int GetCurrentRecordNum()
|
||||
{
|
||||
return currentRecordIndex;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write a block of data to the archive.
|
||||
/// </summary>
|
||||
/// <param name="block">
|
||||
/// The data to write to the archive.
|
||||
/// </param>
|
||||
public void WriteBlock(byte[] block)
|
||||
{
|
||||
if (block == null) {
|
||||
throw new ArgumentNullException(nameof(block));
|
||||
}
|
||||
|
||||
if (outputStream == null) {
|
||||
throw new TarException("TarBuffer.WriteBlock - no output stream defined");
|
||||
}
|
||||
|
||||
if (block.Length != BlockSize) {
|
||||
string errorText = string.Format("TarBuffer.WriteBlock - block to write has length '{0}' which is not the block size of '{1}'",
|
||||
block.Length, BlockSize);
|
||||
throw new TarException(errorText);
|
||||
}
|
||||
|
||||
if (currentBlockIndex >= BlockFactor) {
|
||||
WriteRecord();
|
||||
}
|
||||
|
||||
Array.Copy(block, 0, recordBuffer, (currentBlockIndex * BlockSize), BlockSize);
|
||||
currentBlockIndex++;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write an archive block to the archive, where the block may be
/// inside of a larger array buffer. The buffer must be at least
/// "offset plus block size" long.
|
||||
/// </summary>
|
||||
/// <param name="buffer">
|
||||
/// The buffer containing the record data to write.
|
||||
/// </param>
|
||||
/// <param name="offset">
|
||||
/// The offset of the record data within buffer.
|
||||
/// </param>
|
||||
public void WriteBlock(byte[] buffer, int offset)
|
||||
{
|
||||
if (buffer == null) {
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
|
||||
if (outputStream == null) {
|
||||
throw new TarException("TarBuffer.WriteBlock - no output stream stream defined");
|
||||
}
|
||||
|
||||
if ((offset < 0) || (offset >= buffer.Length)) {
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
|
||||
if ((offset + BlockSize) > buffer.Length) {
|
||||
string errorText = string.Format("TarBuffer.WriteBlock - record has length '{0}' with offset '{1}' which is less than the record size of '{2}'",
|
||||
buffer.Length, offset, recordSize);
|
||||
throw new TarException(errorText);
|
||||
}
|
||||
|
||||
if (currentBlockIndex >= BlockFactor) {
|
||||
WriteRecord();
|
||||
}
|
||||
|
||||
Array.Copy(buffer, offset, recordBuffer, (currentBlockIndex * BlockSize), BlockSize);
|
||||
|
||||
currentBlockIndex++;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write a TarBuffer record to the archive.
|
||||
/// </summary>
|
||||
void WriteRecord()
|
||||
{
|
||||
if (outputStream == null) {
|
||||
throw new TarException("TarBuffer.WriteRecord no output stream defined");
|
||||
}
|
||||
|
||||
outputStream.Write(recordBuffer, 0, RecordSize);
|
||||
outputStream.Flush();
|
||||
|
||||
currentBlockIndex = 0;
|
||||
currentRecordIndex++;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// WriteFinalRecord writes the current record buffer to output if any unwritten data is present.
|
||||
/// </summary>
|
||||
/// <remarks>Any trailing bytes are set to zero which is by definition correct behaviour
|
||||
/// for the end of a tar stream.</remarks>
|
||||
void WriteFinalRecord()
|
||||
{
|
||||
if (outputStream == null) {
|
||||
throw new TarException("TarBuffer.WriteFinalRecord no output stream defined");
|
||||
}
|
||||
|
||||
if (currentBlockIndex > 0) {
|
||||
int dataBytes = currentBlockIndex * BlockSize;
|
||||
Array.Clear(recordBuffer, dataBytes, RecordSize - dataBytes);
|
||||
WriteRecord();
|
||||
}
|
||||
|
||||
outputStream.Flush();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Close the TarBuffer. If this is an output buffer, also flush the
|
||||
/// current block before closing.
|
||||
/// </summary>
|
||||
public void Close()
|
||||
{
|
||||
if (outputStream != null) {
|
||||
WriteFinalRecord();
|
||||
|
||||
if (isStreamOwner_) {
|
||||
outputStream.Dispose();
|
||||
}
|
||||
outputStream = null;
|
||||
} else if (inputStream != null) {
|
||||
if (isStreamOwner_) {
|
||||
inputStream.Dispose();
|
||||
}
|
||||
inputStream = null;
|
||||
}
|
||||
}
|
||||
|
||||
#region Instance Fields
|
||||
Stream inputStream;
|
||||
Stream outputStream;
|
||||
|
||||
byte[] recordBuffer;
|
||||
int currentBlockIndex;
|
||||
int currentRecordIndex;
|
||||
|
||||
int recordSize = DefaultRecordSize;
|
||||
int blockFactor = DefaultBlockFactor;
|
||||
bool isStreamOwner_ = true;
|
||||
#endregion
|
||||
}
|
||||
}
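// Editor's note: a rough usage sketch of the TarBuffer blocking API above; it is not part of
// the repository. "archive.tar" is a placeholder path, and CreateInputTarBuffer is the
// input-side counterpart of the CreateOutputTarBuffer factory shown earlier.
// (requires using System.IO;)
using (var stream = File.OpenRead("archive.tar"))
{
    var tarBuffer = TarBuffer.CreateInputTarBuffer(stream, TarBuffer.DefaultBlockFactor);
    byte[] block = tarBuffer.ReadBlock();                     // first 512-byte block of the archive
    bool endOfArchive = TarBuffer.IsEndOfArchiveBlock(block); // an all-zero block marks end of archive
    tarBuffer.Close();                                        // also disposes stream while IsStreamOwner is true
}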
|
||||
@@ -1,7 +1,9 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
@@ -15,8 +17,6 @@ namespace SharpCompress.Common.Tar
|
||||
CompressionType = type;
|
||||
}
|
||||
|
||||
internal TarHeader Header => filePart.Header;
|
||||
|
||||
public override CompressionType CompressionType { get; }
|
||||
|
||||
public override long Crc => 0;
|
||||
@@ -27,7 +27,7 @@ namespace SharpCompress.Common.Tar
|
||||
|
||||
public override long Size => filePart.Header.Size;
|
||||
|
||||
public override DateTime? LastModifiedTime => filePart.Header.ModTime;
|
||||
public override DateTime? LastModifiedTime => filePart.Header.LastModifiedTime;
|
||||
|
||||
public override DateTime? CreatedTime => null;
|
||||
|
||||
@@ -37,27 +37,26 @@ namespace SharpCompress.Common.Tar
|
||||
|
||||
public override bool IsEncrypted => false;
|
||||
|
||||
public override bool IsDirectory => filePart.Header.TypeFlag == TarHeader.LF_DIR;
|
||||
public override bool IsDirectory => filePart.Header.EntryType == EntryType.Directory;
|
||||
|
||||
public override bool IsSplit => false;
|
||||
|
||||
internal override IEnumerable<FilePart> Parts => filePart.AsEnumerable<FilePart>();
|
||||
|
||||
internal static IEnumerable<TarEntry> GetEntries(StreamingMode mode, Stream stream,
|
||||
CompressionType compressionType)
|
||||
CompressionType compressionType, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
using (var tarStream = new TarInputStream(stream))
|
||||
foreach (TarHeader h in TarHeaderFactory.ReadHeader(mode, stream, archiveEncoding))
|
||||
{
|
||||
TarHeader header = null;
|
||||
while ((header = tarStream.GetNextEntry()) != null)
|
||||
if (h != null)
|
||||
{
|
||||
if (mode == StreamingMode.Seekable)
|
||||
{
|
||||
yield return new TarEntry(new TarFilePart(header, stream), compressionType);
|
||||
yield return new TarEntry(new TarFilePart(h, stream), compressionType);
|
||||
}
|
||||
else
|
||||
{
|
||||
yield return new TarEntry(new TarFilePart(header, null), compressionType);
|
||||
yield return new TarEntry(new TarFilePart(h, null), compressionType);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
/// <summary>
|
||||
/// TarException represents exceptions specific to Tar classes and code.
|
||||
/// </summary>
|
||||
public class TarException : ArchiveException
|
||||
{
|
||||
/// <summary>
|
||||
/// Initialise a new instance of <see cref="TarException" /> with its message string.
|
||||
/// </summary>
|
||||
/// <param name="message">A <see cref="string"/> that describes the error.</param>
|
||||
public TarException(string message)
|
||||
: base(message)
|
||||
{
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,15 +1,17 @@
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
internal class TarFilePart : FilePart
|
||||
{
|
||||
private readonly Stream seekableStream;
|
||||
private readonly Stream _seekableStream;
|
||||
|
||||
internal TarFilePart(TarHeader header, Stream seekableStream)
|
||||
: base(header.ArchiveEncoding)
|
||||
{
|
||||
this.seekableStream = seekableStream;
|
||||
this._seekableStream = seekableStream;
|
||||
Header = header;
|
||||
}
|
||||
|
||||
@@ -19,10 +21,10 @@ namespace SharpCompress.Common.Tar
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (seekableStream != null)
|
||||
if (_seekableStream != null)
|
||||
{
|
||||
seekableStream.Position = Header.DataStartPosition.Value;
|
||||
return new ReadOnlySubStream(seekableStream, Header.Size);
|
||||
_seekableStream.Position = Header.DataStartPosition.Value;
|
||||
return new ReadOnlySubStream(_seekableStream, Header.Size);
|
||||
}
|
||||
return Header.PackedStream;
|
||||
}
|
||||
|
||||
(File diff suppressed because it is too large.)
src/SharpCompress/Common/Tar/TarHeaderFactory.cs (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
internal static class TarHeaderFactory
|
||||
{
|
||||
internal static IEnumerable<TarHeader> ReadHeader(StreamingMode mode, Stream stream, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
TarHeader header = null;
|
||||
try
|
||||
{
|
||||
BinaryReader reader = new BinaryReader(stream);
|
||||
header = new TarHeader(archiveEncoding);
|
||||
|
||||
if (!header.Read(reader))
|
||||
{
|
||||
yield break;
|
||||
}
|
||||
switch (mode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
header.DataStartPosition = reader.BaseStream.Position;
|
||||
|
||||
// skip forward to the next 512-byte block boundary (tar data is padded to whole blocks)
|
||||
reader.BaseStream.Position += PadTo512(header.Size);
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
}
|
||||
catch
|
||||
{
|
||||
header = null;
|
||||
}
|
||||
yield return header;
|
||||
}
|
||||
}
|
||||
|
||||
private static long PadTo512(long size)
|
||||
{
|
||||
int zeros = (int)(size % 512);
|
||||
if (zeros == 0)
|
||||
{
|
||||
return size;
|
||||
}
|
||||
return 512 - zeros + size;
|
||||
}
|
||||
}
|
||||
}
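// Editor's note: a quick check of the PadTo512 rounding above (illustrative values only).
//   size = 1000 -> zeros = 1000 % 512 = 488 -> 512 - 488 + 1000 = 1024
//   size = 1024 -> zeros = 0                -> returned unchanged
//   size = 1    -> zeros = 1                -> 512 - 1 + 1      = 512
// In other words, entry data is always advanced to the next 512-byte block boundary before
// the following header is read.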
|
||||
@@ -1,547 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
/// <summary>
|
||||
/// The TarInputStream reads a UNIX tar archive as an InputStream.
|
||||
/// Methods are provided to position at each successive entry in
/// the archive, and then read each entry as a normal input stream
/// using Read().
|
||||
/// </summary>
|
||||
public class TarInputStream : Stream
|
||||
{
|
||||
#region Constructors
|
||||
|
||||
/// <summary>
|
||||
/// Construct a TarInputStream with default block factor
|
||||
/// </summary>
|
||||
/// <param name="inputStream">stream to source data from</param>
|
||||
public TarInputStream(Stream inputStream)
|
||||
: this(inputStream, TarBuffer.DefaultBlockFactor)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct a TarInputStream with user specified block factor
|
||||
/// </summary>
|
||||
/// <param name="inputStream">stream to source data from</param>
|
||||
/// <param name="blockFactor">block factor to apply to archive</param>
|
||||
public TarInputStream(Stream inputStream, int blockFactor)
|
||||
{
|
||||
this.inputStream = inputStream;
|
||||
tarBuffer = TarBuffer.CreateInputTarBuffer(inputStream, blockFactor);
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
/// <summary>
|
||||
/// Get/set flag indicating ownership of the underlying stream.
|
||||
/// When the flag is true <see cref="Close"></see> will close the underlying stream also.
|
||||
/// </summary>
|
||||
public bool IsStreamOwner { get => tarBuffer.IsStreamOwner; set => tarBuffer.IsStreamOwner = value; }
|
||||
|
||||
#region Stream Overrides
|
||||
|
||||
/// <summary>
|
||||
/// Gets a value indicating whether the current stream supports reading
|
||||
/// </summary>
|
||||
public override bool CanRead => inputStream.CanRead;
|
||||
|
||||
/// <summary>
|
||||
/// Gets a value indicating whether the current stream supports seeking
|
||||
/// This property always returns false.
|
||||
/// </summary>
|
||||
public override bool CanSeek => false;
|
||||
|
||||
/// <summary>
|
||||
/// Gets a value indicating if the stream supports writing.
|
||||
/// This property always returns false.
|
||||
/// </summary>
|
||||
public override bool CanWrite => false;
|
||||
|
||||
/// <summary>
|
||||
/// The length in bytes of the stream
|
||||
/// </summary>
|
||||
public override long Length => inputStream.Length;
|
||||
|
||||
/// <summary>
|
||||
/// Gets or sets the position within the stream.
|
||||
/// Setting the Position is not supported and throws a NotSupportedException
|
||||
/// </summary>
|
||||
/// <exception cref="NotSupportedException">Any attempt to set position</exception>
|
||||
public override long Position { get => inputStream.Position; set => throw new NotSupportedException("TarInputStream Seek not supported"); }
|
||||
|
||||
/// <summary>
|
||||
/// Flushes the baseInputStream
|
||||
/// </summary>
|
||||
public override void Flush()
|
||||
{
|
||||
inputStream.Flush();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Set the streams position. This operation is not supported and will throw a NotSupportedException
|
||||
/// </summary>
|
||||
/// <param name="offset">The offset relative to the origin to seek to.</param>
|
||||
/// <param name="origin">The <see cref="SeekOrigin"/> to start seeking from.</param>
|
||||
/// <returns>The new position in the stream.</returns>
|
||||
/// <exception cref="NotSupportedException">Any access</exception>
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
throw new NotSupportedException("TarInputStream Seek not supported");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the length of the stream
|
||||
/// This operation is not supported and will throw a NotSupportedException
|
||||
/// </summary>
|
||||
/// <param name="value">The new stream length.</param>
|
||||
/// <exception cref="NotSupportedException">Any access</exception>
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotSupportedException("TarInputStream SetLength not supported");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Writes a block of bytes to this stream using data from a buffer.
|
||||
/// This operation is not supported and will throw a NotSupportedException
|
||||
/// </summary>
|
||||
/// <param name="buffer">The buffer containing bytes to write.</param>
|
||||
/// <param name="offset">The offset in the buffer of the frist byte to write.</param>
|
||||
/// <param name="count">The number of bytes to write.</param>
|
||||
/// <exception cref="NotSupportedException">Any access</exception>
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
throw new NotSupportedException("TarInputStream Write not supported");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Writes a byte to the current position in the file stream.
|
||||
/// This operation is not supported and will throw a NotSupportedException
|
||||
/// </summary>
|
||||
/// <param name="value">The byte value to write.</param>
|
||||
/// <exception cref="NotSupportedException">Any access</exception>
|
||||
public override void WriteByte(byte value)
|
||||
{
|
||||
throw new NotSupportedException("TarInputStream WriteByte not supported");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Reads a byte from the current tar archive entry.
|
||||
/// </summary>
|
||||
/// <returns>A byte cast to an int; -1 if at the end of the stream.</returns>
|
||||
public override int ReadByte()
|
||||
{
|
||||
byte[] oneByteBuffer = new byte[1];
|
||||
int num = Read(oneByteBuffer, 0, 1);
|
||||
if (num <= 0)
|
||||
{
|
||||
// return -1 to indicate that no byte was read.
|
||||
return -1;
|
||||
}
|
||||
return oneByteBuffer[0];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Reads bytes from the current tar archive entry.
|
||||
///
|
||||
/// This method is aware of the boundaries of the current
|
||||
/// entry in the archive and will deal with them appropriately
|
||||
/// </summary>
|
||||
/// <param name="buffer">
|
||||
/// The buffer into which to place bytes read.
|
||||
/// </param>
|
||||
/// <param name="offset">
|
||||
/// The offset at which to place bytes read.
|
||||
/// </param>
|
||||
/// <param name="count">
|
||||
/// The number of bytes to read.
|
||||
/// </param>
|
||||
/// <returns>
|
||||
/// The number of bytes read, or 0 at end of stream/EOF.
|
||||
/// </returns>
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
|
||||
int totalRead = 0;
|
||||
|
||||
if (entryOffset >= entrySize)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
long numToRead = count;
|
||||
|
||||
if ((numToRead + entryOffset) > entrySize)
|
||||
{
|
||||
numToRead = entrySize - entryOffset;
|
||||
}
|
||||
|
||||
if (readBuffer != null)
|
||||
{
|
||||
int sz = (numToRead > readBuffer.Length) ? readBuffer.Length : (int)numToRead;
|
||||
|
||||
Array.Copy(readBuffer, 0, buffer, offset, sz);
|
||||
|
||||
if (sz >= readBuffer.Length)
|
||||
{
|
||||
readBuffer = null;
|
||||
}
|
||||
else
|
||||
{
|
||||
int newLen = readBuffer.Length - sz;
|
||||
byte[] newBuf = new byte[newLen];
|
||||
Array.Copy(readBuffer, sz, newBuf, 0, newLen);
|
||||
readBuffer = newBuf;
|
||||
}
|
||||
|
||||
totalRead += sz;
|
||||
numToRead -= sz;
|
||||
offset += sz;
|
||||
}
|
||||
|
||||
while (numToRead > 0)
|
||||
{
|
||||
byte[] rec = tarBuffer.ReadBlock();
|
||||
if (rec == null)
|
||||
{
|
||||
// Unexpected EOF!
|
||||
throw new TarException("unexpected EOF with " + numToRead + " bytes unread");
|
||||
}
|
||||
|
||||
var sz = (int)numToRead;
|
||||
int recLen = rec.Length;
|
||||
|
||||
if (recLen > sz)
|
||||
{
|
||||
Array.Copy(rec, 0, buffer, offset, sz);
|
||||
readBuffer = new byte[recLen - sz];
|
||||
Array.Copy(rec, sz, readBuffer, 0, recLen - sz);
|
||||
}
|
||||
else
|
||||
{
|
||||
sz = recLen;
|
||||
Array.Copy(rec, 0, buffer, offset, recLen);
|
||||
}
|
||||
|
||||
totalRead += sz;
|
||||
numToRead -= sz;
|
||||
offset += sz;
|
||||
}
|
||||
|
||||
entryOffset += totalRead;
|
||||
|
||||
return totalRead;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Closes this stream. Calls the TarBuffer's close() method.
|
||||
/// The underlying stream is closed by the TarBuffer.
|
||||
/// </summary>
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (disposing)
|
||||
{
|
||||
tarBuffer.Close();
|
||||
}
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size being used by this stream's TarBuffer.
|
||||
/// </summary>
|
||||
public int RecordSize => tarBuffer.RecordSize;
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size being used by this stream's TarBuffer.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// TarBuffer record size.
|
||||
/// </returns>
|
||||
[Obsolete("Use RecordSize property instead")]
|
||||
public int GetRecordSize()
|
||||
{
|
||||
return tarBuffer.RecordSize;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the available data that can be read from the current
|
||||
/// entry in the archive. This does not indicate how much data
|
||||
/// is left in the entire archive, only in the current entry.
|
||||
/// This value is determined from the entry's size header field
|
||||
/// and the amount of data already read from the current entry.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The number of available bytes for the current entry.
|
||||
/// </returns>
|
||||
public long Available => entrySize - entryOffset;
|
||||
|
||||
/// <summary>
|
||||
/// Skip bytes in the input buffer. This skips bytes in the
|
||||
/// current entry's data, not the entire archive, and will
|
||||
/// stop at the end of the current entry's data if the number
|
||||
/// to skip extends beyond that point.
|
||||
/// </summary>
|
||||
/// <param name="skipCount">
|
||||
/// The number of bytes to skip.
|
||||
/// </param>
|
||||
public void Skip(long skipCount)
|
||||
{
|
||||
// TODO: REVIEW efficiency of TarInputStream.Skip
|
||||
// This is horribly inefficient, but it ensures that we
|
||||
// properly skip over bytes via the TarBuffer...
|
||||
//
|
||||
byte[] skipBuf = new byte[8 * 1024];
|
||||
|
||||
for (long num = skipCount; num > 0;)
|
||||
{
|
||||
int toRead = num > skipBuf.Length ? skipBuf.Length : (int)num;
|
||||
int numRead = Read(skipBuf, 0, toRead);
|
||||
|
||||
if (numRead == -1)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
num -= numRead;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Return a value of true if marking is supported; false otherwise.
|
||||
/// </summary>
|
||||
/// <remarks>Currently marking is not supported, the return value is always false.</remarks>
|
||||
public bool IsMarkSupported => false;
|
||||
|
||||
/// <summary>
|
||||
/// Since we do not support marking just yet, we do nothing.
|
||||
/// </summary>
|
||||
/// <param name ="markLimit">
|
||||
/// The limit to mark.
|
||||
/// </param>
|
||||
public void Mark(int markLimit)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Since we do not support marking just yet, we do nothing.
|
||||
/// </summary>
|
||||
public void Reset()
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the next entry in this tar archive. This will skip
|
||||
/// over any remaining data in the current entry, if there
|
||||
/// is one, and place the input stream at the header of the
|
||||
/// next entry, and read the header and instantiate a new
|
||||
/// TarEntry from the header bytes and return that entry.
|
||||
/// If there are no more entries in the archive, null will
|
||||
/// be returned to indicate that the end of the archive has
|
||||
/// been reached.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The next TarEntry in the archive, or null.
|
||||
/// </returns>
|
||||
public TarHeader GetNextEntry()
|
||||
{
|
||||
if (hasHitEOF)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
if (currentEntry != null)
|
||||
{
|
||||
SkipToNextEntry();
|
||||
}
|
||||
|
||||
byte[] headerBuf = tarBuffer.ReadBlock();
|
||||
|
||||
if (headerBuf == null)
|
||||
{
|
||||
hasHitEOF = true;
|
||||
}
|
||||
else
|
||||
hasHitEOF |= TarBuffer.IsEndOfArchiveBlock(headerBuf);
|
||||
|
||||
if (hasHitEOF)
|
||||
{
|
||||
currentEntry = null;
|
||||
}
|
||||
else
|
||||
{
|
||||
try
|
||||
{
|
||||
var header = new TarHeader();
|
||||
header.ParseBuffer(headerBuf);
|
||||
if (!header.IsChecksumValid)
|
||||
{
|
||||
throw new TarException("Header checksum is invalid");
|
||||
}
|
||||
this.entryOffset = 0;
|
||||
this.entrySize = header.Size;
|
||||
|
||||
StringBuilder longName = null;
|
||||
|
||||
if (header.TypeFlag == TarHeader.LF_GNU_LONGNAME)
|
||||
{
|
||||
byte[] nameBuffer = new byte[TarBuffer.BlockSize];
|
||||
long numToRead = this.entrySize;
|
||||
|
||||
longName = new StringBuilder();
|
||||
|
||||
while (numToRead > 0)
|
||||
{
|
||||
int numRead = this.Read(nameBuffer, 0, (numToRead > nameBuffer.Length ? nameBuffer.Length : (int)numToRead));
|
||||
|
||||
if (numRead == -1)
|
||||
{
|
||||
throw new TarException("Failed to read long name entry");
|
||||
}
|
||||
|
||||
longName.Append(TarHeader.ParseName(nameBuffer, 0, numRead).ToString());
|
||||
numToRead -= numRead;
|
||||
}
|
||||
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag == TarHeader.LF_GHDR)
|
||||
{
|
||||
// POSIX global extended header
|
||||
// Ignore things we don't understand completely for now
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag == TarHeader.LF_XHDR)
|
||||
{
|
||||
// POSIX extended header
|
||||
// Ignore things we don't understand completely for now
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag == TarHeader.LF_GNU_VOLHDR)
|
||||
{
|
||||
// TODO: could show volume name when verbose
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag != TarHeader.LF_NORMAL &&
|
||||
header.TypeFlag != TarHeader.LF_OLDNORM &&
|
||||
header.TypeFlag != TarHeader.LF_LINK &&
|
||||
header.TypeFlag != TarHeader.LF_SYMLINK &&
|
||||
header.TypeFlag != TarHeader.LF_DIR)
|
||||
{
|
||||
// Ignore things we don't understand completely for now
|
||||
SkipToNextEntry();
|
||||
headerBuf = tarBuffer.ReadBlock();
|
||||
}
|
||||
currentEntry = new TarHeader();
|
||||
|
||||
if (longName != null)
|
||||
{
|
||||
currentEntry.Name = longName.ToString();
|
||||
}
|
||||
|
||||
// Magic was checked here for 'ustar' but there are multiple valid possibilities
|
||||
// so this is not done anymore.
|
||||
|
||||
entryOffset = 0;
|
||||
|
||||
// TODO: Review How do we resolve this discrepancy?!
|
||||
entrySize = this.currentEntry.Size;
|
||||
}
|
||||
catch (TarException ex)
|
||||
{
|
||||
entrySize = 0;
|
||||
entryOffset = 0;
|
||||
currentEntry = null;
|
||||
string errorText = $"Bad header in record {tarBuffer.CurrentRecord} block {tarBuffer.CurrentBlock} {ex.Message}";
|
||||
throw new TarException(errorText);
|
||||
}
|
||||
}
|
||||
return currentEntry;
|
||||
}
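// Editor's note: for context, the LF_GNU_LONGNAME handling above relies on the GNU tar
// long-name convention: a pseudo-entry named "././@LongLink" whose data blocks carry the
// real (overlong) file name, immediately followed by the real header block, e.g.
//   [header: name = "././@LongLink", typeflag = 'L', size = N]
//   [one or more 512-byte data blocks holding the N-byte name]
//   [header: the actual entry, whose short name is then replaced by the long one]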
|
||||
|
||||
/// <summary>
|
||||
/// Copies the contents of the current tar archive entry directly into
|
||||
/// an output stream.
|
||||
/// </summary>
|
||||
/// <param name="outputStream">
|
||||
/// The OutputStream into which to write the entry's data.
|
||||
/// </param>
|
||||
public void CopyEntryContents(Stream outputStream)
|
||||
{
|
||||
byte[] tempBuffer = new byte[32 * 1024];
|
||||
|
||||
while (true)
|
||||
{
|
||||
int numRead = Read(tempBuffer, 0, tempBuffer.Length);
|
||||
if (numRead <= 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
outputStream.Write(tempBuffer, 0, numRead);
|
||||
}
|
||||
}
|
||||
|
||||
private void SkipToNextEntry()
|
||||
{
|
||||
long numToSkip = entrySize - entryOffset;
|
||||
|
||||
if (numToSkip > 0)
|
||||
{
|
||||
Skip(numToSkip);
|
||||
}
|
||||
|
||||
readBuffer = null;
|
||||
}
|
||||
|
||||
#region Instance Fields
|
||||
|
||||
/// <summary>
|
||||
/// Flag set when last block has been read
|
||||
/// </summary>
|
||||
protected bool hasHitEOF;
|
||||
|
||||
/// <summary>
|
||||
/// Size of this entry as recorded in header
|
||||
/// </summary>
|
||||
protected long entrySize;
|
||||
|
||||
/// <summary>
|
||||
/// Number of bytes read for this entry so far
|
||||
/// </summary>
|
||||
protected long entryOffset;
|
||||
|
||||
/// <summary>
|
||||
/// Buffer used with calls to <code>Read()</code>
|
||||
/// </summary>
|
||||
protected byte[] readBuffer;
|
||||
|
||||
/// <summary>
|
||||
/// Working buffer
|
||||
/// </summary>
|
||||
protected TarBuffer tarBuffer;
|
||||
|
||||
/// <summary>
|
||||
/// Current entry being read
|
||||
/// </summary>
|
||||
private TarHeader currentEntry;
|
||||
|
||||
/// <summary>
|
||||
/// Stream used as the source of input data.
|
||||
/// </summary>
|
||||
private readonly Stream inputStream;
|
||||
|
||||
#endregion
|
||||
}
|
||||
}
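// Editor's note: a rough sketch of how the TarInputStream removed above was typically
// consumed; it is not code from the repository, and the path names are placeholders.
// (requires using System.IO;)
using (var stream = File.OpenRead("archive.tar"))
using (var tar = new TarInputStream(stream))
{
    TarHeader header;
    while ((header = tar.GetNextEntry()) != null)
    {
        if (header.Name.EndsWith("/"))
        {
            continue; // skip directory entries in this sketch
        }
        using (var destination = File.Create(Path.GetFileName(header.Name)))
        {
            tar.CopyEntryContents(destination); // copies exactly header.Size bytes
        }
    }
}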
|
||||
@@ -1,417 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
/// <summary>
|
||||
/// The TarOutputStream writes a UNIX tar archive as an OutputStream.
|
||||
/// Methods are provided to put entries, and then write their contents
|
||||
/// by writing to this stream using write().
|
||||
/// </summary>
|
||||
/// public
|
||||
public class TarOutputStream : Stream
|
||||
{
|
||||
#region Constructors
|
||||
/// <summary>
|
||||
/// Construct TarOutputStream using default block factor
|
||||
/// </summary>
|
||||
/// <param name="outputStream">stream to write to</param>
|
||||
public TarOutputStream(Stream outputStream)
|
||||
: this(outputStream, TarBuffer.DefaultBlockFactor)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct TarOutputStream with user specified block factor
|
||||
/// </summary>
|
||||
/// <param name="outputStream">stream to write to</param>
|
||||
/// <param name="blockFactor">blocking factor</param>
|
||||
public TarOutputStream(Stream outputStream, int blockFactor)
|
||||
{
|
||||
if (outputStream == null) {
|
||||
throw new ArgumentNullException(nameof(outputStream));
|
||||
}
|
||||
|
||||
this.outputStream = outputStream;
|
||||
buffer = TarBuffer.CreateOutputTarBuffer(outputStream, blockFactor);
|
||||
|
||||
assemblyBuffer = new byte[TarBuffer.BlockSize];
|
||||
blockBuffer = new byte[TarBuffer.BlockSize];
|
||||
}
|
||||
#endregion
|
||||
|
||||
/// <summary>
|
||||
/// Get/set flag indicating ownership of the underlying stream.
|
||||
/// When the flag is true <see cref="Close"></see> will close the underlying stream also.
|
||||
/// </summary>
|
||||
public bool IsStreamOwner {
|
||||
get => buffer.IsStreamOwner;
|
||||
set => buffer.IsStreamOwner = value;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// true if the stream supports reading; otherwise, false.
|
||||
/// </summary>
|
||||
public override bool CanRead => outputStream.CanRead;
|
||||
|
||||
/// <summary>
|
||||
/// true if the stream supports seeking; otherwise, false.
|
||||
/// </summary>
|
||||
public override bool CanSeek => outputStream.CanSeek;
|
||||
|
||||
/// <summary>
|
||||
/// true if stream supports writing; otherwise, false.
|
||||
/// </summary>
|
||||
public override bool CanWrite => outputStream.CanWrite;
|
||||
|
||||
/// <summary>
|
||||
/// length of stream in bytes
|
||||
/// </summary>
|
||||
public override long Length => outputStream.Length;
|
||||
|
||||
/// <summary>
|
||||
/// gets or sets the position within the current stream.
|
||||
/// </summary>
|
||||
public override long Position {
|
||||
get => outputStream.Position;
|
||||
set => outputStream.Position = value;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// set the position within the current stream
|
||||
/// </summary>
|
||||
/// <param name="offset">The offset relative to the <paramref name="origin"/> to seek to</param>
|
||||
/// <param name="origin">The <see cref="SeekOrigin"/> to seek from.</param>
|
||||
/// <returns>The new position in the stream.</returns>
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
return outputStream.Seek(offset, origin);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Set the length of the current stream
|
||||
/// </summary>
|
||||
/// <param name="value">The new stream length.</param>
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
outputStream.SetLength(value);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Read a byte from the stream and advance the position within the stream
|
||||
/// by one byte or returns -1 if at the end of the stream.
|
||||
/// </summary>
|
||||
/// <returns>The byte value or -1 if at end of stream</returns>
|
||||
public override int ReadByte()
|
||||
{
|
||||
return outputStream.ReadByte();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// read bytes from the current stream and advance the position within the
|
||||
/// stream by the number of bytes read.
|
||||
/// </summary>
|
||||
/// <param name="buffer">The buffer to store read bytes in.</param>
|
||||
/// <param name="offset">The index into the buffer to being storing bytes at.</param>
|
||||
/// <param name="count">The desired number of bytes to read.</param>
|
||||
/// <returns>The total number of bytes read, or zero if at the end of the stream.
|
||||
/// The number of bytes may be less than the <paramref name="count">count</paramref>
|
||||
/// requested if data is not available.</returns>
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
return outputStream.Read(buffer, offset, count);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// All buffered data is written to destination
|
||||
/// </summary>
|
||||
public override void Flush()
|
||||
{
|
||||
outputStream.Flush();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ends the TAR archive without closing the underlying OutputStream.
|
||||
/// The result is that the EOF block of nulls is written.
|
||||
/// </summary>
|
||||
public void Finish()
|
||||
{
|
||||
if (IsEntryOpen) {
|
||||
CloseEntry();
|
||||
}
|
||||
WriteEofBlock();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ends the TAR archive and closes the underlying OutputStream.
|
||||
/// </summary>
|
||||
/// <remarks>This means that Finish() is called followed by calling the
|
||||
/// TarBuffer's Close().</remarks>
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (!isClosed) {
|
||||
isClosed = true;
|
||||
Finish();
|
||||
buffer.Close();
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size being used by this stream's TarBuffer.
|
||||
/// </summary>
|
||||
public int RecordSize => buffer.RecordSize;
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size being used by this stream's TarBuffer.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The TarBuffer record size.
|
||||
/// </returns>
|
||||
[Obsolete("Use RecordSize property instead")]
|
||||
public int GetRecordSize()
|
||||
{
|
||||
return buffer.RecordSize;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get a value indicating whether an entry is open, requiring more data to be written.
|
||||
/// </summary>
|
||||
bool IsEntryOpen => (currBytes < currSize);
|
||||
|
||||
/// <summary>
|
||||
/// Put an entry on the output stream. This writes the entry's
|
||||
/// header and positions the output stream for writing
|
||||
/// the contents of the entry. Once this method is called, the
|
||||
/// stream is ready for calls to write() to write the entry's
|
||||
/// contents. Once the contents are written, closeEntry()
|
||||
/// <B>MUST</B> be called to ensure that all buffered data
|
||||
/// is completely written to the output stream.
|
||||
/// </summary>
|
||||
/// <param name="entry">
|
||||
/// The TarEntry to be written to the archive.
|
||||
/// </param>
|
||||
public void PutNextEntry(TarEntry entry)
|
||||
{
|
||||
if (entry == null) {
|
||||
throw new ArgumentNullException(nameof(entry));
|
||||
}
|
||||
|
||||
if (entry.TarHeader.Name.Length > TarHeader.NAMELEN) {
|
||||
var longHeader = new TarHeader();
|
||||
longHeader.TypeFlag = TarHeader.LF_GNU_LONGNAME;
|
||||
longHeader.Name = longHeader.Name + "././@LongLink";
|
||||
longHeader.Mode = 420;//644 by default
|
||||
longHeader.UserId = entry.UserId;
|
||||
longHeader.GroupId = entry.GroupId;
|
||||
longHeader.GroupName = entry.GroupName;
|
||||
longHeader.UserName = entry.UserName;
|
||||
longHeader.LinkName = "";
|
||||
longHeader.Size = entry.TarHeader.Name.Length + 1; // Plus one to avoid dropping last char
|
||||
|
||||
longHeader.WriteHeader(blockBuffer);
|
||||
buffer.WriteBlock(blockBuffer); // Add special long filename header block
|
||||
|
||||
int nameCharIndex = 0;
|
||||
|
||||
while (nameCharIndex < entry.TarHeader.Name.Length + 1 /* we've allocated one for the null char, now we must make sure it gets written out */) {
|
||||
Array.Clear(blockBuffer, 0, blockBuffer.Length);
|
||||
TarHeader.GetAsciiBytes(entry.TarHeader.Name, nameCharIndex, this.blockBuffer, 0, TarBuffer.BlockSize); // This func handles OK the extra char out of string length
|
||||
nameCharIndex += TarBuffer.BlockSize;
|
||||
buffer.WriteBlock(blockBuffer);
|
||||
}
|
||||
}
|
||||
|
||||
entry.WriteEntryHeader(blockBuffer);
|
||||
buffer.WriteBlock(blockBuffer);
|
||||
|
||||
currBytes = 0;
|
||||
|
||||
currSize = entry.IsDirectory ? 0 : entry.Size;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Close an entry. This method MUST be called for all file
|
||||
/// entries that contain data. The reason is that we must
|
||||
/// buffer data written to the stream in order to satisfy
|
||||
/// the buffer's block based writes. Thus, there may be
|
||||
/// data fragments still being assembled that must be written
|
||||
/// to the output stream before this entry is closed and the
|
||||
/// next entry written.
|
||||
/// </summary>
|
||||
public void CloseEntry()
|
||||
{
|
||||
if (assemblyBufferLength > 0) {
|
||||
Array.Clear(assemblyBuffer, assemblyBufferLength, assemblyBuffer.Length - assemblyBufferLength);
|
||||
|
||||
buffer.WriteBlock(assemblyBuffer);
|
||||
|
||||
currBytes += assemblyBufferLength;
|
||||
assemblyBufferLength = 0;
|
||||
}
|
||||
|
||||
if (currBytes < currSize) {
|
||||
string errorText = string.Format(
|
||||
"Entry closed at '{0}' before the '{1}' bytes specified in the header were written",
|
||||
currBytes, currSize);
|
||||
throw new TarException(errorText);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Writes a byte to the current tar archive entry.
|
||||
/// This method simply calls Write(byte[], int, int).
|
||||
/// </summary>
|
||||
/// <param name="value">
|
||||
/// The byte to be written.
|
||||
/// </param>
|
||||
public override void WriteByte(byte value)
|
||||
{
|
||||
Write(new byte[] { value }, 0, 1);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Writes bytes to the current tar archive entry. This method
|
||||
/// is aware of the current entry and will throw an exception if
|
||||
/// you attempt to write bytes past the length specified for the
|
||||
/// current entry. The method is also (painfully) aware of the
|
||||
/// record buffering required by TarBuffer, and manages buffers
|
||||
/// that are not a multiple of recordsize in length, including
|
||||
/// assembling records from small buffers.
|
||||
/// </summary>
|
||||
/// <param name = "buffer">
|
||||
/// The buffer to write to the archive.
|
||||
/// </param>
|
||||
/// <param name = "offset">
|
||||
/// The offset in the buffer from which to get bytes.
|
||||
/// </param>
|
||||
/// <param name = "count">
|
||||
/// The number of bytes to write.
|
||||
/// </param>
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (buffer == null) {
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
|
||||
if (offset < 0) {
|
||||
throw new ArgumentOutOfRangeException(nameof(offset), "Cannot be negative");
|
||||
}
|
||||
|
||||
if (buffer.Length - offset < count) {
|
||||
throw new ArgumentException("offset and count combination is invalid");
|
||||
}
|
||||
|
||||
if (count < 0) {
|
||||
throw new ArgumentOutOfRangeException(nameof(count), "Cannot be negative");
|
||||
}
|
||||
|
||||
if ((currBytes + count) > currSize) {
|
||||
string errorText = string.Format("request to write '{0}' bytes exceeds size in header of '{1}' bytes",
|
||||
count, this.currSize);
|
||||
throw new ArgumentOutOfRangeException(nameof(count), errorText);
|
||||
}
|
||||
|
||||
//
|
||||
// We have to deal with assembly!!!
|
||||
// The programmer can be writing little 32 byte chunks for all
|
||||
// we know, and we must assemble complete blocks for writing.
|
||||
// TODO REVIEW Maybe this should be in TarBuffer? Could that help to
|
||||
// eliminate some of the buffer copying.
|
||||
//
|
||||
if (assemblyBufferLength > 0) {
|
||||
if ((assemblyBufferLength + count) >= blockBuffer.Length) {
|
||||
int aLen = blockBuffer.Length - assemblyBufferLength;
|
||||
|
||||
Array.Copy(assemblyBuffer, 0, blockBuffer, 0, assemblyBufferLength);
|
||||
Array.Copy(buffer, offset, blockBuffer, assemblyBufferLength, aLen);
|
||||
|
||||
this.buffer.WriteBlock(blockBuffer);
|
||||
|
||||
currBytes += blockBuffer.Length;
|
||||
|
||||
offset += aLen;
|
||||
count -= aLen;
|
||||
|
||||
assemblyBufferLength = 0;
|
||||
} else {
|
||||
Array.Copy(buffer, offset, assemblyBuffer, assemblyBufferLength, count);
|
||||
offset += count;
|
||||
assemblyBufferLength += count;
|
||||
count -= count;
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// When we get here we have EITHER:
|
||||
// o An empty "assembly" buffer.
|
||||
// o No bytes to write (count == 0)
|
||||
//
|
||||
while (count > 0) {
|
||||
if (count < blockBuffer.Length) {
|
||||
Array.Copy(buffer, offset, assemblyBuffer, assemblyBufferLength, count);
|
||||
assemblyBufferLength += count;
|
||||
break;
|
||||
}
|
||||
|
||||
this.buffer.WriteBlock(buffer, offset);
|
||||
|
||||
int bufferLength = blockBuffer.Length;
|
||||
currBytes += bufferLength;
|
||||
count -= bufferLength;
|
||||
offset += bufferLength;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write an EOF (end of archive) block to the tar archive.
|
||||
/// An EOF block consists of all zeros.
|
||||
/// </summary>
|
||||
void WriteEofBlock()
|
||||
{
|
||||
Array.Clear(blockBuffer, 0, blockBuffer.Length);
|
||||
buffer.WriteBlock(blockBuffer);
|
||||
}
|
||||
|
||||
#region Instance Fields
|
||||
/// <summary>
|
||||
/// bytes written for this entry so far
|
||||
/// </summary>
|
||||
long currBytes;
|
||||
|
||||
/// <summary>
|
||||
/// current 'Assembly' buffer length
|
||||
/// </summary>
|
||||
int assemblyBufferLength;
|
||||
|
||||
/// <summary>
|
||||
/// Flag indicating whether this instance has been closed or not.
|
||||
/// </summary>
|
||||
bool isClosed;
|
||||
|
||||
/// <summary>
|
||||
/// Size for the current entry
|
||||
/// </summary>
|
||||
protected long currSize;
|
||||
|
||||
/// <summary>
|
||||
/// single block working buffer
|
||||
/// </summary>
|
||||
protected byte[] blockBuffer;
|
||||
|
||||
/// <summary>
|
||||
/// 'Assembly' buffer used to assemble data before writing
|
||||
/// </summary>
|
||||
protected byte[] assemblyBuffer;
|
||||
|
||||
/// <summary>
|
||||
/// TarBuffer used to provide correct blocking factor
|
||||
/// </summary>
|
||||
protected TarBuffer buffer;
|
||||
|
||||
/// <summary>
|
||||
/// the destination stream for the archive contents
|
||||
/// </summary>
|
||||
protected Stream outputStream;
|
||||
#endregion
|
||||
}
|
||||
}
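// Editor's note: a rough usage sketch of the TarOutputStream removed above; it is not code
// from the repository. TarEntry.CreateTarEntry and the settable Size property are
// assumptions borrowed from the classic SharpZipLib-style API this code derives from.
// (requires using System.IO; and using System.Text;)
using (var destination = File.Create("archive.tar"))
using (var tar = new TarOutputStream(destination))
{
    byte[] data = Encoding.UTF8.GetBytes("hello world");
    TarEntry entry = TarEntry.CreateTarEntry("hello.txt"); // assumed factory method
    entry.Size = data.Length;        // the header needs the size before any data is written
    tar.PutNextEntry(entry);
    tar.Write(data, 0, data.Length);
    tar.CloseEntry();                // pads the final partial block with zeros
    tar.Finish();                    // writes the end-of-archive block of zeros
}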
|
||||
src/SharpCompress/Common/Tar/TarReadOnlySubStream.cs (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
internal class TarReadOnlySubStream : Stream
|
||||
{
|
||||
private bool isDisposed;
|
||||
private long amountRead;
|
||||
|
||||
public TarReadOnlySubStream(Stream stream, long bytesToRead)
|
||||
{
|
||||
Stream = stream;
|
||||
BytesLeftToRead = bytesToRead;
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (isDisposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
isDisposed = true;
|
||||
if (disposing)
|
||||
{
|
||||
long skipBytes = amountRead % 512;
|
||||
if (skipBytes == 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
skipBytes = 512 - skipBytes;
|
||||
if (skipBytes == 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
var buffer = new byte[skipBytes];
|
||||
Stream.ReadFully(buffer);
|
||||
}
|
||||
}
|
||||
|
||||
private long BytesLeftToRead { get; set; }
|
||||
|
||||
public Stream Stream { get; }
|
||||
|
||||
public override bool CanRead => true;
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (BytesLeftToRead < count)
|
||||
{
|
||||
count = (int)BytesLeftToRead;
|
||||
}
|
||||
int read = Stream.Read(buffer, offset, count);
|
||||
if (read > 0)
|
||||
{
|
||||
BytesLeftToRead -= read;
|
||||
amountRead += read;
|
||||
}
|
||||
return read;
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
}
|
||||
}
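// Editor's note: a worked example of the Dispose padding logic above (illustrative numbers).
//   amountRead = 1300 -> 1300 % 512 = 276 -> read and discard 512 - 276 = 236 bytes
//   amountRead = 1024 -> 1024 % 512 = 0   -> nothing to skip
// This leaves the underlying stream positioned on the next 512-byte boundary, which is
// where the following tar header is expected to start.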
|
||||
src/SharpCompress/Common/Zip/CryptoMode.cs (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal enum CryptoMode
|
||||
{
|
||||
Encrypt,
|
||||
Decrypt
|
||||
}
|
||||
}
|
||||
@@ -6,8 +6,8 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal class DirectoryEntryHeader : ZipFileEntry
|
||||
{
|
||||
public DirectoryEntryHeader()
|
||||
: base(ZipHeaderType.DirectoryEntry)
|
||||
public DirectoryEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
: base(ZipHeaderType.DirectoryEntry, archiveEncoding)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -31,10 +31,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
RelativeOffsetOfEntryHeader = reader.ReadUInt32();
|
||||
|
||||
byte[] name = reader.ReadBytes(nameLength);
|
||||
Name = DecodeString(name);
|
||||
Name = ArchiveEncoding.Decode(name);
|
||||
byte[] extra = reader.ReadBytes(extraLength);
|
||||
byte[] comment = reader.ReadBytes(commentLength);
|
||||
Comment = DecodeString(comment);
|
||||
Comment = ArchiveEncoding.Decode(comment);
|
||||
LoadExtra(extra);
|
||||
|
||||
var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);
|
||||
|
||||
@@ -5,6 +5,7 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
[Flags]
|
||||
internal enum HeaderFlags : ushort
|
||||
{
|
||||
None = 0,
|
||||
Encrypted = 1, // http://www.pkware.com/documents/casestudies/APPNOTE.TXT
|
||||
Bit1 = 2,
|
||||
Bit2 = 4,
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal class LocalEntryHeader : ZipFileEntry
|
||||
{
|
||||
public LocalEntryHeader()
|
||||
: base(ZipHeaderType.LocalEntry)
|
||||
public LocalEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
: base(ZipHeaderType.LocalEntry, archiveEncoding)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -24,7 +25,7 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
ushort extraLength = reader.ReadUInt16();
|
||||
byte[] name = reader.ReadBytes(nameLength);
|
||||
byte[] extra = reader.ReadBytes(extraLength);
|
||||
Name = DecodeString(name);
|
||||
Name = ArchiveEncoding.Decode(name);
|
||||
LoadExtra(extra);
|
||||
|
||||
var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);
|
||||
|
||||
@@ -8,10 +8,11 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal abstract class ZipFileEntry : ZipHeader
|
||||
{
|
||||
protected ZipFileEntry(ZipHeaderType type)
|
||||
protected ZipFileEntry(ZipHeaderType type, ArchiveEncoding archiveEncoding)
|
||||
: base(type)
|
||||
{
|
||||
Extra = new List<ExtraData>();
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
internal bool IsDirectory
|
||||
@@ -29,28 +30,11 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
&& Name.EndsWith("\\");
|
||||
}
|
||||
}
|
||||
|
||||
protected string DecodeString(byte[] str)
|
||||
{
|
||||
if (FlagUtility.HasFlag(Flags, HeaderFlags.UTF8))
|
||||
{
|
||||
return Encoding.UTF8.GetString(str, 0, str.Length);
|
||||
}
|
||||
|
||||
return ArchiveEncoding.Default.GetString(str, 0, str.Length);
|
||||
}
|
||||
|
||||
protected byte[] EncodeString(string str)
|
||||
{
|
||||
if (FlagUtility.HasFlag(Flags, HeaderFlags.UTF8))
|
||||
{
|
||||
return Encoding.UTF8.GetBytes(str);
|
||||
}
|
||||
return ArchiveEncoding.Default.GetBytes(str);
|
||||
}
|
||||
|
||||
|
||||
internal Stream PackedStream { get; set; }
|
||||
|
||||
internal ArchiveEncoding ArchiveEncoding { get; }
|
||||
|
||||
internal string Name { get; set; }
|
||||
|
||||
internal HeaderFlags Flags { get; set; }
|
||||
@@ -64,7 +48,7 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
internal long UncompressedSize { get; set; }
|
||||
|
||||
internal List<ExtraData> Extra { get; set; }
|
||||
|
||||
|
||||
public string Password { get; set; }
|
||||
|
||||
internal PkwareTraditionalEncryptionData ComposeEncryptionData(Stream archiveStream)
|
||||
@@ -75,10 +59,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
}
|
||||
|
||||
var buffer = new byte[12];
|
||||
archiveStream.Read(buffer, 0, 12);
|
||||
archiveStream.ReadFully(buffer);
|
||||
|
||||
PkwareTraditionalEncryptionData encryptionData = PkwareTraditionalEncryptionData.ForRead(Password, this, buffer);
|
||||
|
||||
|
||||
return encryptionData;
|
||||
}
|
||||
|
||||
|
||||
@@ -3,12 +3,6 @@ using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal enum CryptoMode
|
||||
{
|
||||
Encrypt,
|
||||
Decrypt
|
||||
}
|
||||
|
||||
internal class PkwareTraditionalCryptoStream : Stream
|
||||
{
|
||||
private readonly PkwareTraditionalEncryptionData encryptor;
|
||||
@@ -42,7 +36,7 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException("buffer");
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
|
||||
byte[] temp = new byte[count];
|
||||
|
||||
@@ -9,9 +9,11 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
private static readonly CRC32 crc32 = new CRC32();
|
||||
private readonly UInt32[] _Keys = {0x12345678, 0x23456789, 0x34567890};
|
||||
private readonly ArchiveEncoding _archiveEncoding;
|
||||
|
||||
private PkwareTraditionalEncryptionData(string password)
|
||||
private PkwareTraditionalEncryptionData(string password, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
_archiveEncoding = archiveEncoding;
|
||||
Initialize(password);
|
||||
}
|
||||
|
||||
@@ -27,14 +29,10 @@ namespace SharpCompress.Common.Zip
|
||||
public static PkwareTraditionalEncryptionData ForRead(string password, ZipFileEntry header,
|
||||
byte[] encryptionHeader)
|
||||
{
|
||||
var encryptor = new PkwareTraditionalEncryptionData(password);
|
||||
var encryptor = new PkwareTraditionalEncryptionData(password, header.ArchiveEncoding);
|
||||
byte[] plainTextHeader = encryptor.Decrypt(encryptionHeader, encryptionHeader.Length);
|
||||
if (plainTextHeader[11] != (byte)((header.Crc >> 24) & 0xff))
|
||||
{
|
||||
if (!FlagUtility.HasFlag(header.Flags, HeaderFlags.UsePostDataDescriptor))
|
||||
{
|
||||
throw new CryptographicException("The password did not match.");
|
||||
}
|
||||
if (plainTextHeader[11] != (byte)((header.LastModifiedTime >> 8) & 0xff))
|
||||
{
|
||||
throw new CryptographicException("The password did not match.");
|
||||
@@ -47,7 +45,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
if (length > cipherText.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("length",
|
||||
throw new ArgumentOutOfRangeException(nameof(length),
|
||||
"Bad length during Decryption: the length parameter must be smaller than or equal to the size of the destination array.");
|
||||
}
|
||||
|
||||
@@ -70,7 +68,7 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (length > plainText.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("length",
|
||||
throw new ArgumentOutOfRangeException(nameof(length),
|
||||
"Bad length during Encryption: The length parameter must be smaller than or equal to the size of the destination array.");
|
||||
}
|
||||
|
||||
@@ -93,17 +91,12 @@ namespace SharpCompress.Common.Zip
|
||||
}
|
||||
}
|
||||
|
||||
internal static byte[] StringToByteArray(string value, Encoding encoding)
|
||||
internal byte[] StringToByteArray(string value)
|
||||
{
|
||||
byte[] a = encoding.GetBytes(value);
|
||||
byte[] a = _archiveEncoding.Password.GetBytes(value);
|
||||
return a;
|
||||
}
|
||||
|
||||
internal static byte[] StringToByteArray(string value)
|
||||
{
|
||||
return StringToByteArray(value, ArchiveEncoding.Password);
|
||||
}
|
||||
|
||||
private void UpdateKeys(byte byteValue)
|
||||
{
|
||||
_Keys[0] = (UInt32)crc32.ComputeCrc32((int)_Keys[0], byteValue);
|
||||
@@ -111,5 +104,10 @@ namespace SharpCompress.Common.Zip
|
||||
_Keys[1] = _Keys[1] * 0x08088405 + 1;
|
||||
_Keys[2] = (UInt32)crc32.ComputeCrc32((int)_Keys[2], (byte)(_Keys[1] >> 24));
|
||||
}
|
||||
|
||||
public static PkwareTraditionalEncryptionData ForWrite(string password, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
return new PkwareTraditionalEncryptionData(password, archiveEncoding);
|
||||
}
|
||||
}
|
||||
}
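// Editor's note: for context, a minimal sketch of how the traditional PKWARE ("ZipCrypto")
// cipher produces its keystream from the three keys updated above. It follows the classic
// APPNOTE description and is not copied from this file; it assumes the surrounding class's
// _Keys array and UpdateKeys method.
private byte DecryptByte(byte cipherByte)
{
    ushort temp = (ushort)((_Keys[2] & 0xFFFF) | 2);
    byte keystream = (byte)((temp * (temp ^ 1)) >> 8); // keystream depends only on _Keys[2]
    byte plain = (byte)(cipherByte ^ keystream);
    UpdateKeys(plain);                                  // keys always advance on the plaintext byte
    return plain;
}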
|
||||
@@ -5,21 +5,21 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class SeekableZipFilePart : ZipFilePart
|
||||
{
|
||||
private bool isLocalHeaderLoaded;
|
||||
private readonly SeekableZipHeaderFactory headerFactory;
|
||||
private bool _isLocalHeaderLoaded;
|
||||
private readonly SeekableZipHeaderFactory _headerFactory;
|
||||
|
||||
internal SeekableZipFilePart(SeekableZipHeaderFactory headerFactory, DirectoryEntryHeader header, Stream stream)
|
||||
: base(header, stream)
|
||||
{
|
||||
this.headerFactory = headerFactory;
|
||||
this._headerFactory = headerFactory;
|
||||
}
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (!isLocalHeaderLoaded)
|
||||
if (!_isLocalHeaderLoaded)
|
||||
{
|
||||
LoadLocalHeader();
|
||||
isLocalHeaderLoaded = true;
|
||||
_isLocalHeaderLoaded = true;
|
||||
}
|
||||
return base.GetCompressedStream();
|
||||
}
|
||||
@@ -29,7 +29,7 @@ namespace SharpCompress.Common.Zip
|
||||
private void LoadLocalHeader()
|
||||
{
|
||||
bool hasData = Header.HasData;
|
||||
Header = headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader);
|
||||
Header = _headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader);
|
||||
Header.HasData = hasData;
|
||||
}
|
||||
|
||||
|
||||
@@ -3,16 +3,17 @@ using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class SeekableZipHeaderFactory : ZipHeaderFactory
|
||||
{
|
||||
private const int MAX_ITERATIONS_FOR_DIRECTORY_HEADER = 4096;
|
||||
private bool zip64;
|
||||
private bool _zip64;
|
||||
|
||||
internal SeekableZipHeaderFactory(string password)
|
||||
: base(StreamingMode.Seekable, password)
|
||||
internal SeekableZipHeaderFactory(string password, ArchiveEncoding archiveEncoding)
|
||||
: base(StreamingMode.Seekable, password, archiveEncoding)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -26,14 +27,14 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (entry.IsZip64)
|
||||
{
|
||||
zip64 = true;
|
||||
_zip64 = true;
|
||||
SeekBackToHeader(stream, reader, ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR);
|
||||
var zip64Locator = new Zip64DirectoryEndLocatorHeader();
|
||||
zip64Locator.Read(reader);
|
||||
|
||||
stream.Seek(zip64Locator.RelativeOffsetOfTheEndOfDirectoryRecord, SeekOrigin.Begin);
|
||||
uint zip64Signature = reader.ReadUInt32();
|
||||
if(zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
|
||||
if (zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
|
||||
throw new ArchiveException("Failed to locate the Zip64 Header");
|
||||
|
||||
var zip64Entry = new Zip64DirectoryEndHeader();
|
||||
@@ -50,7 +51,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
stream.Position = position;
|
||||
uint signature = reader.ReadUInt32();
|
||||
var directoryEntryHeader = ReadHeader(signature, reader, zip64) as DirectoryEntryHeader;
|
||||
var directoryEntryHeader = ReadHeader(signature, reader, _zip64) as DirectoryEntryHeader;
|
||||
position = stream.Position;
|
||||
if (directoryEntryHeader == null)
|
||||
{
|
||||
@@ -91,7 +92,7 @@ namespace SharpCompress.Common.Zip
|
||||
stream.Seek(directoryEntryHeader.RelativeOffsetOfEntryHeader, SeekOrigin.Begin);
|
||||
BinaryReader reader = new BinaryReader(stream);
|
||||
uint signature = reader.ReadUInt32();
|
||||
var localEntryHeader = ReadHeader(signature, reader, zip64) as LocalEntryHeader;
|
||||
var localEntryHeader = ReadHeader(signature, reader, _zip64) as LocalEntryHeader;
|
||||
if (localEntryHeader == null)
|
||||
{
|
||||
throw new InvalidOperationException();
|
||||
|
||||
@@ -39,19 +39,20 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
return new BinaryReader(rewindableStream);
|
||||
}
|
||||
if (Header.HasData)
|
||||
if (Header.HasData && !Skipped)
|
||||
{
|
||||
if (decompressionStream == null)
|
||||
{
|
||||
decompressionStream = GetCompressedStream();
|
||||
}
|
||||
decompressionStream.SkipAll();
|
||||
decompressionStream.Skip();
|
||||
|
||||
DeflateStream deflateStream = decompressionStream as DeflateStream;
|
||||
if (deflateStream != null)
|
||||
{
|
||||
rewindableStream.Rewind(deflateStream.InputBuffer);
|
||||
}
|
||||
Skipped = true;
|
||||
}
|
||||
var reader = new BinaryReader(rewindableStream);
|
||||
decompressionStream = null;
|
||||
|
||||
@@ -2,13 +2,14 @@
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class StreamingZipHeaderFactory : ZipHeaderFactory
|
||||
{
|
||||
internal StreamingZipHeaderFactory(string password)
|
||||
: base(StreamingMode.Streaming, password)
|
||||
internal StreamingZipHeaderFactory(string password, ArchiveEncoding archiveEncoding)
|
||||
: base(StreamingMode.Streaming, password, archiveEncoding)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -78,7 +78,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
//read out last 10 auth bytes
|
||||
var ten = new byte[10];
|
||||
stream.Read(ten, 0, 10);
|
||||
stream.ReadFully(ten);
|
||||
stream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ namespace SharpCompress.Common.Zip
|
||||
internal abstract class ZipFilePart : FilePart
|
||||
{
|
||||
internal ZipFilePart(ZipFileEntry header, Stream stream)
|
||||
: base(header.ArchiveEncoding)
|
||||
{
|
||||
Header = header;
|
||||
header.Part = this;
|
||||
@@ -88,7 +89,7 @@ namespace SharpCompress.Common.Zip
|
||||
case ZipCompressionMethod.PPMd:
|
||||
{
|
||||
var props = new byte[2];
|
||||
stream.Read(props, 0, props.Length);
|
||||
stream.ReadFully(props);
|
||||
return new PpmdStream(new PpmdProperties(props), stream, false);
|
||||
}
|
||||
case ZipCompressionMethod.WinzipAes:
|
||||
@@ -127,11 +128,6 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
bool isFileEncrypted = FlagUtility.HasFlag(Header.Flags, HeaderFlags.Encrypted);
|
||||
|
||||
if (Header.CompressedSize == 0 && isFileEncrypted)
|
||||
{
|
||||
throw new NotSupportedException("Cannot encrypt file with unknown size at start.");
|
||||
}
|
||||
|
||||
if ((Header.CompressedSize == 0
|
||||
&& FlagUtility.HasFlag(Header.Flags, HeaderFlags.UsePostDataDescriptor))
|
||||
|| Header.IsZip64)
|
||||
@@ -175,7 +171,6 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return plainStream;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ using System.Linq;
|
||||
#endif
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
@@ -23,11 +24,13 @@ namespace SharpCompress.Common.Zip
|
||||
protected LocalEntryHeader lastEntryHeader;
|
||||
private readonly string password;
|
||||
private readonly StreamingMode mode;
|
||||
private readonly ArchiveEncoding archiveEncoding;
|
||||
|
||||
protected ZipHeaderFactory(StreamingMode mode, string password)
|
||||
protected ZipHeaderFactory(StreamingMode mode, string password, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
this.mode = mode;
|
||||
this.password = password;
|
||||
this.archiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
protected ZipHeader ReadHeader(uint headerBytes, BinaryReader reader, bool zip64 = false)
|
||||
@@ -36,7 +39,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
case ENTRY_HEADER_BYTES:
|
||||
{
|
||||
var entryHeader = new LocalEntryHeader();
|
||||
var entryHeader = new LocalEntryHeader(archiveEncoding);
|
||||
entryHeader.Read(reader);
|
||||
LoadHeader(entryHeader, reader.BaseStream);
|
||||
|
||||
@@ -45,48 +48,48 @@ namespace SharpCompress.Common.Zip
|
||||
}
|
||||
case DIRECTORY_START_HEADER_BYTES:
|
||||
{
|
||||
var entry = new DirectoryEntryHeader();
|
||||
var entry = new DirectoryEntryHeader(archiveEncoding);
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case POST_DATA_DESCRIPTOR:
|
||||
{
|
||||
if (FlagUtility.HasFlag(lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor))
|
||||
{
|
||||
lastEntryHeader.Crc = reader.ReadUInt32();
|
||||
lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
if (FlagUtility.HasFlag(lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor))
|
||||
{
|
||||
lastEntryHeader.Crc = reader.ReadUInt32();
|
||||
lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
}
|
||||
else
|
||||
{
|
||||
reader.ReadBytes(zip64 ? 20 : 12);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
else
|
||||
{
|
||||
reader.ReadBytes(zip64 ? 20 : 12);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
case DIGITAL_SIGNATURE:
|
||||
return null;
|
||||
case DIRECTORY_END_HEADER_BYTES:
|
||||
{
|
||||
var entry = new DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case SPLIT_ARCHIVE_HEADER_BYTES:
|
||||
{
|
||||
return new SplitHeader();
|
||||
}
|
||||
{
|
||||
return new SplitHeader();
|
||||
}
|
||||
case ZIP64_END_OF_CENTRAL_DIRECTORY:
|
||||
{
|
||||
var entry = new Zip64DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new Zip64DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR:
|
||||
{
|
||||
var entry = new Zip64DirectoryEndLocatorHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new Zip64DirectoryEndLocatorHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
default:
|
||||
throw new NotSupportedException("Unknown header: " + headerBytes);
|
||||
}
|
||||
@@ -165,22 +168,22 @@ namespace SharpCompress.Common.Zip
|
||||
switch (mode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
entryHeader.DataStartPosition = stream.Position;
|
||||
stream.Position += entryHeader.CompressedSize;
|
||||
break;
|
||||
}
|
||||
{
|
||||
entryHeader.DataStartPosition = stream.Position;
|
||||
stream.Position += entryHeader.CompressedSize;
|
||||
break;
|
||||
}
|
||||
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
entryHeader.PackedStream = stream;
|
||||
break;
|
||||
}
|
||||
{
|
||||
entryHeader.PackedStream = stream;
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
|
||||
//}
|
||||
|
||||
@@ -105,19 +105,19 @@ namespace SharpCompress.Compressors.ADC
|
||||
}
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException("buffer");
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
if (count < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
if (offset < buffer.GetLowerBound(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
if ((offset + count) > buffer.GetLength(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
|
||||
int size = -1;
|
||||
|
||||
@@ -99,7 +99,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// </summary>
|
||||
/// <param name="input">The stream over which to calculate the CRC32</param>
|
||||
/// <returns>the CRC32 calculation</returns>
|
||||
public Int32 GetCrc32(Stream input)
|
||||
public UInt32 GetCrc32(Stream input)
|
||||
{
|
||||
return GetCrc32AndCopy(input, null);
|
||||
}
|
||||
@@ -111,7 +111,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <param name="input">The stream over which to calculate the CRC32</param>
|
||||
/// <param name="output">The stream into which to deflate the input</param>
|
||||
/// <returns>the CRC32 calculation</returns>
|
||||
public Int32 GetCrc32AndCopy(Stream input, Stream output)
|
||||
public UInt32 GetCrc32AndCopy(Stream input, Stream output)
|
||||
{
|
||||
if (input == null)
|
||||
{
|
||||
@@ -143,7 +143,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
TotalBytesRead += count;
|
||||
}
|
||||
|
||||
return (Int32)(~runningCrc32Result);
|
||||
return ~runningCrc32Result;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -36,9 +37,10 @@ namespace SharpCompress.Compressors.Deflate
|
||||
|
||||
public DeflateStream(Stream stream, CompressionMode mode,
|
||||
CompressionLevel level = CompressionLevel.Default,
|
||||
bool leaveOpen = false)
|
||||
bool leaveOpen = false,
|
||||
Encoding forceEncoding = null)
|
||||
{
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen);
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen, forceEncoding);
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
|
||||
@@ -30,41 +30,45 @@ using System;
|
||||
using System.IO;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
public class GZipStream : Stream
|
||||
{
|
||||
internal static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
internal static readonly DateTime UNIX_EPOCH = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
|
||||
public DateTime? LastModified { get; set; }
|
||||
|
||||
private string comment;
|
||||
private string fileName;
|
||||
private string _comment;
|
||||
private string _fileName;
|
||||
|
||||
internal ZlibBaseStream BaseStream;
|
||||
private bool disposed;
|
||||
private bool firstReadDone;
|
||||
private int headerByteCount;
|
||||
private bool _disposed;
|
||||
private bool _firstReadDone;
|
||||
private int _headerByteCount;
|
||||
|
||||
private readonly Encoding _encoding;
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode)
|
||||
: this(stream, mode, CompressionLevel.Default, false)
|
||||
: this(stream, mode, CompressionLevel.Default, false, Encoding.UTF8)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level)
|
||||
: this(stream, mode, level, false)
|
||||
: this(stream, mode, level, false, Encoding.UTF8)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen, Encoding.UTF8)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen, Encoding encoding)
|
||||
{
|
||||
BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen);
|
||||
BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen, encoding);
|
||||
_encoding = encoding;
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
@@ -74,7 +78,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
get => (BaseStream._flushMode);
|
||||
set
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -87,7 +91,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
get => BaseStream._bufferSize;
|
||||
set
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -123,7 +127,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
get
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -149,7 +153,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
get
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -179,7 +183,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Writer)
|
||||
{
|
||||
return BaseStream._z.TotalBytesOut + headerByteCount;
|
||||
return BaseStream._z.TotalBytesOut + _headerByteCount;
|
||||
}
|
||||
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Reader)
|
||||
{
|
||||
@@ -202,14 +206,14 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
try
|
||||
{
|
||||
if (!disposed)
|
||||
if (!_disposed)
|
||||
{
|
||||
if (disposing && (BaseStream != null))
|
||||
{
|
||||
BaseStream.Dispose();
|
||||
Crc32 = BaseStream.Crc32;
|
||||
}
|
||||
disposed = true;
|
||||
_disposed = true;
|
||||
}
|
||||
}
|
||||
finally
|
||||
@@ -223,7 +227,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// </summary>
|
||||
public override void Flush()
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -263,7 +267,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <returns>the number of bytes actually read</returns>
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -272,9 +276,9 @@ namespace SharpCompress.Compressors.Deflate
|
||||
// Console.WriteLine("GZipStream::Read(buffer, off({0}), c({1}) = {2}", offset, count, n);
|
||||
// Console.WriteLine( Util.FormatByteArray(buffer, offset, n) );
|
||||
|
||||
if (!firstReadDone)
|
||||
if (!_firstReadDone)
|
||||
{
|
||||
firstReadDone = true;
|
||||
_firstReadDone = true;
|
||||
FileName = BaseStream._GzipFileName;
|
||||
Comment = BaseStream._GzipComment;
|
||||
}
|
||||
@@ -325,7 +329,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <param name="count">the number of bytes to write.</param>
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -335,7 +339,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
if (BaseStream._wantCompress)
|
||||
{
|
||||
// first write in compression, therefore, emit the GZIP header
|
||||
headerByteCount = EmitHeader();
|
||||
_headerByteCount = EmitHeader();
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -346,56 +350,56 @@ namespace SharpCompress.Compressors.Deflate
|
||||
BaseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
#endregion
|
||||
#endregion Stream methods
|
||||
|
||||
public String Comment
|
||||
{
|
||||
get => comment;
|
||||
get => _comment;
|
||||
set
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
comment = value;
|
||||
_comment = value;
|
||||
}
|
||||
}
|
||||
|
||||
public string FileName
|
||||
{
|
||||
get => fileName;
|
||||
get => _fileName;
|
||||
set
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
fileName = value;
|
||||
if (fileName == null)
|
||||
_fileName = value;
|
||||
if (_fileName == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (fileName.IndexOf("/") != -1)
|
||||
if (_fileName.IndexOf("/") != -1)
|
||||
{
|
||||
fileName = fileName.Replace("/", "\\");
|
||||
_fileName = _fileName.Replace("/", "\\");
|
||||
}
|
||||
if (fileName.EndsWith("\\"))
|
||||
if (_fileName.EndsWith("\\"))
|
||||
{
|
||||
throw new InvalidOperationException("Illegal filename");
|
||||
}
|
||||
|
||||
var index = fileName.IndexOf("\\");
|
||||
var index = _fileName.IndexOf("\\");
|
||||
if (index != -1)
|
||||
{
|
||||
// trim any leading path
|
||||
int length = fileName.Length;
|
||||
int length = _fileName.Length;
|
||||
int num = length;
|
||||
while (--num >= 0)
|
||||
{
|
||||
char c = fileName[num];
|
||||
char c = _fileName[num];
|
||||
if (c == '\\')
|
||||
{
|
||||
fileName = fileName.Substring(num + 1, length - num - 1);
|
||||
_fileName = _fileName.Substring(num + 1, length - num - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -406,8 +410,10 @@ namespace SharpCompress.Compressors.Deflate
|
||||
|
||||
private int EmitHeader()
|
||||
{
|
||||
byte[] commentBytes = (Comment == null) ? null : ArchiveEncoding.Default.GetBytes(Comment);
|
||||
byte[] filenameBytes = (FileName == null) ? null : ArchiveEncoding.Default.GetBytes(FileName);
|
||||
byte[] commentBytes = (Comment == null) ? null
|
||||
: _encoding.GetBytes(Comment);
|
||||
byte[] filenameBytes = (FileName == null) ? null
|
||||
: _encoding.GetBytes(FileName);
|
||||
|
||||
int cbLength = (Comment == null) ? 0 : commentBytes.Length + 1;
|
||||
int fnLength = (FileName == null) ? 0 : filenameBytes.Length + 1;
|
||||
@@ -440,7 +446,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
LastModified = DateTime.Now;
|
||||
}
|
||||
TimeSpan delta = LastModified.Value - UnixEpoch;
|
||||
TimeSpan delta = LastModified.Value - UNIX_EPOCH;
|
||||
var timet = (Int32)delta.TotalSeconds;
|
||||
DataConverter.LittleEndian.PutBytes(header, i, timet);
|
||||
i += 4;
|
||||
|
||||
@@ -1,20 +1,20 @@
|
||||
// ZlibBaseStream.cs
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
|
||||
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
|
||||
// All rights reserved.
|
||||
//
|
||||
// This code module is part of DotNetZip, a zipfile class library.
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// This code is licensed under the Microsoft Public License.
|
||||
// This code is licensed under the Microsoft Public License.
|
||||
// See the file License.txt for the license details.
|
||||
// More info on: http://dotnetzip.codeplex.com
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// last saved (in emacs):
|
||||
// last saved (in emacs):
|
||||
// Time-stamp: <2009-October-28 15:45:15>
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
@@ -28,7 +28,9 @@ using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -63,6 +65,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
protected internal DateTime _GzipMtime;
|
||||
protected internal int _gzipHeaderByteCount;
|
||||
|
||||
private readonly Encoding _encoding;
|
||||
|
||||
internal int Crc32
|
||||
{
|
||||
get
|
||||
@@ -79,7 +83,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
CompressionMode compressionMode,
|
||||
CompressionLevel level,
|
||||
ZlibStreamFlavor flavor,
|
||||
bool leaveOpen)
|
||||
bool leaveOpen,
|
||||
Encoding encoding)
|
||||
{
|
||||
_flushMode = FlushType.None;
|
||||
|
||||
@@ -90,6 +95,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
_flavor = flavor;
|
||||
_level = level;
|
||||
|
||||
_encoding = encoding;
|
||||
|
||||
// workitem 7159
|
||||
if (flavor == ZlibStreamFlavor.GZIP)
|
||||
{
|
||||
@@ -417,8 +424,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
}
|
||||
}
|
||||
while (!done);
|
||||
byte[] a = list.ToArray();
|
||||
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
|
||||
byte[] buffer = list.ToArray();
|
||||
return _encoding.GetString(buffer, 0, buffer.Length);
|
||||
}
|
||||
|
||||
private int _ReadAndValidateGzipHeader()
|
||||
@@ -527,19 +534,19 @@ namespace SharpCompress.Compressors.Deflate
|
||||
}
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException("buffer");
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
if (count < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
if (offset < buffer.GetLowerBound(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
if ((offset + count) > buffer.GetLength(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
|
||||
int rc = 0;
|
||||
@@ -592,7 +599,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK);
|
||||
|
||||
// workitem 8557
|
||||
// is there more room in output?
|
||||
// is there more room in output?
|
||||
if (_z.AvailableBytesOut > 0)
|
||||
{
|
||||
if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0)
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -36,23 +37,23 @@ namespace SharpCompress.Compressors.Deflate
|
||||
private bool _disposed;
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode)
|
||||
: this(stream, mode, CompressionLevel.Default, false)
|
||||
: this(stream, mode, CompressionLevel.Default, false, Encoding.UTF8)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level)
|
||||
: this(stream, mode, level, false)
|
||||
: this(stream, mode, level, false, Encoding.UTF8)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, bool leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen, Encoding.UTF8)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen, Encoding encoding)
|
||||
{
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen);
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen, encoding);
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
@@ -326,6 +327,6 @@ namespace SharpCompress.Compressors.Deflate
|
||||
_baseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
#endregion
|
||||
#endregion System.IO.Stream methods
|
||||
}
|
||||
}
|
||||
@@ -58,7 +58,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
if (index < 0 || index >= Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("index");
|
||||
throw new ArgumentOutOfRangeException(nameof(index));
|
||||
}
|
||||
|
||||
return (mBits[index >> 5] & (1u << (index & 31))) != 0;
|
||||
@@ -69,7 +69,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
if (index < 0 || index >= Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("index");
|
||||
throw new ArgumentOutOfRangeException(nameof(index));
|
||||
}
|
||||
|
||||
mBits[index >> 5] |= 1u << (index & 31);
|
||||
@@ -79,7 +79,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
if (index < 0 || index >= Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("index");
|
||||
throw new ArgumentOutOfRangeException(nameof(index));
|
||||
}
|
||||
|
||||
uint bits = mBits[index >> 5];
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using SharpCompress.Converters;
|
||||
using SharpCompress.Crypto;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
@@ -14,29 +17,62 @@ namespace SharpCompress.Compressors.LZMA
|
||||
public class LZipStream : Stream
|
||||
{
|
||||
private readonly Stream stream;
|
||||
private readonly CountingWritableSubStream rawStream;
|
||||
private bool disposed;
|
||||
private readonly bool leaveOpen;
|
||||
private bool finished;
|
||||
|
||||
public LZipStream(Stream stream, CompressionMode mode)
|
||||
: this(stream, mode, false)
|
||||
{
|
||||
}
|
||||
private long writeCount;
|
||||
|
||||
public LZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
|
||||
public LZipStream(Stream stream, CompressionMode mode, bool leaveOpen = false)
|
||||
{
|
||||
if (mode != CompressionMode.Decompress)
|
||||
{
|
||||
throw new NotImplementedException("Only LZip decompression is currently supported");
|
||||
}
|
||||
Mode = mode;
|
||||
this.leaveOpen = leaveOpen;
|
||||
int dictionarySize = ValidateAndReadSize(stream);
|
||||
if (dictionarySize == 0)
|
||||
|
||||
if (mode == CompressionMode.Decompress)
|
||||
{
|
||||
throw new IOException("Not an LZip stream");
|
||||
int dSize = ValidateAndReadSize(stream);
|
||||
if (dSize == 0)
|
||||
{
|
||||
throw new IOException("Not an LZip stream");
|
||||
}
|
||||
byte[] properties = GetProperties(dSize);
|
||||
this.stream = new LzmaStream(properties, stream);
|
||||
}
|
||||
else
|
||||
{
|
||||
//default
|
||||
int dSize = 104 * 1024;
|
||||
WriteHeaderSize(stream);
|
||||
|
||||
rawStream = new CountingWritableSubStream(stream);
|
||||
this.stream = new Crc32Stream(new LzmaStream(new LzmaEncoderProperties(true, dSize), false, rawStream));
|
||||
}
|
||||
}
|
||||
|
||||
public void Finish()
|
||||
{
|
||||
if (!finished)
|
||||
{
|
||||
if (Mode == CompressionMode.Compress)
|
||||
{
|
||||
var crc32Stream = (Crc32Stream)stream;
|
||||
crc32Stream.WrappedStream.Dispose();
|
||||
crc32Stream.Dispose();
|
||||
var compressedCount = rawStream.Count;
|
||||
|
||||
var bytes = DataConverter.LittleEndian.GetBytes(crc32Stream.Crc);
|
||||
rawStream.Write(bytes, 0, bytes.Length);
|
||||
|
||||
bytes = DataConverter.LittleEndian.GetBytes(writeCount);
|
||||
rawStream.Write(bytes, 0, bytes.Length);
|
||||
|
||||
//total with headers
|
||||
bytes = DataConverter.LittleEndian.GetBytes(compressedCount + 6 + 20);
|
||||
rawStream.Write(bytes, 0, bytes.Length);
|
||||
}
|
||||
finished = true;
|
||||
}
|
||||
byte[] properties = GetProperties(dictionarySize);
|
||||
this.stream = new LzmaStream(properties, stream);
|
||||
}
|
||||
|
||||
#region Stream methods
|
||||
@@ -48,19 +84,23 @@ namespace SharpCompress.Compressors.LZMA
|
||||
return;
|
||||
}
|
||||
disposed = true;
|
||||
if (disposing && !leaveOpen)
|
||||
if (disposing)
|
||||
{
|
||||
stream.Dispose();
|
||||
Finish();
|
||||
if (!leaveOpen)
|
||||
{
|
||||
rawStream.Dispose();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public CompressionMode Mode { get; }
|
||||
|
||||
public override bool CanRead => stream.CanRead;
|
||||
public override bool CanRead => Mode == CompressionMode.Decompress;
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => false;
|
||||
public override bool CanWrite => Mode == CompressionMode.Compress;
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
@@ -75,20 +115,16 @@ namespace SharpCompress.Compressors.LZMA
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count) => stream.Read(buffer, offset, count);
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
public override void SetLength(long value) => throw new NotImplementedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
stream.Write(buffer, offset, count);
|
||||
writeCount += count;
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
/// <summary>
|
||||
@@ -105,7 +141,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
/// couldn't be read or it isn't a valid LZIP header, or the dictionary
|
||||
/// size if it *is* a valid LZIP file.
|
||||
/// </summary>
|
||||
private static int ValidateAndReadSize(Stream stream)
|
||||
public static int ValidateAndReadSize(Stream stream)
|
||||
{
|
||||
if (stream == null)
|
||||
{
|
||||
@@ -131,6 +167,17 @@ namespace SharpCompress.Compressors.LZMA
|
||||
return (1 << basePower) - subtractionNumerator * (1 << (basePower - 4));
|
||||
}
|
||||
|
||||
public static void WriteHeaderSize(Stream stream)
|
||||
{
|
||||
if (stream == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
}
|
||||
// hard coding the dictionary size encoding
|
||||
byte[] header = new byte[6] {(byte)'L', (byte)'Z', (byte)'I', (byte)'P', 1, 113};
|
||||
stream.Write(header, 0, 6);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a byte array to communicate the parameters and dictionary size to LzmaStream.
|
||||
/// </summary>
|
||||
|
||||
@@ -141,10 +141,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
position = encoder.Code(null, true);
|
||||
}
|
||||
if (inputStream != null)
|
||||
{
|
||||
inputStream.Dispose();
|
||||
}
|
||||
inputStream?.Dispose();
|
||||
}
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
@@ -58,22 +58,22 @@ namespace SharpCompress.Compressors.LZMA.Utilites
|
||||
{
|
||||
if (stream == null)
|
||||
{
|
||||
throw new ArgumentNullException("stream");
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
}
|
||||
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException("buffer");
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
|
||||
if (offset < 0 || offset > buffer.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
|
||||
if (length < 0 || length > buffer.Length - offset)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("length");
|
||||
throw new ArgumentOutOfRangeException(nameof(length));
|
||||
}
|
||||
|
||||
while (length > 0)
|
||||
|
||||
@@ -146,12 +146,12 @@ namespace SharpCompress.Compressors.PPMd.I1
|
||||
{
|
||||
if (target == null)
|
||||
{
|
||||
throw new ArgumentNullException("target");
|
||||
throw new ArgumentNullException(nameof(target));
|
||||
}
|
||||
|
||||
if (source == null)
|
||||
{
|
||||
throw new ArgumentNullException("source");
|
||||
throw new ArgumentNullException(nameof(source));
|
||||
}
|
||||
|
||||
EncodeStart(properties);
|
||||
@@ -235,12 +235,12 @@ namespace SharpCompress.Compressors.PPMd.I1
|
||||
{
|
||||
if (target == null)
|
||||
{
|
||||
throw new ArgumentNullException("target");
|
||||
throw new ArgumentNullException(nameof(target));
|
||||
}
|
||||
|
||||
if (source == null)
|
||||
{
|
||||
throw new ArgumentNullException("source");
|
||||
throw new ArgumentNullException(nameof(source));
|
||||
}
|
||||
|
||||
DecodeStart(source, properties);
|
||||
|
||||
54
src/SharpCompress/Compressors/Xz/BinaryUtils.cs
Normal file
@@ -0,0 +1,54 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
public static class BinaryUtils
|
||||
{
|
||||
public static int ReadLittleEndianInt32(this BinaryReader reader)
|
||||
{
|
||||
byte[] bytes = reader.ReadBytes(4);
|
||||
return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
|
||||
}
|
||||
|
||||
internal static uint ReadLittleEndianUInt32(this BinaryReader reader)
|
||||
{
|
||||
return unchecked((uint)ReadLittleEndianInt32(reader));
|
||||
}
|
||||
public static int ReadLittleEndianInt32(this Stream stream)
|
||||
{
|
||||
byte[] bytes = new byte[4];
|
||||
var read = stream.ReadFully(bytes);
|
||||
if (!read)
|
||||
{
|
||||
throw new EndOfStreamException();
|
||||
}
|
||||
return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
|
||||
}
|
||||
|
||||
internal static uint ReadLittleEndianUInt32(this Stream stream)
|
||||
{
|
||||
return unchecked((uint)ReadLittleEndianInt32(stream));
|
||||
}
|
||||
|
||||
internal static byte[] ToBigEndianBytes(this uint uint32)
|
||||
{
|
||||
var result = BitConverter.GetBytes(uint32);
|
||||
|
||||
if (BitConverter.IsLittleEndian)
|
||||
Array.Reverse(result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
internal static byte[] ToLittleEndianBytes(this uint uint32)
|
||||
{
|
||||
var result = BitConverter.GetBytes(uint32);
|
||||
|
||||
if (!BitConverter.IsLittleEndian)
|
||||
Array.Reverse(result);
|
||||
|
||||
return result;
|
||||
}
|
||||
}
|
||||
}
|
||||
10
src/SharpCompress/Compressors/Xz/CheckType.cs
Normal file
@@ -0,0 +1,10 @@
namespace SharpCompress.Compressors.Xz
{
    public enum CheckType : byte
    {
        NONE = 0x00,
        CRC32 = 0x01,
        CRC64 = 0x04,
        SHA256 = 0x0A
    }
}
60
src/SharpCompress/Compressors/Xz/Crc32.cs
Normal file
@@ -0,0 +1,60 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
internal static class Crc32
|
||||
{
|
||||
public const UInt32 DefaultPolynomial = 0xedb88320u;
|
||||
public const UInt32 DefaultSeed = 0xffffffffu;
|
||||
|
||||
static UInt32[] defaultTable;
|
||||
|
||||
public static UInt32 Compute(byte[] buffer)
|
||||
{
|
||||
return Compute(DefaultSeed, buffer);
|
||||
}
|
||||
|
||||
public static UInt32 Compute(UInt32 seed, byte[] buffer)
|
||||
{
|
||||
return Compute(DefaultPolynomial, seed, buffer);
|
||||
}
|
||||
|
||||
public static UInt32 Compute(UInt32 polynomial, UInt32 seed, byte[] buffer)
|
||||
{
|
||||
return ~CalculateHash(InitializeTable(polynomial), seed, buffer, 0, buffer.Length);
|
||||
}
|
||||
|
||||
static UInt32[] InitializeTable(UInt32 polynomial)
|
||||
{
|
||||
if (polynomial == DefaultPolynomial && defaultTable != null)
|
||||
return defaultTable;
|
||||
|
||||
var createTable = new UInt32[256];
|
||||
for (var i = 0; i < 256; i++)
|
||||
{
|
||||
var entry = (UInt32)i;
|
||||
for (var j = 0; j < 8; j++)
|
||||
if ((entry & 1) == 1)
|
||||
entry = (entry >> 1) ^ polynomial;
|
||||
else
|
||||
entry = entry >> 1;
|
||||
createTable[i] = entry;
|
||||
}
|
||||
|
||||
if (polynomial == DefaultPolynomial)
|
||||
defaultTable = createTable;
|
||||
|
||||
return createTable;
|
||||
}
|
||||
|
||||
static UInt32 CalculateHash(UInt32[] table, UInt32 seed, IList<byte> buffer, int start, int size)
|
||||
{
|
||||
var crc = seed;
|
||||
for (var i = start; i < size - start; i++)
|
||||
crc = (crc >> 8) ^ table[buffer[i] ^ crc & 0xff];
|
||||
return crc;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
57
src/SharpCompress/Compressors/Xz/Crc64.cs
Normal file
@@ -0,0 +1,57 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
internal static class Crc64
|
||||
{
|
||||
public const UInt64 DefaultSeed = 0x0;
|
||||
|
||||
internal static UInt64[] Table;
|
||||
|
||||
public const UInt64 Iso3309Polynomial = 0xD800000000000000;
|
||||
|
||||
public static UInt64 Compute(byte[] buffer)
|
||||
{
|
||||
return Compute(DefaultSeed, buffer);
|
||||
}
|
||||
|
||||
public static UInt64 Compute(UInt64 seed, byte[] buffer)
|
||||
{
|
||||
if (Table == null)
|
||||
Table = CreateTable(Iso3309Polynomial);
|
||||
|
||||
return CalculateHash(seed, Table, buffer, 0, buffer.Length);
|
||||
}
|
||||
|
||||
public static UInt64 CalculateHash(UInt64 seed, UInt64[] table, IList<byte> buffer, int start, int size)
|
||||
{
|
||||
var crc = seed;
|
||||
|
||||
for (var i = start; i < size; i++)
|
||||
unchecked
|
||||
{
|
||||
crc = (crc >> 8) ^ table[(buffer[i] ^ crc) & 0xff];
|
||||
}
|
||||
|
||||
return crc;
|
||||
}
|
||||
|
||||
public static ulong[] CreateTable(ulong polynomial)
|
||||
{
|
||||
var createTable = new UInt64[256];
|
||||
for (var i = 0; i < 256; ++i)
|
||||
{
|
||||
var entry = (UInt64)i;
|
||||
for (var j = 0; j < 8; ++j)
|
||||
if ((entry & 1) == 1)
|
||||
entry = (entry >> 1) ^ polynomial;
|
||||
else
|
||||
entry = entry >> 1;
|
||||
createTable[i] = entry;
|
||||
}
|
||||
return createTable;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
53
src/SharpCompress/Compressors/Xz/Filters/BlockFilter.cs
Normal file
@@ -0,0 +1,53 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz.Filters
|
||||
{
|
||||
internal abstract class BlockFilter : ReadOnlyStream
|
||||
{
|
||||
public enum FilterTypes : ulong
|
||||
{
|
||||
DELTA = 0x03,
|
||||
ARCH_x86_FILTER = 0x04,
|
||||
ARCH_PowerPC_FILTER = 0x05,
|
||||
ARCH_IA64_FILTER = 0x06,
|
||||
ARCH_ARM_FILTER = 0x07,
|
||||
ARCH_ARMTHUMB_FILTER = 0x08,
|
||||
ARCH_SPARC_FILTER = 0x09,
|
||||
LZMA2 = 0x21,
|
||||
}
|
||||
|
||||
static Dictionary<FilterTypes, Type> FilterMap = new Dictionary<FilterTypes, Type>()
|
||||
{
|
||||
{FilterTypes.LZMA2, typeof(Lzma2Filter) }
|
||||
};
|
||||
|
||||
public abstract bool AllowAsLast { get; }
|
||||
public abstract bool AllowAsNonLast { get; }
|
||||
public abstract bool ChangesDataSize { get; }
|
||||
|
||||
public BlockFilter() { }
|
||||
|
||||
public abstract void Init(byte[] properties);
|
||||
public abstract void ValidateFilter();
|
||||
|
||||
public FilterTypes FilterType { get; set; }
|
||||
public static BlockFilter Read(BinaryReader reader)
|
||||
{
|
||||
var filterType = (FilterTypes)reader.ReadXZInteger();
|
||||
if (!FilterMap.ContainsKey(filterType))
|
||||
throw new NotImplementedException($"Filter {filterType} has not yet been implemented");
|
||||
var filter = Activator.CreateInstance(FilterMap[filterType]) as BlockFilter;
|
||||
|
||||
var sizeOfProperties = reader.ReadXZInteger();
|
||||
if (sizeOfProperties > int.MaxValue)
|
||||
throw new InvalidDataException("Block filter information too large");
|
||||
byte[] properties = reader.ReadBytes((int)sizeOfProperties);
|
||||
filter.Init(properties);
|
||||
return filter;
|
||||
}
|
||||
|
||||
public abstract void SetBaseStream(Stream stream);
|
||||
}
|
||||
}
|
||||
54
src/SharpCompress/Compressors/Xz/Filters/Lzma2Filter.cs
Normal file
@@ -0,0 +1,54 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz.Filters
|
||||
{
|
||||
internal class Lzma2Filter : BlockFilter
|
||||
{
|
||||
public override bool AllowAsLast => true;
|
||||
public override bool AllowAsNonLast => false;
|
||||
public override bool ChangesDataSize => true;
|
||||
|
||||
byte _dictionarySize;
|
||||
public uint DictionarySize
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_dictionarySize > 40)
|
||||
throw new OverflowException("Dictionary size greater than UInt32.Max");
|
||||
if (_dictionarySize == 40)
|
||||
{
|
||||
return uint.MaxValue;
|
||||
}
|
||||
int mantissa = 2 | (_dictionarySize & 1);
|
||||
int exponent = _dictionarySize / 2 + 11;
|
||||
return (uint)mantissa << exponent;
|
||||
}
|
||||
}
|
||||
|
||||
public override void Init(byte[] properties)
|
||||
{
|
||||
if (properties.Length != 1)
|
||||
throw new InvalidDataException("LZMA properties unexpected length");
|
||||
|
||||
_dictionarySize = (byte)(properties[0] & 0x3F);
|
||||
var reserved = properties[0] & 0xC0;
|
||||
if (reserved != 0)
|
||||
throw new InvalidDataException("Reserved bits used in LZMA properties");
|
||||
}
|
||||
|
||||
public override void ValidateFilter()
|
||||
{
|
||||
}
|
||||
|
||||
public override void SetBaseStream(Stream stream)
|
||||
{
|
||||
BaseStream = new SharpCompress.Compressors.LZMA.LzmaStream(new[] { _dictionarySize }, stream);
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
return BaseStream.Read(buffer, offset, count);
|
||||
}
|
||||
}
|
||||
}
|
||||
32
src/SharpCompress/Compressors/Xz/MultiByteIntegers.cs
Normal file
@@ -0,0 +1,32 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
    internal static class MultiByteIntegers
    {
        public static ulong ReadXZInteger(this BinaryReader reader, int MaxBytes = 9)
        {
            if (MaxBytes <= 0)
                throw new ArgumentOutOfRangeException();
            if (MaxBytes > 9)
                MaxBytes = 9;

            byte LastByte = reader.ReadByte();
            ulong Output = (ulong)LastByte & 0x7F;

            int i = 0;
            while ((LastByte & 0x80) != 0)
            {
                if (i >= MaxBytes)
                    throw new InvalidDataException();
                LastByte = reader.ReadByte();
                if (LastByte == 0)
                    throw new InvalidDataException();

                Output |= ((ulong)(LastByte & 0x7F)) << (i * 7);
            }
            return Output;
        }
    }
}
44
src/SharpCompress/Compressors/Xz/ReadOnlyStream.cs
Normal file
@@ -0,0 +1,44 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
    public abstract class ReadOnlyStream : Stream
    {
        public Stream BaseStream { get; protected set; }

        public override bool CanRead => BaseStream.CanRead;

        public override bool CanSeek => false;

        public override bool CanWrite => false;

        public override long Length => throw new NotSupportedException();

        public override long Position
        {
            get => throw new NotSupportedException();
            set => throw new NotSupportedException();
        }

        public override void Flush()
        {
            throw new NotSupportedException();
        }

        public override long Seek(long offset, SeekOrigin origin)
        {
            throw new NotSupportedException();
        }

        public override void SetLength(long value)
        {
            throw new NotSupportedException();
        }

        public override void Write(byte[] buffer, int offset, int count)
        {
            throw new NotSupportedException();
        }
    }
}
165
src/SharpCompress/Compressors/Xz/XZBlock.cs
Normal file
@@ -0,0 +1,165 @@
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using SharpCompress.Compressors.Xz.Filters;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
internal sealed class XZBlock : XZReadOnlyStream
|
||||
{
|
||||
public int BlockHeaderSize => (_blockHeaderSizeByte + 1) * 4;
|
||||
public ulong? CompressedSize { get; private set; }
|
||||
public ulong? UncompressedSize { get; private set; }
|
||||
public Stack<BlockFilter> Filters { get; private set; } = new Stack<BlockFilter>();
|
||||
public bool HeaderIsLoaded { get; private set; }
|
||||
private CheckType _checkType;
|
||||
private int _checkSize;
|
||||
private bool _streamConnected;
|
||||
private int _numFilters;
|
||||
private byte _blockHeaderSizeByte;
|
||||
private Stream _decomStream;
|
||||
private bool _endOfStream;
|
||||
private bool _paddingSkipped;
|
||||
private bool _crcChecked;
|
||||
private ulong _bytesRead;
|
||||
|
||||
public XZBlock(Stream stream, CheckType checkType, int checkSize) : base(stream)
|
||||
{
|
||||
_checkType = checkType;
|
||||
_checkSize = checkSize;
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
int bytesRead = 0;
|
||||
if (!HeaderIsLoaded)
|
||||
LoadHeader();
|
||||
if (!_streamConnected)
|
||||
ConnectStream();
|
||||
if (!_endOfStream)
|
||||
bytesRead = _decomStream.Read(buffer, offset, count);
|
||||
if (bytesRead != count)
|
||||
_endOfStream = true;
|
||||
if (_endOfStream && !_paddingSkipped)
|
||||
SkipPadding();
|
||||
if (_endOfStream && !_crcChecked)
|
||||
CheckCrc();
|
||||
_bytesRead += (ulong)bytesRead;
|
||||
return bytesRead;
|
||||
}
|
||||
|
||||
private void SkipPadding()
|
||||
{
|
||||
int padding = (int)(_bytesRead % 4);
|
||||
if (padding > 0)
|
||||
{
|
||||
byte[] paddingBytes = new byte[padding];
|
||||
BaseStream.Read(paddingBytes, 0, padding);
|
||||
if (paddingBytes.Any(b => b != 0))
|
||||
throw new InvalidDataException("Padding bytes were non-null");
|
||||
}
|
||||
_paddingSkipped = true;
|
||||
}
|
||||
|
||||
private void CheckCrc()
|
||||
{
|
||||
byte[] crc = new byte[_checkSize];
|
||||
BaseStream.Read(crc, 0, _checkSize);
|
||||
// Actually do a check (and read in the bytes
|
||||
// into the function throughout the stream read).
|
||||
_crcChecked = true;
|
||||
}
|
||||
|
||||
private void ConnectStream()
|
||||
{
|
||||
_decomStream = BaseStream;
|
||||
while (Filters.Any())
|
||||
{
|
||||
var filter = Filters.Pop();
|
||||
filter.SetBaseStream(_decomStream);
|
||||
_decomStream = filter;
|
||||
}
|
||||
_streamConnected = true;
|
||||
}
|
||||
|
||||
private void LoadHeader()
|
||||
{
|
||||
ReadHeaderSize();
|
||||
byte[] headerCache = CacheHeader();
|
||||
|
||||
using (var cache = new MemoryStream(headerCache))
|
||||
using (var cachedReader = new BinaryReader(cache))
|
||||
{
|
||||
cachedReader.BaseStream.Position = 1; // skip the header size byte
|
||||
ReadBlockFlags(cachedReader);
|
||||
ReadFilters(cachedReader);
|
||||
}
|
||||
HeaderIsLoaded = true;
|
||||
}
|
||||
|
||||
private void ReadHeaderSize()
|
||||
{
|
||||
_blockHeaderSizeByte = (byte)BaseStream.ReadByte();
|
||||
if (_blockHeaderSizeByte == 0)
|
||||
throw new XZIndexMarkerReachedException();
|
||||
}
|
||||
|
||||
private byte[] CacheHeader()
|
||||
{
|
||||
byte[] blockHeaderWithoutCrc = new byte[BlockHeaderSize - 4];
|
||||
blockHeaderWithoutCrc[0] = _blockHeaderSizeByte;
|
||||
var read = BaseStream.Read(blockHeaderWithoutCrc, 1, BlockHeaderSize - 5);
|
||||
if (read != BlockHeaderSize - 5)
|
||||
throw new EndOfStreamException("Reached end of stream unexectedly");
|
||||
|
||||
uint crc = BaseStream.ReadLittleEndianUInt32();
|
||||
uint calcCrc = Crc32.Compute(blockHeaderWithoutCrc);
|
||||
if (crc != calcCrc)
|
||||
throw new InvalidDataException("Block header corrupt");
|
||||
|
||||
return blockHeaderWithoutCrc;
|
||||
}
|
||||
|
||||
private void ReadBlockFlags(BinaryReader reader)
|
||||
{
|
||||
var blockFlags = reader.ReadByte();
|
||||
_numFilters = (blockFlags & 0x03) + 1;
|
||||
byte reserved = (byte)(blockFlags & 0x3C);
|
||||
|
||||
if (reserved != 0)
|
||||
throw new InvalidDataException("Reserved bytes used, perhaps an unknown XZ implementation");
|
||||
|
||||
bool compressedSizePresent = (blockFlags & 0x40) != 0;
|
||||
bool uncompressedSizePresent = (blockFlags & 0x80) != 0;
|
||||
|
||||
if (compressedSizePresent)
|
||||
CompressedSize = reader.ReadXZInteger();
|
||||
if (uncompressedSizePresent)
|
||||
UncompressedSize = reader.ReadXZInteger();
|
||||
}
|
||||
|
||||
private void ReadFilters(BinaryReader reader, long baseStreamOffset = 0)
|
||||
{
|
||||
int nonLastSizeChangers = 0;
|
||||
for (int i = 0; i < _numFilters; i++)
|
||||
{
|
||||
var filter = BlockFilter.Read(reader);
|
||||
if ((i + 1 == _numFilters && !filter.AllowAsLast)
|
||||
|| (i + 1 < _numFilters && !filter.AllowAsNonLast))
|
||||
throw new InvalidDataException("Block Filters in bad order");
|
||||
if (filter.ChangesDataSize && i + 1 < _numFilters)
|
||||
nonLastSizeChangers++;
|
||||
filter.ValidateFilter();
|
||||
Filters.Push(filter);
|
||||
}
|
||||
if (nonLastSizeChangers > 2)
|
||||
throw new InvalidDataException("More than two non-last block filters cannot change stream size");
|
||||
|
||||
int blockHeaderPaddingSize = BlockHeaderSize -
|
||||
(4 + (int)(reader.BaseStream.Position - baseStreamOffset));
|
||||
byte[] blockHeaderPadding = reader.ReadBytes(blockHeaderPaddingSize);
|
||||
if (!blockHeaderPadding.All(b => b == 0))
|
||||
throw new InvalidDataException("Block header contains unknown fields");
|
||||
}
|
||||
}
|
||||
}
|
||||
49
src/SharpCompress/Compressors/Xz/XZFooter.cs
Normal file
@@ -0,0 +1,49 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
public class XZFooter
|
||||
{
|
||||
private readonly BinaryReader _reader;
|
||||
private readonly byte[] _magicBytes = new byte[] { 0x59, 0x5A };
|
||||
public long StreamStartPosition { get; private set; }
|
||||
public long BackwardSize { get; private set; }
|
||||
public byte[] StreamFlags { get; private set; }
|
||||
|
||||
public XZFooter(BinaryReader reader)
|
||||
{
|
||||
_reader = reader;
|
||||
StreamStartPosition = reader.BaseStream.Position;
|
||||
}
|
||||
|
||||
public static XZFooter FromStream(Stream stream)
|
||||
{
|
||||
var footer = new XZFooter(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8));
|
||||
footer.Process();
|
||||
return footer;
|
||||
}
|
||||
|
||||
public void Process()
|
||||
{
|
||||
uint crc = _reader.ReadLittleEndianUInt32();
|
||||
byte[] footerBytes = _reader.ReadBytes(6);
|
||||
uint myCrc = Crc32.Compute(footerBytes);
|
||||
if (crc != myCrc)
|
||||
throw new InvalidDataException("Footer corrupt");
|
||||
using (var stream = new MemoryStream(footerBytes))
|
||||
using (var reader = new BinaryReader(stream))
|
||||
{
|
||||
BackwardSize = (reader.ReadLittleEndianUInt32() + 1) * 4;
|
||||
StreamFlags = reader.ReadBytes(2);
|
||||
}
|
||||
byte[] magBy = _reader.ReadBytes(2);
|
||||
if (!Enumerable.SequenceEqual(magBy, _magicBytes))
|
||||
{
|
||||
throw new InvalidDataException("Magic footer missing");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
55
src/SharpCompress/Compressors/Xz/XZHeader.cs
Normal file
@@ -0,0 +1,55 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
public class XZHeader
|
||||
{
|
||||
private readonly BinaryReader _reader;
|
||||
private readonly byte[] MagicHeader = { 0xFD, 0x37, 0x7A, 0x58, 0x5a, 0x00 };
|
||||
|
||||
public CheckType BlockCheckType { get; private set; }
|
||||
public int BlockCheckSize => ((((int)BlockCheckType) + 2) / 3) * 4;
|
||||
|
||||
public XZHeader(BinaryReader reader)
|
||||
{
|
||||
_reader = reader;
|
||||
}
|
||||
|
||||
public static XZHeader FromStream(Stream stream)
|
||||
{
|
||||
var header = new XZHeader(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8));
|
||||
header.Process();
|
||||
return header;
|
||||
}
|
||||
|
||||
public void Process()
|
||||
{
|
||||
CheckMagicBytes(_reader.ReadBytes(6));
|
||||
ProcessStreamFlags();
|
||||
}
|
||||
|
||||
private void ProcessStreamFlags()
|
||||
{
|
||||
byte[] streamFlags = _reader.ReadBytes(2);
|
||||
UInt32 crc = _reader.ReadLittleEndianUInt32();
|
||||
UInt32 calcCrc = Crc32.Compute(streamFlags);
|
||||
if (crc != calcCrc)
|
||||
throw new InvalidDataException("Stream header corrupt");
|
||||
|
||||
BlockCheckType = (CheckType)(streamFlags[1] & 0x0F);
|
||||
byte futureUse = (byte)(streamFlags[1] & 0xF0);
|
||||
if (futureUse != 0 || streamFlags[0] != 0)
|
||||
throw new InvalidDataException("Unknown XZ Stream Version");
|
||||
}
|
||||
|
||||
private void CheckMagicBytes(byte[] header)
|
||||
{
|
||||
if (!Enumerable.SequenceEqual(header, MagicHeader))
|
||||
throw new InvalidDataException("Invalid XZ Stream");
|
||||
}
|
||||
}
|
||||
}
|
||||
73
src/SharpCompress/Compressors/Xz/XZIndex.cs
Normal file
@@ -0,0 +1,73 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
[CLSCompliant(false)]
|
||||
public class XZIndex
|
||||
{
|
||||
private readonly BinaryReader _reader;
|
||||
public long StreamStartPosition { get; private set; }
|
||||
public ulong NumberOfRecords { get; private set; }
|
||||
public List<XZIndexRecord> Records { get; } = new List<XZIndexRecord>();
|
||||
|
||||
private bool _indexMarkerAlreadyVerified;
|
||||
|
||||
public XZIndex(BinaryReader reader, bool indexMarkerAlreadyVerified)
|
||||
{
|
||||
_reader = reader;
|
||||
_indexMarkerAlreadyVerified = indexMarkerAlreadyVerified;
|
||||
StreamStartPosition = reader.BaseStream.Position;
|
||||
if (indexMarkerAlreadyVerified)
|
||||
StreamStartPosition--;
|
||||
}
|
||||
|
||||
public static XZIndex FromStream(Stream stream, bool indexMarkerAlreadyVerified)
|
||||
{
|
||||
var index = new XZIndex(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8), indexMarkerAlreadyVerified);
|
||||
index.Process();
|
||||
return index;
|
||||
}
|
||||
|
||||
public void Process()
|
||||
{
|
||||
if (!_indexMarkerAlreadyVerified)
|
||||
VerifyIndexMarker();
|
||||
NumberOfRecords = _reader.ReadXZInteger();
|
||||
for (ulong i = 0; i < NumberOfRecords; i++)
|
||||
{
|
||||
Records.Add(XZIndexRecord.FromBinaryReader(_reader));
|
||||
}
|
||||
SkipPadding();
|
||||
VerifyCrc32();
|
||||
}
|
||||
|
||||
private void VerifyIndexMarker()
|
||||
{
|
||||
byte marker = _reader.ReadByte();
|
||||
if (marker != 0)
|
||||
throw new InvalidDataException("Not an index block");
|
||||
}
|
||||
|
||||
private void SkipPadding()
|
||||
{
|
||||
int padding = (int)(_reader.BaseStream.Position - StreamStartPosition) % 4;
|
||||
if (padding > 0)
|
||||
{
|
||||
byte[] paddingBytes = _reader.ReadBytes(padding);
|
||||
if (paddingBytes.Any(b => b != 0))
|
||||
throw new InvalidDataException("Padding bytes were non-null");
|
||||
}
|
||||
}
|
||||
|
||||
private void VerifyCrc32()
|
||||
{
|
||||
uint crc = _reader.ReadLittleEndianUInt32();
|
||||
// TODO verify this matches
|
||||
}
|
||||
}
|
||||
}
|
||||
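ReadXZInteger is an extension method that is not part of this diff; for context, a minimal sketch of the variable-length ("multibyte") integer decoding the xz format uses for these index fields, written from the file-format spec rather than from the PR:

```csharp
using System.IO;

// Sketch of the xz "multibyte integer" decoding: 7 bits per byte, low bits first,
// high bit set on every byte except the last, at most 9 bytes.
static ulong ReadXZVarInt(Stream stream)
{
    ulong value = 0;
    for (int i = 0; i < 9; i++)
    {
        int b = stream.ReadByte();
        if (b < 0) throw new EndOfStreamException();
        value |= (ulong)(b & 0x7F) << (7 * i);
        if ((b & 0x80) == 0) return value;
    }
    throw new InvalidDataException("Multibyte integer is too long");
}
```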
@@ -0,0 +1,8 @@
using System;

namespace SharpCompress.Compressors.Xz
{
public class XZIndexMarkerReachedException : Exception
{
}
}
src/SharpCompress/Compressors/Xz/XZIndexRecord.cs (new file, 22 lines)
@@ -0,0 +1,22 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
[CLSCompliant(false)]
public class XZIndexRecord
{
public ulong UnpaddedSize { get; private set; }
public ulong UncompressedSize { get; private set; }

protected XZIndexRecord() { }

public static XZIndexRecord FromBinaryReader(BinaryReader br)
{
var record = new XZIndexRecord();
record.UnpaddedSize = br.ReadXZInteger();
record.UncompressedSize = br.ReadXZInteger();
return record;
}
}
}
src/SharpCompress/Compressors/Xz/XZReadOnlyStream.cs (new file, 14 lines)
@@ -0,0 +1,14 @@
using System.IO;

namespace SharpCompress.Compressors.Xz
{
public abstract class XZReadOnlyStream : ReadOnlyStream
{
public XZReadOnlyStream(Stream stream)
{
BaseStream = stream;
if (!BaseStream.CanRead)
throw new InvalidDataException("Must be able to read from stream");
}
}
}
src/SharpCompress/Compressors/Xz/XZStream.cs (new file, 116 lines)
@@ -0,0 +1,116 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
[CLSCompliant(false)]
public sealed class XZStream : XZReadOnlyStream
{
public static bool IsXZStream(Stream stream)
{
try
{
return null != XZHeader.FromStream(stream);
}
catch (Exception)
{
return false;
}
}

private void AssertBlockCheckTypeIsSupported()
{
switch (Header.BlockCheckType)
{
case CheckType.NONE:
break;
case CheckType.CRC32:
break;
case CheckType.CRC64:
break;
case CheckType.SHA256:
throw new NotImplementedException();
default:
throw new NotSupportedException("Check Type unknown to this version of decoder.");
}
}
public XZHeader Header { get; private set; }
public XZIndex Index { get; private set; }
public XZFooter Footer { get; private set; }
public bool HeaderIsRead { get; private set; }
private XZBlock _currentBlock;

bool _endOfStream;

public XZStream(Stream stream) : base(stream)
{
}

public override int Read(byte[] buffer, int offset, int count)
{
int bytesRead = 0;
if (_endOfStream)
return bytesRead;
if (!HeaderIsRead)
ReadHeader();
bytesRead = ReadBlocks(buffer, offset, count);
if (bytesRead < count)
{
_endOfStream = true;
ReadIndex();
ReadFooter();
}
return bytesRead;
}

private void ReadHeader()
{
Header = XZHeader.FromStream(BaseStream);
AssertBlockCheckTypeIsSupported();
HeaderIsRead = true;
}

private void ReadIndex()
{
Index = XZIndex.FromStream(BaseStream, true);
// TODO veryfy Index
}

private void ReadFooter()
{
Footer = XZFooter.FromStream(BaseStream);
// TODO verify footer
}

private int ReadBlocks(byte[] buffer, int offset, int count)
{
int bytesRead = 0;
if (_currentBlock == null)
NextBlock();
for (;;)
{
try
{
if (bytesRead >= count)
break;
int remaining = count - bytesRead;
int newOffset = offset + bytesRead;
int justRead = _currentBlock.Read(buffer, newOffset, remaining);
if (justRead < remaining)
NextBlock();
bytesRead += justRead;
}
catch (XZIndexMarkerReachedException)
{
break;
}
}
return bytesRead;
}

private void NextBlock()
{
_currentBlock = new XZBlock(BaseStream, Header.BlockCheckType, Header.BlockCheckSize);
}
}
}
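A minimal decompression sketch for the new stream; file names are placeholders:

```csharp
using System.IO;
using SharpCompress.Compressors.Xz;

using (Stream input = File.OpenRead("example.txt.xz"))
using (var xz = new XZStream(input))
using (Stream output = File.Create("example.txt"))
{
    xz.CopyTo(output); // XZStream is read-only and forward-only, so a straight copy is the typical use
}
```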
@@ -156,7 +156,7 @@ namespace SharpCompress.Converters
{
if (dest == null)
{
throw new ArgumentNullException("dest");
throw new ArgumentNullException(nameof(dest));
}
if (destIdx < 0 || destIdx > dest.Length - size)
{
@@ -170,7 +170,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -195,7 +195,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -221,7 +221,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -247,7 +247,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -273,7 +273,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -299,7 +299,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -325,7 +325,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 2)
{
@@ -351,7 +351,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 2)
{
@@ -468,7 +468,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -494,7 +494,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -520,7 +520,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -546,7 +546,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -572,7 +572,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -598,7 +598,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -624,7 +624,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 2)
{
@@ -650,7 +650,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 2)
{
src/SharpCompress/Crypto/Crc32Stream.cs (new file, 106 lines)
@@ -0,0 +1,106 @@
using System;
using System.Collections.Generic;
using System.IO;

namespace SharpCompress.Crypto
{
internal sealed class Crc32Stream : Stream
{
public const uint DefaultPolynomial = 0xedb88320u;
public const uint DefaultSeed = 0xffffffffu;

private static uint[] defaultTable;

private readonly uint[] table;
private uint hash;

private readonly Stream stream;

public Crc32Stream(Stream stream)
: this(stream, DefaultPolynomial, DefaultSeed)
{
}

public Crc32Stream(Stream stream, uint polynomial, uint seed)
{
this.stream = stream;
table = InitializeTable(polynomial);
hash = seed;
}

public Stream WrappedStream => stream;

public override void Flush()
{
stream.Flush();
}

public override int Read(byte[] buffer, int offset, int count) => throw new NotSupportedException();

public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();

public override void SetLength(long value) => throw new NotSupportedException();

public override void Write(byte[] buffer, int offset, int count)
{
stream.Write(buffer, offset, count);
hash = CalculateCrc(table, hash, buffer, offset, count);
}

public override bool CanRead => stream.CanRead;
public override bool CanSeek => false;
public override bool CanWrite => stream.CanWrite;
public override long Length => throw new NotSupportedException();
public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }

public uint Crc => ~hash;

public static uint Compute(byte[] buffer)
{
return Compute(DefaultSeed, buffer);
}

public static uint Compute(uint seed, byte[] buffer)
{
return Compute(DefaultPolynomial, seed, buffer);
}

public static uint Compute(uint polynomial, uint seed, byte[] buffer)
{
return ~CalculateCrc(InitializeTable(polynomial), seed, buffer, 0, buffer.Length);
}

private static uint[] InitializeTable(uint polynomial)
{
if (polynomial == DefaultPolynomial && defaultTable != null)
return defaultTable;

var createTable = new uint[256];
for (var i = 0; i < 256; i++)
{
var entry = (uint)i;
for (var j = 0; j < 8; j++)
if ((entry & 1) == 1)
entry = (entry >> 1) ^ polynomial;
else
entry = entry >> 1;
createTable[i] = entry;
}

if (polynomial == DefaultPolynomial)
defaultTable = createTable;

return createTable;
}

private static uint CalculateCrc(uint[] table, uint crc, byte[] buffer, int offset, int count)
{
unchecked
{
for (int i = offset, end = offset + count; i < end; i++)
crc = (crc >> 8) ^ table[(crc ^ buffer[i]) & 0xFF];
}
return crc;
}
}
}
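Crc32Stream is internal, so the sketch below only illustrates the intended pattern inside the library (System.IO and System.Text usings assumed); the payload is a placeholder:

```csharp
byte[] payload = Encoding.ASCII.GetBytes("hello world");

// One-shot helper:
uint crcA = Crc32Stream.Compute(payload);

// Streaming: wrap the real destination and hash everything written through it.
using (var destination = new MemoryStream())
{
    var crcStream = new Crc32Stream(destination);
    crcStream.Write(payload, 0, payload.Length);
    uint crcB = crcStream.Crc; // same value as crcA
}
```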
@@ -12,7 +12,7 @@ namespace Org.BouncyCastle.Crypto.Parameters
{
if (key == null)
{
throw new ArgumentNullException("key");
throw new ArgumentNullException(nameof(key));
}

this.key = (byte[])key.Clone();
@@ -25,15 +25,15 @@ namespace Org.BouncyCastle.Crypto.Parameters
{
if (key == null)
{
throw new ArgumentNullException("key");
throw new ArgumentNullException(nameof(key));
}
if (keyOff < 0 || keyOff > key.Length)
{
throw new ArgumentOutOfRangeException("keyOff");
throw new ArgumentOutOfRangeException(nameof(keyOff));
}
if (keyLen < 0 || (keyOff + keyLen) > key.Length)
{
throw new ArgumentOutOfRangeException("keyLen");
throw new ArgumentOutOfRangeException(nameof(keyLen));
}

this.key = new byte[keyLen];
@@ -22,6 +22,7 @@ namespace SharpCompress.IO

public override void Flush()
{
writableStream.Flush();
}

public override long Length => throw new NotSupportedException();

@@ -105,6 +105,12 @@ namespace SharpCompress.IO

public override int Read(byte[] buffer, int offset, int count)
{
//don't actually read if we don't really want to read anything
//currently a network stream bug on Windows for .NET Core
if (count == 0)
{
return 0;
}
int read;
if (isRewound && bufferStream.Position != bufferStream.Length)
{
@@ -139,30 +139,28 @@ namespace SharpCompress.Readers
}
}

private readonly byte[] skipBuffer = new byte[4096];

private void Skip()
{
if (!Entry.IsSolid)
if (ArchiveType != ArchiveType.Rar
&& !Entry.IsSolid
&& Entry.CompressedSize > 0)
{
var rawStream = Entry.Parts.First().GetRawStream();
//not solid and has a known compressed size then we can skip raw bytes.
var part = Entry.Parts.First();
var rawStream = part.GetRawStream();

if (rawStream != null)
{
var bytesToAdvance = Entry.CompressedSize;
for (var i = 0; i < bytesToAdvance / skipBuffer.Length; i++)
{
rawStream.Read(skipBuffer, 0, skipBuffer.Length);
}
rawStream.Read(skipBuffer, 0, (int)(bytesToAdvance % skipBuffer.Length));
rawStream.Skip(bytesToAdvance);
part.Skipped = true;
return;
}
}
//don't know the size so we have to try to decompress to skip
using (var s = OpenEntryStream())
{
while (s.Read(skipBuffer, 0, skipBuffer.Length) > 0)
{
}
s.Skip();
}
}
@@ -29,11 +29,11 @@ namespace SharpCompress.Readers.GZip
return new GZipReader(stream, options ?? new ReaderOptions());
}

#endregion
#endregion Open

internal override IEnumerable<GZipEntry> GetEntries(Stream stream)
{
return GZipEntry.GetEntries(stream);
return GZipEntry.GetEntries(stream, Options);
}
}
}
@@ -93,10 +93,6 @@ namespace SharpCompress.Readers
using (FileStream fs = File.Open(destinationFileName, fm))
{
reader.WriteEntryTo(fs);
//using (Stream s = reader.OpenEntryStream())
//{
//    s.TransferTo(fs);
//}
}
reader.Entry.PreserveExtractionOptions(destinationFileName, options);
}
@@ -14,6 +14,7 @@ using SharpCompress.Readers.Rar;
using SharpCompress.Readers.Tar;
using SharpCompress.Readers.Zip;
using SharpCompress.Compressors.LZMA;
using SharpCompress.Compressors.Xz;

namespace SharpCompress.Readers
{
@@ -76,7 +77,6 @@ namespace SharpCompress.Readers
return new TarReader(rewindableStream, options, CompressionType.LZip);
}
}

rewindableStream.Rewind(false);
if (RarArchive.IsRarFile(rewindableStream, options))
{
@@ -90,7 +90,18 @@ namespace SharpCompress.Readers
rewindableStream.Rewind(true);
return TarReader.Open(rewindableStream, options);
}
throw new InvalidOperationException("Cannot determine compressed stream type. Supported Reader Formats: Zip, GZip, BZip2, Tar, Rar");
rewindableStream.Rewind(false);
if (XZStream.IsXZStream(rewindableStream))
{
rewindableStream.Rewind(true);
XZStream testStream = new XZStream(rewindableStream);
if (TarArchive.IsTarFile(testStream))
{
rewindableStream.Rewind(true);
return new TarReader(rewindableStream, options, CompressionType.Xz);
}
}
throw new InvalidOperationException("Cannot determine compressed stream type. Supported Reader Formats: Zip, GZip, BZip2, Tar, Rar, LZip, XZ");
}
}
}
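With the XZ probe added above, a .tar.xz stream opened through ReaderFactory should come back as a TarReader; a usage sketch with placeholder paths, reusing the WriteEntryTo call seen earlier in this diff:

```csharp
using (Stream stream = File.OpenRead("archive.tar.xz"))
using (var reader = ReaderFactory.Open(stream))
{
    while (reader.MoveToNextEntry())
    {
        if (reader.Entry.IsDirectory)
        {
            continue;
        }
        using (var fs = File.Create(Path.GetFileName(reader.Entry.Key)))
        {
            reader.WriteEntryTo(fs);
        }
    }
}
```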
@@ -8,6 +8,7 @@ namespace SharpCompress.Readers
/// Look for RarArchive (Check for self-extracting archives or cases where RarArchive isn't at the start of the file)
/// </summary>
public bool LookForHeader { get; set; }

public string Password { get; set; }
}
}
@@ -10,6 +10,7 @@ using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
using SharpCompress.IO;
using SharpCompress.Compressors.LZMA;
using SharpCompress.Compressors.Xz;

namespace SharpCompress.Readers.Tar
{
@@ -43,6 +44,10 @@ namespace SharpCompress.Readers.Tar
{
return new LZipStream(stream, CompressionMode.Decompress);
}
case CompressionType.Xz:
{
return new XZStream(stream);
}
case CompressionType.None:
{
return stream;
@@ -109,11 +114,11 @@ namespace SharpCompress.Readers.Tar
return new TarReader(rewindableStream, options, CompressionType.None);
}

#endregion
#endregion Open

internal override IEnumerable<TarEntry> GetEntries(Stream stream)
{
return TarEntry.GetEntries(StreamingMode.Streaming, stream, compressionType);
return TarEntry.GetEntries(StreamingMode.Streaming, stream, compressionType, Options.ArchiveEncoding);
}
}
}
@@ -8,13 +8,13 @@ namespace SharpCompress.Readers.Zip
{
public class ZipReader : AbstractReader<ZipEntry, ZipVolume>
{
private readonly StreamingZipHeaderFactory headerFactory;
private readonly StreamingZipHeaderFactory _headerFactory;

internal ZipReader(Stream stream, ReaderOptions options)
: base(options, ArchiveType.Zip)
{
Volume = new ZipVolume(stream, options);
headerFactory = new StreamingZipHeaderFactory(options.Password);
_headerFactory = new StreamingZipHeaderFactory(options.Password, options.ArchiveEncoding);
}

public override ZipVolume Volume { get; }
@@ -33,26 +33,26 @@ namespace SharpCompress.Readers.Zip
return new ZipReader(stream, options ?? new ReaderOptions());
}

#endregion
#endregion Open

internal override IEnumerable<ZipEntry> GetEntries(Stream stream)
{
foreach (ZipHeader h in headerFactory.ReadStreamHeader(stream))
foreach (ZipHeader h in _headerFactory.ReadStreamHeader(stream))
{
if (h != null)
{
switch (h.ZipHeaderType)
{
case ZipHeaderType.LocalEntry:
{
yield return new ZipEntry(new StreamingZipFilePart(h as LocalEntryHeader,
stream));
}
{
yield return new ZipEntry(new StreamingZipFilePart(h as LocalEntryHeader,
stream));
}
break;
case ZipHeaderType.DirectoryEnd:
{
yield break;
}
{
yield break;
}
}
}
}
@@ -1,11 +1,10 @@
<Project Sdk="Microsoft.NET.Sdk">

<PropertyGroup>
<AssemblyTitle>SharpCompress - Pure C# Decompression/Compression</AssemblyTitle>
<NeutralLanguage>en-US</NeutralLanguage>
<VersionPrefix>0.16.1</VersionPrefix>
<AssemblyVersion>0.16.1.0</AssemblyVersion>
<FileVersion>0.16.1.0</FileVersion>
<VersionPrefix>0.18</VersionPrefix>
<AssemblyVersion>0.18.0.0</AssemblyVersion>
<FileVersion>0.18.0.0</FileVersion>
<Authors>Adam Hathcock</Authors>
<TargetFrameworks Condition="'$(LibraryFrameworks)'==''">net45;net35;netstandard1.0;netstandard1.3</TargetFrameworks>
<TargetFrameworks Condition="'$(LibraryFrameworks)'!=''">$(LibraryFrameworks)</TargetFrameworks>
@@ -16,16 +15,14 @@
<SignAssembly>true</SignAssembly>
<PublicSign Condition=" '$(OS)' != 'Windows_NT' ">true</PublicSign>
<PackageId>SharpCompress</PackageId>
<PackageTags>rar;unrar;zip;unzip;bzip2;gzip;tar;7zip</PackageTags>
<PackageTags>rar;unrar;zip;unzip;bzip2;gzip;tar;7zip;lzip;xz</PackageTags>
<PackageProjectUrl>https://github.com/adamhathcock/sharpcompress</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/adamhathcock/sharpcompress/blob/master/LICENSE.txt</PackageLicenseUrl>
<GenerateAssemblyTitleAttribute>false</GenerateAssemblyTitleAttribute>
<GenerateAssemblyProductAttribute>false</GenerateAssemblyProductAttribute>
<Description>SharpCompress is a compression library for NET Standard 1.0 that can unrar, decompress 7zip, zip/unzip, tar/untar bzip2/unbzip2 and gzip/ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.</Description>
<Description>SharpCompress is a compression library for NET Standard 1.0 that can unrar, decompress 7zip, decompress xz, zip/unzip, tar/untar lzip/unlzip, bzip2/unbzip2 and gzip/ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.</Description>
</PropertyGroup>

<PropertyGroup Condition=" '$(TargetFramework)' == 'netstandard1.0' ">
<DefineConstants>$(DefineConstants);NO_FILE;NO_CRYPTO;SILVERLIGHT</DefineConstants>
</PropertyGroup>

</Project>
</Project>
@@ -7,7 +7,7 @@ using SharpCompress.Readers;

namespace SharpCompress
{
internal static class Utility
{
{
public static ReadOnlyCollection<T> ToReadOnly<T>(this IEnumerable<T> items)
{
return new ReadOnlyCollection<T>(items.ToList());
@@ -138,7 +138,7 @@ namespace SharpCompress

public static void Skip(this Stream source, long advanceAmount)
{
byte[] buffer = new byte[32 * 1024];
byte[] buffer = GetTransferByteArray();
int read = 0;
int readCount = 0;
do
@@ -162,9 +162,9 @@ namespace SharpCompress
while (true);
}

public static void SkipAll(this Stream source)
public static void Skip(this Stream source)
{
byte[] buffer = new byte[32 * 1024];
byte[] buffer = GetTransferByteArray();
do
{
}
@@ -6,29 +6,30 @@ namespace SharpCompress.Writers
{
public abstract class AbstractWriter : IWriter
{
private bool closeStream;
private bool isDisposed;

protected AbstractWriter(ArchiveType type)
protected AbstractWriter(ArchiveType type, WriterOptions writerOptions)
{
WriterType = type;
WriterOptions = writerOptions;
}

protected void InitalizeStream(Stream stream, bool closeStream)
protected void InitalizeStream(Stream stream)
{
OutputStream = stream;
this.closeStream = closeStream;
}

protected Stream OutputStream { get; private set; }

public ArchiveType WriterType { get; }

protected WriterOptions WriterOptions { get; }

public abstract void Write(string filename, Stream source, DateTime? modificationTime);

protected virtual void Dispose(bool isDisposing)
{
if (isDisposing && closeStream)
if (isDisposing && !WriterOptions.LeaveStreamOpen)
{
OutputStream.Dispose();
}
@@ -8,12 +8,15 @@ namespace SharpCompress.Writers.GZip
{
public class GZipWriter : AbstractWriter
{
private bool wroteToStream;
private bool _wroteToStream;

public GZipWriter(Stream destination, bool leaveOpen = false)
: base(ArchiveType.GZip)
public GZipWriter(Stream destination, GZipWriterOptions options = null)
: base(ArchiveType.GZip, options ?? new GZipWriterOptions())
{
InitalizeStream(new GZipStream(destination, CompressionMode.Compress, leaveOpen), !leaveOpen);
InitalizeStream(new GZipStream(destination, CompressionMode.Compress,
options?.CompressionLevel ?? CompressionLevel.Default,
WriterOptions.LeaveStreamOpen,
WriterOptions.ArchiveEncoding.GetEncoding()));
}

protected override void Dispose(bool isDisposing)
@@ -28,7 +31,7 @@ namespace SharpCompress.Writers.GZip

public override void Write(string filename, Stream source, DateTime? modificationTime)
{
if (wroteToStream)
if (_wroteToStream)
{
throw new ArgumentException("Can only write a single stream to a GZip file.");
}
@@ -36,7 +39,7 @@ namespace SharpCompress.Writers.GZip
stream.FileName = filename;
stream.LastModified = modificationTime;
source.TransferTo(stream);
wroteToStream = true;
_wroteToStream = true;
}
}
}
src/SharpCompress/Writers/GZip/GZipWriterOptions.cs (new file, 28 lines)
@@ -0,0 +1,28 @@
using SharpCompress.Common;
using SharpCompress.Compressors.Deflate;

namespace SharpCompress.Writers.GZip
{
public class GZipWriterOptions : WriterOptions
{
public GZipWriterOptions()
: base(CompressionType.GZip)
{
}

internal GZipWriterOptions(WriterOptions options)
: base(options.CompressionType)
{
LeaveStreamOpen = options.LeaveStreamOpen;
ArchiveEncoding = options.ArchiveEncoding;

var writerOptions = options as GZipWriterOptions;
if (writerOptions != null)
{
CompressionLevel = writerOptions.CompressionLevel;
}
}

public CompressionLevel CompressionLevel { get; set; } = CompressionLevel.Default;
}
}
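A sketch of the new options-based constructor replacing the old leaveOpen flag; paths are placeholders and CompressionLevel comes from SharpCompress.Compressors.Deflate:

```csharp
using (Stream output = File.Create("data.txt.gz"))
using (Stream input = File.OpenRead("data.txt"))
using (var writer = new GZipWriter(output, new GZipWriterOptions
{
    CompressionLevel = CompressionLevel.BestCompression,
    LeaveStreamOpen = false
}))
{
    writer.Write("data.txt", input, DateTime.Now);
}
```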
@@ -1,17 +1,18 @@
using System;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Common.Tar;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Compressors;
using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Compressors.LZMA;

namespace SharpCompress.Writers.Tar
{
public class TarWriter : AbstractWriter
{
public TarWriter(Stream destination, WriterOptions options)
: base(ArchiveType.Tar)
: base(ArchiveType.Tar, options)
{
if (!destination.CanWrite)
{
@@ -23,12 +24,17 @@ namespace SharpCompress.Writers.Tar
break;
case CompressionType.BZip2:
{
destination = new BZip2Stream(destination, CompressionMode.Compress, options.LeaveStreamOpen);
destination = new BZip2Stream(destination, CompressionMode.Compress, true);
}
break;
case CompressionType.GZip:
{
destination = new GZipStream(destination, CompressionMode.Compress, options.LeaveStreamOpen);
destination = new GZipStream(destination, CompressionMode.Compress, true);
}
break;
case CompressionType.LZip:
{
destination = new LZipStream(destination, CompressionMode.Compress, true);
}
break;
default:
@@ -36,7 +42,7 @@ namespace SharpCompress.Writers.Tar
throw new InvalidFormatException("Tar does not support compression: " + options.CompressionType);
}
}
InitalizeStream(destination, !options.LeaveStreamOpen);
InitalizeStream(destination);
}

public override void Write(string filename, Stream source, DateTime? modificationTime)
@@ -66,11 +72,12 @@ namespace SharpCompress.Writers.Tar

long realSize = size ?? source.Length;

TarHeader header = new TarHeader();
header.ModTime = modificationTime ?? TarHeader.Epoch;
TarHeader header = new TarHeader(WriterOptions.ArchiveEncoding);

header.LastModifiedTime = modificationTime ?? TarHeader.Epoch;
header.Name = NormalizeFilename(filename);
header.Size = realSize;
header.WriteHeader(OutputStream);
header.Write(OutputStream);
size = source.TransferTo(OutputStream);
PadTo512(size.Value, false);
}
@@ -92,7 +99,19 @@ namespace SharpCompress.Writers.Tar
{
PadTo512(0, true);
PadTo512(0, true);
(OutputStream as BZip2Stream)?.Finish(); // required when bzip2 compression is used
switch (OutputStream)
{
case BZip2Stream b:
{
b.Finish();
break;
}
case LZipStream l:
{
l.Finish();
break;
}
}
}
base.Dispose(isDisposing);
}
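A usage sketch of the new LZip path; when the writer is disposed, the switch above calls Finish() on the wrapped LZipStream. Paths are placeholders:

```csharp
using (Stream output = File.Create("archive.tar.lz"))
using (var writer = new TarWriter(output, new WriterOptions(CompressionType.LZip)))
using (Stream input = File.OpenRead("file.txt"))
{
    writer.Write("file.txt", input, DateTime.Now);
} // disposing the writer finishes the LZip stream via the switch above
```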
@@ -19,7 +19,7 @@ namespace SharpCompress.Writers
{
throw new InvalidFormatException("GZip archives only support GZip compression type.");
}
return new GZipWriter(stream, writerOptions.LeaveStreamOpen);
return new GZipWriter(stream, new GZipWriterOptions(writerOptions));
}
case ArchiveType.Zip:
{
@@ -1,6 +1,7 @@
using System;
using System.IO;
using System.Text;
using SharpCompress.Common;
using SharpCompress.Common.Zip;
using SharpCompress.Common.Zip.Headers;
using SharpCompress.Converters;
@@ -9,22 +10,33 @@ namespace SharpCompress.Writers.Zip
{
internal class ZipCentralDirectoryEntry
{
internal string FileName { get; set; }
private readonly ZipCompressionMethod compression;
private readonly string fileName;
private readonly ArchiveEncoding archiveEncoding;

public ZipCentralDirectoryEntry(ZipCompressionMethod compression, string fileName, ulong headerOffset, ArchiveEncoding archiveEncoding)
{
this.compression = compression;
this.fileName = fileName;
HeaderOffset = headerOffset;
this.archiveEncoding = archiveEncoding;
}

internal DateTime? ModificationTime { get; set; }
internal string Comment { get; set; }
internal uint Crc { get; set; }
internal ulong HeaderOffset { get; set; }
internal ulong Compressed { get; set; }
internal ulong Decompressed { get; set; }
internal ushort Zip64HeaderOffset { get; set; }
internal ulong HeaderOffset { get; }

internal uint Write(Stream outputStream, ZipCompressionMethod compression)
internal uint Write(Stream outputStream)
{
byte[] encodedFilename = Encoding.UTF8.GetBytes(FileName);
byte[] encodedComment = Encoding.UTF8.GetBytes(Comment);
byte[] encodedFilename = archiveEncoding.Encode(fileName);
byte[] encodedComment = archiveEncoding.Encode(Comment);

var zip64_stream = Compressed >= uint.MaxValue || Decompressed >= uint.MaxValue;
var zip64 = zip64_stream || HeaderOffset >= uint.MaxValue || Zip64HeaderOffset != 0;
var zip64_stream = Compressed >= uint.MaxValue || Decompressed >= uint.MaxValue;
var zip64 = zip64_stream || HeaderOffset >= uint.MaxValue || Zip64HeaderOffset != 0;

var compressedvalue = zip64 ? uint.MaxValue : (uint)Compressed;
var decompressedvalue = zip64 ? uint.MaxValue : (uint)Decompressed;
@@ -32,26 +44,26 @@ namespace SharpCompress.Writers.Zip
var extralength = zip64 ? (2 + 2 + 8 + 8 + 8 + 4) : 0;
var version = (byte)(zip64 ? 45 : 20); // Version 20 required for deflate/encryption

HeaderFlags flags = HeaderFlags.UTF8;
HeaderFlags flags = Equals(archiveEncoding.GetEncoding(), Encoding.UTF8) ? HeaderFlags.UTF8 : HeaderFlags.None;
if (!outputStream.CanSeek)
{
// Cannot use data descriptors with zip64:
// https://blogs.oracle.com/xuemingshen/entry/is_zipinput_outputstream_handling_of

// We check that streams are not written too large in the ZipWritingStream,
// so this extra guard is not required, but kept to simplify changing the code
// once the zip64 post-data issue is resolved
// We check that streams are not written too large in the ZipWritingStream,
// so this extra guard is not required, but kept to simplify changing the code
// once the zip64 post-data issue is resolved
if (!zip64_stream)
flags |= HeaderFlags.UsePostDataDescriptor;


if (compression == ZipCompressionMethod.LZMA)
{
flags |= HeaderFlags.Bit1; // eos marker
}
}

//constant sig, then version made by, compabitility, then version to extract
outputStream.Write(new byte[] { 80, 75, 1, 2, 0x14, 0, version, 0 }, 0, 8);
//constant sig, then version made by, then version to extract
outputStream.Write(new byte[] { 80, 75, 1, 2, version, 0, version, 0 }, 0, 8);

outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)flags), 0, 2);
outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)compression), 0, 2); // zipping method
@@ -17,34 +17,31 @@ namespace SharpCompress.Writers.Zip
{
public class ZipWriter : AbstractWriter
{
private readonly CompressionType compressionType;
private readonly CompressionLevel compressionLevel;
private readonly List<ZipCentralDirectoryEntry> entries = new List<ZipCentralDirectoryEntry>();
private readonly string zipComment;
private long streamPosition;
private PpmdProperties ppmdProps;
private readonly bool isZip64;

private readonly ZipWriterOptions _zipWriterOptions;
private readonly List<ZipCentralDirectoryEntry> _entries = new List<ZipCentralDirectoryEntry>();
private long _streamPosition;
private PpmdProperties _ppmdProps;

public ZipWriter(Stream destination, ZipWriterOptions zipWriterOptions)
: base(ArchiveType.Zip)
: base(ArchiveType.Zip, zipWriterOptions)
{
zipComment = zipWriterOptions.ArchiveComment ?? string.Empty;
isZip64 = zipWriterOptions.UseZip64;

compressionType = zipWriterOptions.CompressionType;
compressionLevel = zipWriterOptions.DeflateCompressionLevel;
InitalizeStream(destination, !zipWriterOptions.LeaveStreamOpen);
_zipWriterOptions = zipWriterOptions;
if (destination.CanSeek)
{
_streamPosition = destination.Position;
}
InitalizeStream(destination);
}

private PpmdProperties PpmdProperties
{
get
{
if (ppmdProps == null)
if (_ppmdProps == null)
{
ppmdProps = new PpmdProperties();
_ppmdProps = new PpmdProperties();
}
return ppmdProps;
return _ppmdProps;
}
}

@@ -53,14 +50,15 @@ namespace SharpCompress.Writers.Zip
if (isDisposing)
{
ulong size = 0;
foreach (ZipCentralDirectoryEntry entry in entries)
foreach (ZipCentralDirectoryEntry entry in _entries)
{
size += entry.Write(OutputStream, ToZipCompressionMethod(compressionType));
size += entry.Write(OutputStream);
}
WriteEndRecord(size);
}
base.Dispose(isDisposing);
}

private static ZipCompressionMethod ToZipCompressionMethod(CompressionType compressionType)
{
switch (compressionType)
@@ -93,9 +91,9 @@ namespace SharpCompress.Writers.Zip
public override void Write(string entryPath, Stream source, DateTime? modificationTime)
{
Write(entryPath, source, new ZipWriterEntryOptions()
{
ModificationDateTime = modificationTime
});
{
ModificationDateTime = modificationTime
});
}

public void Write(string entryPath, Stream source, ZipWriterEntryOptions zipWriterEntryOptions)
@@ -108,27 +106,27 @@ namespace SharpCompress.Writers.Zip

public Stream WriteToStream(string entryPath, ZipWriterEntryOptions options)
{
var compression = ToZipCompressionMethod(options.CompressionType ?? _zipWriterOptions.CompressionType);

entryPath = NormalizeFilename(entryPath);
options.ModificationDateTime = options.ModificationDateTime ?? DateTime.Now;
options.EntryComment = options.EntryComment ?? string.Empty;
var entry = new ZipCentralDirectoryEntry
{
Comment = options.EntryComment,
FileName = entryPath,
ModificationTime = options.ModificationDateTime,
HeaderOffset = (ulong)streamPosition
};
var entry = new ZipCentralDirectoryEntry(compression, entryPath, (ulong)_streamPosition, WriterOptions.ArchiveEncoding)
{
Comment = options.EntryComment,
ModificationTime = options.ModificationDateTime
};

// Use the archive default setting for zip64 and allow overrides
var useZip64 = isZip64;
var useZip64 = _zipWriterOptions.UseZip64;
if (options.EnableZip64.HasValue)
{
useZip64 = options.EnableZip64.Value;
}

var headersize = (uint)WriteHeader(entryPath, options, entry, useZip64);
streamPosition += headersize;
return new ZipWritingStream(this, OutputStream, entry,
ToZipCompressionMethod(options.CompressionType ?? compressionType),
options.DeflateCompressionLevel ?? compressionLevel);
_streamPosition += headersize;
return new ZipWritingStream(this, OutputStream, entry, compression, _zipWriterOptions);
}

private string NormalizeFilename(string filename)
@@ -146,12 +144,12 @@ namespace SharpCompress.Writers.Zip

private int WriteHeader(string filename, ZipWriterEntryOptions zipWriterEntryOptions, ZipCentralDirectoryEntry entry, bool useZip64)
{
// We err on the side of caution until the zip specification clarifies how to support this
if (!OutputStream.CanSeek && useZip64)
throw new NotSupportedException("Zip64 extensions are not supported on non-seekable streams");
// We err on the side of caution until the zip specification clarifies how to support this
if (!OutputStream.CanSeek && useZip64)
throw new NotSupportedException("Zip64 extensions are not supported on non-seekable streams");

var explicitZipCompressionInfo = ToZipCompressionMethod(zipWriterEntryOptions.CompressionType ?? compressionType);
byte[] encodedFilename = ArchiveEncoding.Default.GetBytes(filename);
var explicitZipCompressionInfo = ToZipCompressionMethod(zipWriterEntryOptions.CompressionType ?? _zipWriterOptions.CompressionType);
byte[] encodedFilename = WriterOptions.ArchiveEncoding.Encode(filename);

OutputStream.Write(DataConverter.LittleEndian.GetBytes(ZipHeaderFactory.ENTRY_HEADER_BYTES), 0, 4);
if (explicitZipCompressionInfo == ZipCompressionMethod.Deflate)
@@ -159,22 +157,26 @@ namespace SharpCompress.Writers.Zip
if (OutputStream.CanSeek && useZip64)
OutputStream.Write(new byte[] { 45, 0 }, 0, 2); //smallest allowed version for zip64
else
OutputStream.Write(new byte[] { 20, 0 }, 0, 2); //older version which is more compatible
OutputStream.Write(new byte[] { 20, 0 }, 0, 2); //older version which is more compatible
}
else
{
OutputStream.Write(new byte[] { 63, 0 }, 0, 2); //version says we used PPMd or LZMA
}
HeaderFlags flags = ArchiveEncoding.Default == Encoding.UTF8 ? HeaderFlags.UTF8 : 0;
HeaderFlags flags = Equals(WriterOptions.ArchiveEncoding.GetEncoding(), Encoding.UTF8) ? HeaderFlags.UTF8 : 0;
if (!OutputStream.CanSeek)
{
flags |= HeaderFlags.UsePostDataDescriptor;


if (explicitZipCompressionInfo == ZipCompressionMethod.LZMA)
{
flags |= HeaderFlags.Bit1; // eos marker
}
}
if (!string.IsNullOrEmpty(_zipWriterOptions.Password))
{
flags |= HeaderFlags.Encrypted;
}
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)flags), 0, 2);
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)explicitZipCompressionInfo), 0, 2); // zipping method
OutputStream.Write(DataConverter.LittleEndian.GetBytes(zipWriterEntryOptions.ModificationDateTime.DateTimeToDosTime()), 0, 4);
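Tying the encryption flag above to the writer options: a hedged usage sketch, assuming the Password that _zipWriterOptions exposes in this branch is a settable property on ZipWriterOptions (it is not declared in this diff). Entries written this way go through the PKWARE crypto path added further down:

```csharp
using (Stream output = File.Create("encrypted.zip"))
using (var writer = new ZipWriter(output, new ZipWriterOptions(CompressionType.Deflate)
{
    Password = "secret" // hypothetical property surface; the diff only shows it being read
}))
using (Stream input = File.OpenRead("file.txt"))
{
    writer.Write("file.txt", input, DateTime.Now); // entry gets HeaderFlags.Encrypted and PKWARE encryption
}
```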
@@ -208,21 +210,13 @@ namespace SharpCompress.Writers.Zip
OutputStream.Write(DataConverter.LittleEndian.GetBytes(uncompressed), 0, 4);
}

private void WritePostdataDescriptor(uint crc, ulong compressed, ulong uncompressed)
{
OutputStream.Write(DataConverter.LittleEndian.GetBytes(ZipHeaderFactory.POST_DATA_DESCRIPTOR), 0, 4);
OutputStream.Write(DataConverter.LittleEndian.GetBytes(crc), 0, 4);
OutputStream.Write(DataConverter.LittleEndian.GetBytes((uint)compressed), 0, 4);
OutputStream.Write(DataConverter.LittleEndian.GetBytes((uint)uncompressed), 0, 4);
}

private void WriteEndRecord(ulong size)
{
byte[] encodedComment = ArchiveEncoding.Default.GetBytes(zipComment);
var zip64 = isZip64 || entries.Count > ushort.MaxValue || streamPosition >= uint.MaxValue || size >= uint.MaxValue;
byte[] encodedComment = WriterOptions.ArchiveEncoding.Encode(_zipWriterOptions.ArchiveComment ?? string.Empty);
var zip64 = _zipWriterOptions.UseZip64 || _entries.Count > ushort.MaxValue || _streamPosition >= uint.MaxValue || size >= uint.MaxValue;

var sizevalue = size >= uint.MaxValue ? uint.MaxValue : (uint)size;
var streampositionvalue = streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)streamPosition;
var streampositionvalue = _streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)_streamPosition;

if (zip64)
{
@@ -238,26 +232,26 @@ namespace SharpCompress.Writers.Zip
OutputStream.Write(DataConverter.LittleEndian.GetBytes((uint)0), 0, 4); // Central dir disk

// TODO: entries.Count is int, so max 2^31 files
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)entries.Count), 0, 8); // Entries in this disk
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)entries.Count), 0, 8); // Total entries
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)_entries.Count), 0, 8); // Entries in this disk
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)_entries.Count), 0, 8); // Total entries
OutputStream.Write(DataConverter.LittleEndian.GetBytes(size), 0, 8); // Central Directory size
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)streamPosition), 0, 8); // Disk offset
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)_streamPosition), 0, 8); // Disk offset

// Write zip64 end of central directory locator
OutputStream.Write(new byte[] { 80, 75, 6, 7 }, 0, 4);

OutputStream.Write(DataConverter.LittleEndian.GetBytes(0uL), 0, 4); // Entry disk
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)streamPosition + size), 0, 8); // Offset to the zip64 central directory
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)_streamPosition + size), 0, 8); // Offset to the zip64 central directory
OutputStream.Write(DataConverter.LittleEndian.GetBytes(0u), 0, 4); // Number of disks

streamPosition += recordlen + (4 + 4 + 8 + 4);
streampositionvalue = streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)streampositionvalue;
_streamPosition += recordlen + (4 + 4 + 8 + 4);
streampositionvalue = _streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)streampositionvalue;
}

// Write normal end of central directory record
OutputStream.Write(new byte[] {80, 75, 5, 6, 0, 0, 0, 0}, 0, 8);
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)entries.Count), 0, 2);
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)entries.Count), 0, 2);
OutputStream.Write(new byte[] { 80, 75, 5, 6, 0, 0, 0, 0 }, 0, 8);
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)_entries.Count), 0, 2);
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)_entries.Count), 0, 2);
OutputStream.Write(DataConverter.LittleEndian.GetBytes(sizevalue), 0, 4);
OutputStream.Write(DataConverter.LittleEndian.GetBytes((uint)streampositionvalue), 0, 4);
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)encodedComment.Length), 0, 2);
@@ -268,29 +262,29 @@ namespace SharpCompress.Writers.Zip

internal class ZipWritingStream : Stream
{
private readonly CRC32 crc = new CRC32();
private readonly ZipCentralDirectoryEntry entry;
private readonly Stream originalStream;
private readonly Stream writeStream;
private readonly ZipWriter writer;
private readonly ZipCompressionMethod zipCompressionMethod;
private readonly CompressionLevel compressionLevel;
private CountingWritableSubStream counting;
private ulong decompressed;
private readonly CRC32 _crc = new CRC32();
private readonly ZipCentralDirectoryEntry _entry;
private readonly Stream _originalStream;
private readonly Stream _writeStream;
private readonly ZipWriter _writer;
private readonly ZipWriterOptions _zipWriterOptions;
private readonly ZipCompressionMethod _zipCompressionMethod;
private CountingWritableSubStream _counting;
private ulong _decompressed;

// Flag to prevent throwing exceptions on Dispose
private bool limitsExceeded;
// Flag to prevent throwing exceptions on Dispose
private bool _limitsExceeded;

internal ZipWritingStream(ZipWriter writer, Stream originalStream, ZipCentralDirectoryEntry entry,
ZipCompressionMethod zipCompressionMethod, CompressionLevel compressionLevel)
internal ZipWritingStream(ZipWriter writer, Stream originalStream, ZipCentralDirectoryEntry entry,
ZipCompressionMethod zipCompressionMethod,
ZipWriterOptions zipWriterOptions)
{
this.writer = writer;
this.originalStream = originalStream;
this.writer = writer;
this.entry = entry;
this.zipCompressionMethod = zipCompressionMethod;
this.compressionLevel = compressionLevel;
writeStream = GetWriteStream(originalStream);
this._writer = writer;
this._originalStream = originalStream;
this._entry = entry;
_zipWriterOptions = zipWriterOptions;
_zipCompressionMethod = zipCompressionMethod;
_writeStream = GetWriteStream(originalStream);
}

public override bool CanRead => false;
@@ -305,45 +299,60 @@ namespace SharpCompress.Writers.Zip

private Stream GetWriteStream(Stream writeStream)
{
counting = new CountingWritableSubStream(writeStream);
Stream output = counting;
switch (zipCompressionMethod)
_counting = new CountingWritableSubStream(writeStream);
Stream output = _counting;
Stream compressedStream;
switch (_zipCompressionMethod)
{
case ZipCompressionMethod.None:
{
return output;
}
{
compressedStream = output;
break;
}
case ZipCompressionMethod.Deflate:
{
return new DeflateStream(counting, CompressionMode.Compress, compressionLevel,
true);
}
{
compressedStream = new DeflateStream(_counting, CompressionMode.Compress,
_zipWriterOptions.DeflateCompressionLevel,
true);
break;
}
case ZipCompressionMethod.BZip2:
{
return new BZip2Stream(counting, CompressionMode.Compress, true);
}
{
compressedStream = new BZip2Stream(_counting, CompressionMode.Compress, true);
break;
}
case ZipCompressionMethod.LZMA:
{
counting.WriteByte(9);
counting.WriteByte(20);
counting.WriteByte(5);
counting.WriteByte(0);
{
_counting.WriteByte(9);
_counting.WriteByte(20);
_counting.WriteByte(5);
_counting.WriteByte(0);

LzmaStream lzmaStream = new LzmaStream(new LzmaEncoderProperties(!originalStream.CanSeek),
false, counting);
counting.Write(lzmaStream.Properties, 0, lzmaStream.Properties.Length);
return lzmaStream;
}
LzmaStream lzmaStream = new LzmaStream(new LzmaEncoderProperties(!_originalStream.CanSeek),
false, _counting);
_counting.Write(lzmaStream.Properties, 0, lzmaStream.Properties.Length);
compressedStream = lzmaStream;
break;
}
case ZipCompressionMethod.PPMd:
{
counting.Write(writer.PpmdProperties.Properties, 0, 2);
return new PpmdStream(writer.PpmdProperties, counting, true);
}
{
_counting.Write(_writer.PpmdProperties.Properties, 0, 2);
compressedStream = new PpmdStream(_writer.PpmdProperties, _counting, true);
break;
}
default:
{
throw new NotSupportedException("CompressionMethod: " + zipCompressionMethod);
}
{
throw new NotSupportedException("CompressionMethod: " + _zipCompressionMethod);
}
}

if (string.IsNullOrEmpty(_zipWriterOptions.Password))
{
return compressedStream;
}

var encryptionData = PkwareTraditionalEncryptionData.ForWrite(_zipWriterOptions.Password, _zipWriterOptions.ArchiveEncoding);
return new PkwareTraditionalCryptoStream(new NonDisposingStream(writeStream), encryptionData, CryptoMode.Encrypt);
}

protected override void Dispose(bool disposing)
@@ -351,78 +360,73 @@ namespace SharpCompress.Writers.Zip
|
||||
base.Dispose(disposing);
|
||||
if (disposing)
|
||||
{
|
||||
writeStream.Dispose();
|
||||
_writeStream.Dispose();
|
||||
|
||||
if (limitsExceeded)
|
||||
{
|
||||
// We have written invalid data into the archive,
// so we destroy it now, instead of allowing the user to continue
// with a defunct archive
originalStream.Dispose();
return;
}

entry.Crc = (uint)crc.Crc32Result;
entry.Compressed = counting.Count;
entry.Decompressed = decompressed;

var zip64 = entry.Compressed >= uint.MaxValue || entry.Decompressed >= uint.MaxValue;
var compressedvalue = zip64 ? uint.MaxValue : (uint)counting.Count;
var decompressedvalue = zip64 ? uint.MaxValue : (uint)entry.Decompressed;

if (originalStream.CanSeek)
if (_limitsExceeded)
{
originalStream.Position = (long)(entry.HeaderOffset + 6);
originalStream.WriteByte(0);

originalStream.Position = (long)(entry.HeaderOffset + 14);
// We have written invalid data into the archive,
// so we destroy it now, instead of allowing the user to continue
// with a defunct archive
_originalStream.Dispose();
return;
}

writer.WriteFooter(entry.Crc, compressedvalue, decompressedvalue);
_entry.Crc = (uint)_crc.Crc32Result;
_entry.Compressed = _counting.Count;
_entry.Decompressed = _decompressed;

// Ideally, we should not throw from Dispose()
// We should not get here as the Write call checks the limits
if (zip64 && entry.Zip64HeaderOffset == 0)
throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
var zip64 = _entry.Compressed >= uint.MaxValue || _entry.Decompressed >= uint.MaxValue;
var compressedvalue = zip64 ? uint.MaxValue : (uint)_counting.Count;
var decompressedvalue = zip64 ? uint.MaxValue : (uint)_entry.Decompressed;

// If we have pre-allocated space for zip64 data,
// fill it out, even if it is not required
if (entry.Zip64HeaderOffset != 0)
{
originalStream.Position = (long)(entry.HeaderOffset + entry.Zip64HeaderOffset);
originalStream.Write(DataConverter.LittleEndian.GetBytes((ushort)0x0001), 0, 2);
originalStream.Write(DataConverter.LittleEndian.GetBytes((ushort)(8 + 8)), 0, 2);
if (_originalStream.CanSeek)
{
_writer.WriteFooter(_entry.Crc, compressedvalue, decompressedvalue);

originalStream.Write(DataConverter.LittleEndian.GetBytes(entry.Decompressed), 0, 8);
originalStream.Write(DataConverter.LittleEndian.GetBytes(entry.Compressed), 0, 8);
}
// Ideally, we should not throw from Dispose()
// We should not get here as the Write call checks the limits
if (zip64 && _entry.Zip64HeaderOffset == 0)
throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");

originalStream.Position = writer.streamPosition + (long)entry.Compressed;
writer.streamPosition += (long)entry.Compressed;
// If we have pre-allocated space for zip64 data,
// fill it out, even if it is not required
if (_entry.Zip64HeaderOffset != 0)
{
_originalStream.Position = (long)(_entry.HeaderOffset + _entry.Zip64HeaderOffset);
_originalStream.Write(DataConverter.LittleEndian.GetBytes((ushort)0x0001), 0, 2);
_originalStream.Write(DataConverter.LittleEndian.GetBytes((ushort)(8 + 8)), 0, 2);

_originalStream.Write(DataConverter.LittleEndian.GetBytes(_entry.Decompressed), 0, 8);
_originalStream.Write(DataConverter.LittleEndian.GetBytes(_entry.Compressed), 0, 8);
}

_originalStream.Position = _writer._streamPosition + (long)_entry.Compressed;
_writer._streamPosition += (long)_entry.Compressed;
}
else
{
// We have a streaming archive, so we should add a post-data-descriptor,
// but we cannot as it does not hold the zip64 values
// Throwing an exception until the zip specification is clarified

// Ideally, we should not throw from Dispose()
// We should not get here as the Write call checks the limits
if (zip64)
throw new NotSupportedException("Streams larger than 4GiB are not supported for non-seekable streams");

originalStream.Write(DataConverter.LittleEndian.GetBytes(ZipHeaderFactory.POST_DATA_DESCRIPTOR), 0, 4);
writer.WriteFooter(entry.Crc,
_originalStream.Write(DataConverter.LittleEndian.GetBytes(ZipHeaderFactory.POST_DATA_DESCRIPTOR), 0, 4);
_writer.WriteFooter(_entry.Crc,
(uint)compressedvalue,
(uint)decompressedvalue);
writer.streamPosition += (long)entry.Compressed + 16;
_writer._streamPosition += (long)_entry.Compressed + 16;
}
writer.entries.Add(entry);
_writer._entries.Add(_entry);
}
}

public override void Flush()
{
writeStream.Flush();
_writeStream.Flush();
}

public override int Read(byte[] buffer, int offset, int count)
@@ -442,36 +446,35 @@ namespace SharpCompress.Writers.Zip

public override void Write(byte[] buffer, int offset, int count)
{
// We check the limits first, because we can keep the archive consistent
// if we can prevent the writes from happening
if (entry.Zip64HeaderOffset == 0)
{
// Pre-check, the counting.Count is not exact, as we do not know the size before having actually compressed it
if (limitsExceeded || ((decompressed + (uint)count) > uint.MaxValue) || (counting.Count + (uint)count) > uint.MaxValue)
throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
}
// We check the limits first, because we can keep the archive consistent
// if we can prevent the writes from happening
if (_entry.Zip64HeaderOffset == 0)
{
// Pre-check, the counting.Count is not exact, as we do not know the size before having actually compressed it
if (_limitsExceeded || ((_decompressed + (uint)count) > uint.MaxValue) || (_counting.Count + (uint)count) > uint.MaxValue)
throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
}

decompressed += (uint)count;
crc.SlurpBlock(buffer, offset, count);
writeStream.Write(buffer, offset, count);

if (entry.Zip64HeaderOffset == 0)
{
// Post-check, this is accurate
if ((decompressed > uint.MaxValue) || counting.Count > uint.MaxValue)
{
// We have written the data, so the archive is now broken
// Throwing the exception here, allows us to avoid
// throwing an exception in Dispose() which is discouraged
// as it can mask other errors
limitsExceeded = true;
throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
}
}
_decompressed += (uint)count;
_crc.SlurpBlock(buffer, offset, count);
_writeStream.Write(buffer, offset, count);

if (_entry.Zip64HeaderOffset == 0)
{
// Post-check, this is accurate
if ((_decompressed > uint.MaxValue) || _counting.Count > uint.MaxValue)
{
// We have written the data, so the archive is now broken
// Throwing the exception here, allows us to avoid
// throwing an exception in Dispose() which is discouraged
// as it can mask other errors
_limitsExceeded = true;
throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
}
}
}
}

#endregion
#endregion Nested type: ZipWritingStream
}
}
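The pre- and post-checks above throw once an entry crosses the 4GiB boundary unless zip64 was requested up front. Below is a minimal sketch, not part of this diff, of how a caller might opt in, assuming the ZipWriter(Stream, ZipWriterOptions) constructor and the UseZip64 flag shown here; the file paths are hypothetical.

    using System.IO;
    using SharpCompress.Common;
    using SharpCompress.Writers;
    using SharpCompress.Writers.Zip;

    class Zip64Example
    {
        static void Main()
        {
            // UseZip64 pre-allocates the zip64 extra field in each local header,
            // so entries larger than 4GiB can be patched in later.
            var options = new ZipWriterOptions(CompressionType.Deflate)
            {
                UseZip64 = true
            };

            // The output stream should be seekable: the footer seeks back to fix up
            // sizes, and post-data descriptors cannot carry zip64 values.
            using (var output = File.Create("huge.zip"))            // hypothetical path
            using (var writer = new ZipWriter(output, options))
            using (var input = File.OpenRead("data.bin"))           // hypothetical entry source
            {
                writer.Write("data.bin", input);
            }
        }
    }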
@@ -15,8 +15,15 @@ namespace SharpCompress.Writers.Zip
: base(options.CompressionType)
{
LeaveStreamOpen = options.LeaveStreamOpen;
if (options is ZipWriterOptions)
UseZip64 = ((ZipWriterOptions)options).UseZip64;
ArchiveEncoding = options.ArchiveEncoding;

var writerOptions = options as ZipWriterOptions;
if (writerOptions != null)
{
UseZip64 = writerOptions.UseZip64;
DeflateCompressionLevel = writerOptions.DeflateCompressionLevel;
ArchiveComment = writerOptions.ArchiveComment;
}
}
/// <summary>
/// When CompressionType.Deflate is used, this property is referenced. Defaults to CompressionLevel.Default.
@@ -33,5 +40,10 @@ namespace SharpCompress.Writers.Zip
/// are less than 4GiB in length.
/// </summary>
public bool UseZip64 { get; set; }

/// <summary>
/// Setting a password will encrypt the zip archive with the Pkware style.
/// </summary>
public string Password { get; set; }
}
}
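ZipWriterOptions now carries the deflate level, an archive comment and a Pkware-style password alongside UseZip64. A hedged sketch of wiring those together through the factory, assuming WriterFactory.Open(stream, ArchiveType, options) accepts the derived options type as the constructor change above implies; the names and values are placeholders.

    using System.IO;
    using SharpCompress.Common;
    using SharpCompress.Compressors.Deflate;
    using SharpCompress.Writers;
    using SharpCompress.Writers.Zip;

    class ZipOptionsExample
    {
        static void Main()
        {
            var options = new ZipWriterOptions(CompressionType.Deflate)
            {
                DeflateCompressionLevel = CompressionLevel.BestCompression,
                ArchiveComment = "nightly backup",   // placeholder comment
                UseZip64 = false,
                Password = "secret"                  // Pkware-style encryption per the summary above
            };

            // ZipWriter downcasts the WriterOptions it receives (options as ZipWriterOptions),
            // so the zip-specific settings survive the factory call.
            using (var output = File.Create("backup.zip"))             // placeholder path
            using (var writer = WriterFactory.Open(output, ArchiveType.Zip, options))
            using (var input = File.OpenRead("notes.txt"))             // placeholder entry source
            {
                writer.Write("notes.txt", input);
            }
        }
    }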
@@ -23,9 +23,12 @@
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
using System;
using System.IO;
using SharpCompress.Compressors;
using SharpCompress.Compressors.ADC;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Crypto;
using Xunit;

namespace SharpCompress.Test
@@ -124,5 +127,28 @@ namespace SharpCompress.Test
}
}
}

[Fact]
public void TestCrc32Stream()
{
using (FileStream decFs = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "Tar.tar")))
{
var crc32 = new CRC32().GetCrc32(decFs);
decFs.Seek(0, SeekOrigin.Begin);

var memory = new MemoryStream();
var crcStream = new Crc32Stream(memory, 0xEDB88320, 0xFFFFFFFF);
decFs.CopyTo(crcStream);

decFs.Seek(0, SeekOrigin.Begin);

var crc32a = crcStream.Crc;

var crc32b = Crc32Stream.Compute(memory.ToArray());

Assert.Equal(crc32, crc32a);
Assert.Equal(crc32, crc32b);
}
}
}
}
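Crc32Stream wraps another stream and updates a running CRC-32 as bytes pass through it. A small sketch, under the same constructor and Crc property exercised by TestCrc32Stream above, of checksumming a file during a copy; the input path is a placeholder.

    using System;
    using System.IO;
    using SharpCompress.Crypto;

    class Crc32StreamExample
    {
        static void Main()
        {
            // 0xEDB88320 is the reflected CRC-32 polynomial and 0xFFFFFFFF the usual seed,
            // matching the values used in TestCrc32Stream above.
            using (var input = File.OpenRead("Tar.tar"))                    // placeholder path
            using (var crcStream = new Crc32Stream(Stream.Null, 0xEDB88320, 0xFFFFFFFF))
            {
                input.CopyTo(crcStream);                                    // bytes are hashed as they are written through
                Console.WriteLine($"CRC-32: {crcStream.Crc:X8}");
            }
        }
    }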
@@ -1,5 +1,4 @@
<Project Sdk="Microsoft.NET.Sdk">

<PropertyGroup>
<TargetFramework>netcoreapp1.1</TargetFramework>
<AssemblyName>SharpCompress.Test</AssemblyName>
@@ -10,11 +9,9 @@
<GenerateRuntimeConfigurationFiles>true</GenerateRuntimeConfigurationFiles>
<RuntimeFrameworkVersion>1.1.2</RuntimeFrameworkVersion>
</PropertyGroup>

<ItemGroup>
<ProjectReference Include="..\..\src\SharpCompress\SharpCompress.csproj" />
</ItemGroup>

<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="15.0.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.3.0-beta2-build1317" />
@@ -22,9 +19,10 @@
<PackageReference Include="xunit" Version="2.3.0-beta2-build3683" />
<DotNetCliToolReference Include="dotnet-xunit" Version="2.3.0-beta2-build3683" />
</ItemGroup>

<ItemGroup>
<Service Include="{82a7f48d-3b50-4b1e-b82e-3ada8210c358}" />
</ItemGroup>

</Project>
<ItemGroup>
<Folder Include="Xz\" />
</ItemGroup>
</Project>
@@ -20,6 +20,33 @@ namespace SharpCompress.Test.Tar
Read("Tar.tar", CompressionType.None);
}

[Fact]
public void Tar_Skip()
{
using (Stream stream = new ForwardOnlyStream(File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "Tar.tar"))))
using (IReader reader = ReaderFactory.Open(stream))
{
ResetScratch();
int x = 0;
while (reader.MoveToNextEntry())
{
if (!reader.Entry.IsDirectory)
{
x++;
if (x % 2 == 0)
{
reader.WriteEntryToDirectory(SCRATCH_FILES_PATH,
new ExtractionOptions()
{
ExtractFullPath = true,
Overwrite = true
});
}
}
}
}
}

[Fact]
public void Tar_BZip2_Reader()
{
@@ -38,6 +65,12 @@ namespace SharpCompress.Test.Tar
Read("Tar.tar.lz", CompressionType.LZip);
}

[Fact]
public void Tar_Xz_Reader()
{
Read("Tar.tar.xz", CompressionType.Xz);
}

[Fact]
public void Tar_BZip2_Entry_Stream()
{
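Tar_Xz_Reader goes through the shared Read() helper, so the reader-level flow is not visible in this hunk. A sketch of what forward-only extraction of an xz-compressed tar might look like, reusing only the ReaderFactory, MoveToNextEntry and WriteEntryToDirectory calls already shown in Tar_Skip; the archive and output paths are placeholders.

    using System.IO;
    using SharpCompress.Common;
    using SharpCompress.Readers;

    class TarXzReadExample
    {
        static void Main()
        {
            // ReaderFactory detects the container, so a .tar.xz needs no special casing.
            using (Stream stream = File.OpenRead("Tar.tar.xz"))        // placeholder path
            using (var reader = ReaderFactory.Open(stream))
            {
                while (reader.MoveToNextEntry())
                {
                    if (!reader.Entry.IsDirectory)
                    {
                        reader.WriteEntryToDirectory("out",             // placeholder directory
                            new ExtractionOptions
                            {
                                ExtractFullPath = true,
                                Overwrite = true
                            });
                    }
                }
            }
        }
    }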
@@ -23,6 +23,12 @@ namespace SharpCompress.Test.Tar
Write(CompressionType.BZip2, "Tar.noEmptyDirs.tar.bz2", "Tar.noEmptyDirs.tar.bz2");
}

[Fact]
public void Tar_LZip_Writer()
{
Write(CompressionType.LZip, "Tar.noEmptyDirs.tar.lz", "Tar.noEmptyDirs.tar.lz");
}

[Fact]
public void Tar_Rar_Write()
{
@@ -28,10 +28,6 @@ namespace SharpCompress.Test
{
writer.WriteAll(ORIGINAL_FILES_PATH, "*", SearchOption.AllDirectories);
}
if (!stream.CanWrite)
{
throw new InvalidOperationException();
}
}
CompareArchivesByPath(Path.Combine(SCRATCH2_FILES_PATH, archive),
Path.Combine(TEST_ARCHIVES_PATH, archiveToVerifyAgainst));
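The Write() helper trimmed in this hunk drives WriterFactory and WriteAll. A hedged sketch of the same pattern for the newly tested LZip case, assuming the WriterFactory.Open(stream, ArchiveType, WriterOptions) overload and the WriteAll extension used above; the source directory and output path are placeholders.

    using System.IO;
    using SharpCompress.Common;
    using SharpCompress.Writers;

    class TarLZipWriteExample
    {
        static void Main()
        {
            using (Stream stream = File.Create("Tar.noEmptyDirs.tar.lz"))       // placeholder output
            using (var writer = WriterFactory.Open(stream, ArchiveType.Tar,
                                                   new WriterOptions(CompressionType.LZip)))
            {
                // Recursively add every file under the source directory.
                writer.WriteAll("original", "*", SearchOption.AllDirectories);   // placeholder directory
            }
        }
    }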
tests/SharpCompress.Test/Xz/Crc32Tests.cs (Normal file, 32 lines)
@@ -0,0 +1,32 @@
using SharpCompress.Compressors.Xz;
using System;
using System.Text;
using Xunit;

namespace SharpCompress.Test.Xz
{
public class Crc32Tests
{
private const string SimpleString = @"The quick brown fox jumps over the lazy dog.";
private readonly byte[] SimpleBytes = Encoding.ASCII.GetBytes(SimpleString);
private const string SimpleString2 = @"Life moves pretty fast. If you don't stop and look around once in a while, you could miss it.";
private readonly byte[] SimpleBytes2 = Encoding.ASCII.GetBytes(SimpleString2);

[Fact]
public void ShortAsciiString()
{
var actual = Crc32.Compute(SimpleBytes);

Assert.Equal((UInt32)0x519025e9, actual);
}

[Fact]
public void ShortAsciiString2()
{
var actual = Crc32.Compute(SimpleBytes2);

Assert.Equal((UInt32)0x6ee3ad88, actual);
}

}
}
tests/SharpCompress.Test/Xz/Crc64Tests.cs (Normal file, 32 lines)
@@ -0,0 +1,32 @@
using SharpCompress.Compressors.Xz;
using System;
using System.Text;
using Xunit;

namespace SharpCompress.Test.Xz
{
public class Crc64Tests
{
private const string SimpleString = @"The quick brown fox jumps over the lazy dog.";
private readonly byte[] SimpleBytes = Encoding.ASCII.GetBytes(SimpleString);
private const string SimpleString2 = @"Life moves pretty fast. If you don't stop and look around once in a while, you could miss it.";
private readonly byte[] SimpleBytes2 = Encoding.ASCII.GetBytes(SimpleString2);

[Fact]
public void ShortAsciiString()
{
var actual = Crc64.Compute(SimpleBytes);

Assert.Equal((UInt64)0x7E210EB1B03E5A1D, actual);
}

[Fact]
public void ShortAsciiString2()
{
var actual = Crc64.Compute(SimpleBytes2);

Assert.Equal((UInt64)0x416B4150508661EE, actual);
}

}
}
Some files were not shown because too many files have changed in this diff.