Mirror of https://github.com/adamhathcock/sharpcompress.git (synced 2026-02-04 13:34:59 +00:00)

Compare commits (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 03618a704b |  |
@@ -1,11 +0,0 @@
version: 2
jobs:
  build:
    docker:
      - image: adamhathcock/cake-build:latest
    steps:
      - checkout
      - run:
          name: Build
          command: ./build.sh
2  .gitattributes (vendored)
@@ -2,4 +2,4 @@
* text=auto

# need original files to be windows
*.txt text eol=crlf
test/TestArchives/Original/*.txt eol=crlf
1  .gitignore (vendored)
@@ -14,4 +14,3 @@ tests/TestArchives/Scratch
.vs
tools
.vscode
.idea/
10  .travis.yml (new file)
@@ -0,0 +1,10 @@
dist: trusty
language: csharp
solution: SharpCompress.sln
matrix:
  include:
    - dotnet: 1.0.4
      mono: none
      env: DOTNETCORE=1
script:
  - ./build.sh
44  FORMATS.md
@@ -1,10 +1,10 @@
# Formats
# Archive Formats

## Accessing Archives

- Archive classes allow random access to a seekable stream.
- Reader classes allow forward-only reading on a stream.
- Writer classes allow forward-only writing on a stream.
Archive classes allow random access to a seekable stream.
Reader classes allow forward-only reading.
Writer classes allow forward-only writing.

## Supported Format Table

@@ -12,24 +12,18 @@
| --- | --- | --- | --- | --- | --- |
| Rar | Rar | Decompress (1) | RarArchive | RarReader | N/A |
| Zip (2) | None, DEFLATE, BZip2, LZMA/LZMA2, PPMd | Both | ZipArchive | ZipReader | ZipWriter |
| Tar | None | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.GZip | DEFLATE | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.BZip2 | BZip2 | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.LZip | LZMA | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.XZ | LZMA2 | Decompress | TarArchive | TarReader | TarWriter (3) |
| GZip (single file) | DEFLATE | Both | GZipArchive | GZipReader | GZipWriter |
| Tar | None, BZip2, GZip, LZip | Both | TarArchive | TarReader | TarWriter (3) |
| GZip (single file) | GZip | Both | GZipArchive | GZipReader | GZipWriter |
| 7Zip (4) | LZMA, LZMA2, BZip2, PPMd, BCJ, BCJ2, Deflate | Decompress | SevenZipArchive | N/A | N/A |
| LZip (single file) (5) | LZip (LZMA) | Both | LZipArchive | LZipReader | LZipWriter |

1. SOLID Rars are only supported in the RarReader API.
2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported, but only with seekable streams, as the Zip spec doesn't support Zip64 data in post data descriptors.
2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading is supported.
3. The Tar format requires a file size in the header. If no size is specified to the TarWriter and the stream is not seekable, then an exception will be thrown.
4. The 7Zip format doesn't allow reading as a forward-only stream, so 7Zip is only supported through the Archive API.
5. LZip has no support for extra data like the file name or timestamp. There is a default filename used when looking at the entry Key on the archive.

## Compression Streams
## Compressors

For those who want to directly compress/decompress bits. The single file formats are represented here as well. However, BZip2, LZip and XZ have no metadata (GZip has a little), so using them without something like a Tar file makes little sense.
For those who want to directly compress/decompress bits.

| Compressor | Compress/Decompress |
| --- | --- |
@@ -39,22 +33,4 @@ For those who want to directly compress/decompress bits. The single file format
| LZMAStream | Both |
| PPMdStream | Both |
| ADCStream | Decompress |
| LZipStream | Both |
| XZStream | Decompress |

## Archive Formats vs Compression

Sometimes the terminology gets mixed.

### Compression

DEFLATE and LZMA are pure compression algorithms.

### Formats

Formats like Zip, 7Zip and Rar are archive formats only. They use other compression methods (e.g. DEFLATE, LZMA) or proprietary ones (e.g. RAR).

### Overlap

GZip, BZip2 and LZip are single file archival formats. The overlap in the API happens because Tar uses the single file formats as "compression" methods, and the API tries to hide this a bit.

| LZipStream | Decompress |
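To make the Archive/Reader distinction in FORMATS.md above concrete, here is a minimal C# sketch of both access styles. It is based on the SharpCompress public API as described in the project's usage docs; the exact option types (e.g. `ExtractionOptions`) and extension-method locations vary between versions, so treat the details as illustrative rather than authoritative.

```csharp
using System.IO;
using SharpCompress.Archives;
using SharpCompress.Common;
using SharpCompress.Readers;

class FormatExamples
{
    // Archive API: random access over a seekable stream (Zip, Rar, Tar, 7Zip, GZip).
    static void ExtractWithArchive(string path, string destination)
    {
        using (var archive = ArchiveFactory.Open(path))
        {
            foreach (var entry in archive.Entries)
            {
                if (!entry.IsDirectory)
                {
                    entry.WriteToDirectory(destination,
                        new ExtractionOptions { ExtractFullPath = true, Overwrite = true });
                }
            }
        }
    }

    // Reader API: forward-only, works on non-seekable streams (e.g. a network stream).
    static void ExtractWithReader(Stream nonSeekableStream, string destination)
    {
        using (var reader = ReaderFactory.Open(nonSeekableStream))
        {
            while (reader.MoveToNextEntry())
            {
                if (!reader.Entry.IsDirectory)
                {
                    reader.WriteEntryToDirectory(destination,
                        new ExtractionOptions { ExtractFullPath = true, Overwrite = true });
                }
            }
        }
    }
}
```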
36  README.md
@@ -7,24 +7,14 @@ The major feature is support for non-seekable streams so large files can be proc
AppVeyor Build -
[](https://ci.appveyor.com/project/adamhathcock/sharpcompress/branch/master)

Circle CI Build -
[](https://circleci.com/gh/adamhathcock/sharpcompress)
Travis CI Build -
[](https://travis-ci.org/adamhathcock/sharpcompress)

## Need Help?
Post issues on GitHub!

Check the [Supported Formats](FORMATS.md) and [Basic Usage](USAGE.md).

## Recommended Formats

In general, I recommend GZip (Deflate)/BZip2 (BZip)/LZip (LZMA), as the simplicity of these formats lends itself to better long-term archival as well as streamability. Tar is often used in conjunction with them to put multiple files in a single archive (e.g. `.tar.gz`).

Zip is okay, but it's a very haphazard format, and the variation in headers and implementations makes it hard to get correct. It uses Deflate by default but supports a lot of compression methods.

RAR is not recommended, as it's a proprietary format and the compression is closed source. Use Tar/LZip for LZMA.

7Zip and XZ are both overly complicated. 7Zip cannot be read as a forward-only stream. XZ has known shortcomings, explained at http://www.nongnu.org/lzip/xz_inadequate.html. Use Tar/LZip for LZMA compression instead.

## A Simple Request

Hi everyone. I hope you're using SharpCompress and finding it useful. Please give me feedback on what you'd like to see changed, especially as far as usability goes. New feature suggestions are always welcome as well. I would also like to know what projects SharpCompress is being used in; seeing how it is used gives me ideas for future versions. Thanks!

@@ -44,26 +34,6 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un

## Version Log

### Version 0.18

* [Now on Github releases](https://github.com/adamhathcock/sharpcompress/releases/tag/0.18)

### Version 0.17.1

* Fix - [Bug Fix for .NET Core on Windows](https://github.com/adamhathcock/sharpcompress/pull/257)

### Version 0.17.0

* New - Full LZip support! Can read and write LZip files and Tars inside LZip files. [Make LZip a first class citizen. #241](https://github.com/adamhathcock/sharpcompress/issues/241)
* New - XZ read support! Can read XZ files and Tars inside XZ files. [XZ in SharpCompress #91](https://github.com/adamhathcock/sharpcompress/issues/94)
* Fix - [Regression - zip file writing on seekable streams always assumed stream start was 0. Introduced with Zip64 writing.](https://github.com/adamhathcock/sharpcompress/issues/244)
* Fix - [Zip files with post-data descriptors can be properly skipped via decompression](https://github.com/adamhathcock/sharpcompress/issues/162)

### Version 0.16.2

* Fix [.NET 3.5 should support files and cryptography (was a regression from 0.16.0)](https://github.com/adamhathcock/sharpcompress/pull/251)
* Fix [Zip per entry compression customization wrote the wrong method into the zip archive](https://github.com/adamhathcock/sharpcompress/pull/249)

### Version 0.16.1

* Fix [Preserve compression method when getting a compressed stream](https://github.com/adamhathcock/sharpcompress/pull/235)

@@ -182,8 +152,6 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
* Embedded some BouncyCastle crypto classes to allow RAR Decryption and Winzip AES Decryption in Portable and Windows Store DLLs
* Built in Release (I think)

XZ implementation based on: https://github.com/sambott/XZ.NET by @sambott

7Zip implementation based on: https://code.google.com/p/managed-lzma/
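As a companion to the "Recommended Formats" advice in the README above, here is a short, hedged sketch of writing a `.tar.gz` with the Writer API. The `WriterFactory.Open` overload taking a `WriterOptions` and the `WriteAll` helper are assumptions based on the SharpCompress writer API of roughly this era (0.16-0.18); check the version you actually use.

```csharp
using System.IO;
using SharpCompress.Common;
using SharpCompress.Writers;

class TarGzExample
{
    static void CreateTarGz(string outputPath, string sourceDirectory)
    {
        using (Stream stream = File.OpenWrite(outputPath))
        using (var writer = WriterFactory.Open(stream, ArchiveType.Tar,
                                               new WriterOptions(CompressionType.GZip)))
        {
            // Recursively add every file under the source directory.
            writer.WriteAll(sourceDirectory, "*", SearchOption.AllDirectories);
        }
    }
}
```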
LICENSE

@@ -114,7 +114,6 @@
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=NAMESPACE_005FALIAS/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></s:String>
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=XAML_005FFIELD/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></s:String>
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=XAML_005FRESOURCE/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></s:String>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpAttributeForSingleLineMethodUpgrade/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EAddAccessorOwnerDeclarationBracesMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EMigrateBlankLinesAroundFieldToBlankLinesAroundProperty/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EMigrateThisQualifierSettings/@EntryIndexedValue">True</s:Boolean></wpf:ResourceDictionary>
31  build.cake
@@ -30,11 +30,8 @@ Task("Build")
DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);

settings.Framework = "netstandard1.3";
DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);

settings.Framework = "netstandard2.0";
DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);
settings.Framework = "netcoreapp1.1";
DotNetCoreBuild("./tests/SharpCompress.Test/SharpCompress.Test.csproj", settings);
}
});

@@ -42,25 +39,23 @@ Task("Test")
.IsDependentOn("Build")
.Does(() =>
{
var files = GetFiles("tests/**/*.csproj");
foreach(var file in files)
if (!bool.Parse(EnvironmentVariable("APPVEYOR") ?? "false")
    && !bool.Parse(EnvironmentVariable("TRAVIS") ?? "false"))
{
var settings = new DotNetCoreTestSettings
var files = GetFiles("tests/**/*.csproj");
foreach(var file in files)
{
Configuration = "Release",
Framework = "netcoreapp2.0"
};

DotNetCoreTest(file.ToString(), settings);

settings = new DotNetCoreTestSettings
var settings = new DotNetCoreTestSettings
{
Configuration = "Release",
Framework = "netcoreapp2.0"
Configuration = "Release"
};

DotNetCoreTest(file.ToString(), settings);
}
}
else
{
Information("Skipping tests as this is AppVeyor or Travis CI");
}
});
2  build.sh
@@ -8,7 +8,7 @@
# Define directories.
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
TOOLS_DIR=$SCRIPT_DIR/tools
CAKE_VERSION=0.23.0
CAKE_VERSION=0.19.1
CAKE_DLL=$TOOLS_DIR/Cake.CoreCLR.$CAKE_VERSION/Cake.dll

# Make sure the tools folder exist.

@@ -6,7 +6,6 @@ using SharpCompress.Archives.SevenZip;
|
||||
using SharpCompress.Archives.Tar;
|
||||
using SharpCompress.Archives.Zip;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Compressors.LZMA;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
namespace SharpCompress.Archives
|
||||
@@ -56,7 +55,7 @@ namespace SharpCompress.Archives
|
||||
stream.Seek(0, SeekOrigin.Begin);
|
||||
return TarArchive.Open(stream, readerOptions);
|
||||
}
|
||||
throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip");
|
||||
throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip");
|
||||
}
|
||||
|
||||
public static IWritableArchive Create(ArchiveType type)
|
||||
|
||||
@@ -14,7 +14,6 @@ namespace SharpCompress.Archives.GZip
|
||||
public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
{
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -37,7 +36,6 @@ namespace SharpCompress.Archives.GZip
|
||||
return new GZipArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -56,11 +54,11 @@ namespace SharpCompress.Archives.GZip
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="options"></param>
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="options"></param>
|
||||
internal GZipArchive(FileInfo fileInfo, ReaderOptions options)
|
||||
: base(ArchiveType.GZip, fileInfo, options)
|
||||
{
|
||||
@@ -106,9 +104,15 @@ namespace SharpCompress.Archives.GZip
|
||||
{
|
||||
// read the header on the first read
|
||||
byte[] header = new byte[10];
|
||||
int n = stream.Read(header, 0, header.Length);
|
||||
|
||||
// workitem 8501: handle edge case (decompress empty stream)
|
||||
if (!stream.ReadFully(header))
|
||||
if (n == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (n != 10)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
@@ -154,7 +158,7 @@ namespace SharpCompress.Archives.GZip
|
||||
{
|
||||
throw new InvalidOperationException("Only one entry is allowed in a GZip Archive");
|
||||
}
|
||||
using (var writer = new GZipWriter(stream, new GZipWriterOptions(options)))
|
||||
using (var writer = new GZipWriter(stream))
|
||||
{
|
||||
foreach (var entry in oldEntries.Concat(newEntries)
|
||||
.Where(x => !x.IsDirectory))
|
||||
@@ -175,7 +179,7 @@ namespace SharpCompress.Archives.GZip
|
||||
protected override IEnumerable<GZipArchiveEntry> LoadEntries(IEnumerable<GZipVolume> volumes)
|
||||
{
|
||||
Stream stream = volumes.Single().Stream;
|
||||
yield return new GZipArchiveEntry(this, new GZipFilePart(stream, ReaderOptions.ArchiveEncoding));
|
||||
yield return new GZipArchiveEntry(this, new GZipFilePart(stream));
|
||||
}
|
||||
|
||||
protected override IReader CreateReaderForSolidExtraction()
|
||||
|
||||
@@ -4,7 +4,6 @@ using System.IO;
|
||||
using System.Linq;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.SevenZip;
|
||||
using SharpCompress.Compressors.LZMA.Utilites;
|
||||
using SharpCompress.IO;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
@@ -107,7 +106,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
for (int i = 0; i < database.Files.Count; i++)
|
||||
{
|
||||
var file = database.Files[i];
|
||||
yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file, ReaderOptions.ArchiveEncoding));
|
||||
yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,7 +117,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
stream.Position = 0;
|
||||
var reader = new ArchiveReader();
|
||||
reader.Open(stream);
|
||||
database = reader.ReadDatabase(new PasswordProvider(ReaderOptions.Password));
|
||||
database = reader.ReadDatabase(null);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -145,7 +144,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
|
||||
protected override IReader CreateReaderForSolidExtraction()
|
||||
{
|
||||
return new SevenZipReader(ReaderOptions, this);
|
||||
return new SevenZipReader(this);
|
||||
}
|
||||
|
||||
public override bool IsSolid { get { return Entries.Where(x => !x.IsDirectory).GroupBy(x => x.FilePart.Folder).Count() > 1; } }
|
||||
@@ -166,8 +165,8 @@ namespace SharpCompress.Archives.SevenZip
|
||||
private Stream currentStream;
|
||||
private CFileItem currentItem;
|
||||
|
||||
internal SevenZipReader(ReaderOptions readerOptions, SevenZipArchive archive)
|
||||
: base(readerOptions, ArchiveType.SevenZip)
|
||||
internal SevenZipReader(SevenZipArchive archive)
|
||||
: base(new ReaderOptions(), ArchiveType.SevenZip)
|
||||
{
|
||||
this.archive = archive;
|
||||
}
|
||||
@@ -191,7 +190,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
}
|
||||
else
|
||||
{
|
||||
currentStream = archive.database.GetFolderStream(stream, currentFolder, new PasswordProvider(Options.Password));
|
||||
currentStream = archive.database.GetFolderStream(stream, currentFolder, null);
|
||||
}
|
||||
foreach (var entry in group)
|
||||
{
|
||||
@@ -206,21 +205,5 @@ namespace SharpCompress.Archives.SevenZip
|
||||
return CreateEntryStream(new ReadOnlySubStream(currentStream, currentItem.Size));
|
||||
}
|
||||
}
|
||||
|
||||
private class PasswordProvider : IPasswordProvider
|
||||
{
|
||||
private readonly string _password;
|
||||
|
||||
public PasswordProvider(string password)
|
||||
{
|
||||
_password = password;
|
||||
|
||||
}
|
||||
|
||||
public string CryptoGetTextPassword()
|
||||
{
|
||||
return _password;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ using System.IO;
|
||||
using System.Linq;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Tar;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
using SharpCompress.Readers;
|
||||
using SharpCompress.Readers.Tar;
|
||||
@@ -16,7 +15,7 @@ namespace SharpCompress.Archives.Tar
|
||||
public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
{
|
||||
#if !NO_FILE
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -39,7 +38,7 @@ namespace SharpCompress.Archives.Tar
|
||||
return new TarArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -52,7 +51,6 @@ namespace SharpCompress.Archives.Tar
|
||||
}
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
public static bool IsTarFile(string filePath)
|
||||
{
|
||||
return IsTarFile(new FileInfo(filePath));
|
||||
@@ -75,9 +73,9 @@ namespace SharpCompress.Archives.Tar
|
||||
{
|
||||
try
|
||||
{
|
||||
TarHeader tar = new TarHeader(new ArchiveEncoding());
|
||||
tar.Read(new BinaryReader(stream));
|
||||
return tar.Name.Length > 0 && Enum.IsDefined(typeof(EntryType), tar.EntryType);
|
||||
var input = new TarInputStream(stream);
|
||||
var header = input.GetNextEntry();
|
||||
return header.Name.Length > 0;
|
||||
}
|
||||
catch
|
||||
{
|
||||
@@ -99,6 +97,7 @@ namespace SharpCompress.Archives.Tar
|
||||
|
||||
protected override IEnumerable<TarVolume> LoadVolumes(FileInfo file)
|
||||
{
|
||||
|
||||
return new TarVolume(file.OpenRead(), ReaderOptions).AsEnumerable();
|
||||
}
|
||||
#endif
|
||||
@@ -127,11 +126,11 @@ namespace SharpCompress.Archives.Tar
|
||||
{
|
||||
Stream stream = volumes.Single().Stream;
|
||||
TarHeader previousHeader = null;
|
||||
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream, ReaderOptions.ArchiveEncoding))
|
||||
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream))
|
||||
{
|
||||
if (header != null)
|
||||
{
|
||||
if (header.EntryType == EntryType.LongName)
|
||||
if (header.TypeFlag == TarHeader.LF_GNU_LONGNAME)
|
||||
{
|
||||
previousHeader = header;
|
||||
}
|
||||
@@ -152,7 +151,7 @@ namespace SharpCompress.Archives.Tar
|
||||
memoryStream.Position = 0;
|
||||
var bytes = memoryStream.ToArray();
|
||||
|
||||
header.Name = ReaderOptions.ArchiveEncoding.Decode(bytes).TrimNulls();
|
||||
header.Name = ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length).TrimNulls();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,6 @@ namespace SharpCompress.Archives.Zip
|
||||
public CompressionLevel DeflateCompressionLevel { get; set; }
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -47,7 +46,6 @@ namespace SharpCompress.Archives.Zip
|
||||
return new ZipArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -60,7 +58,6 @@ namespace SharpCompress.Archives.Zip
|
||||
}
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
public static bool IsZipFile(string filePath, string password = null)
|
||||
{
|
||||
return IsZipFile(new FileInfo(filePath), password);
|
||||
@@ -81,7 +78,7 @@ namespace SharpCompress.Archives.Zip
|
||||
|
||||
public static bool IsZipFile(Stream stream, string password = null)
|
||||
{
|
||||
StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password, new ArchiveEncoding());
|
||||
StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password);
|
||||
try
|
||||
{
|
||||
ZipHeader header =
|
||||
@@ -112,7 +109,7 @@ namespace SharpCompress.Archives.Zip
|
||||
internal ZipArchive(FileInfo fileInfo, ReaderOptions readerOptions)
|
||||
: base(ArchiveType.Zip, fileInfo, readerOptions)
|
||||
{
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding);
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password);
|
||||
}
|
||||
|
||||
protected override IEnumerable<ZipVolume> LoadVolumes(FileInfo file)
|
||||
@@ -134,7 +131,7 @@ namespace SharpCompress.Archives.Zip
|
||||
internal ZipArchive(Stream stream, ReaderOptions readerOptions)
|
||||
: base(ArchiveType.Zip, stream, readerOptions)
|
||||
{
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding);
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password);
|
||||
}
|
||||
|
||||
protected override IEnumerable<ZipVolume> LoadVolumes(IEnumerable<Stream> streams)
|
||||
@@ -153,19 +150,19 @@ namespace SharpCompress.Archives.Zip
|
||||
switch (h.ZipHeaderType)
|
||||
{
|
||||
case ZipHeaderType.DirectoryEntry:
|
||||
{
|
||||
yield return new ZipArchiveEntry(this,
|
||||
new SeekableZipFilePart(headerFactory,
|
||||
h as DirectoryEntryHeader,
|
||||
stream));
|
||||
}
|
||||
{
|
||||
yield return new ZipArchiveEntry(this,
|
||||
new SeekableZipFilePart(headerFactory,
|
||||
h as DirectoryEntryHeader,
|
||||
stream));
|
||||
}
|
||||
break;
|
||||
case ZipHeaderType.DirectoryEnd:
|
||||
{
|
||||
byte[] bytes = (h as DirectoryEndHeader).Comment;
|
||||
volume.Comment = ReaderOptions.ArchiveEncoding.Decode(bytes);
|
||||
yield break;
|
||||
}
|
||||
{
|
||||
byte[] bytes = (h as DirectoryEndHeader).Comment;
|
||||
volume.Comment = ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length);
|
||||
yield break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -208,7 +205,7 @@ namespace SharpCompress.Archives.Zip
|
||||
{
|
||||
var stream = Volumes.Single().Stream;
|
||||
stream.Position = 0;
|
||||
return ZipReader.Open(stream, ReaderOptions);
|
||||
return ZipReader.Open(stream);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,60 +1,23 @@
|
||||
using System;
|
||||
using System.Text;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common
|
||||
{
|
||||
public class ArchiveEncoding
|
||||
public static class ArchiveEncoding
|
||||
{
|
||||
/// <summary>
|
||||
/// Default encoding to use when archive format doesn't specify one.
|
||||
/// </summary>
|
||||
public Encoding Default { get; set; }
|
||||
public static Encoding Default { get; set; }
|
||||
|
||||
/// <summary>
|
||||
/// ArchiveEncoding used by encryption schemes which don't comply with RFC 2898.
|
||||
/// Encoding used by encryption schemes which don't comply with RFC 2898.
|
||||
/// </summary>
|
||||
public Encoding Password { get; set; }
|
||||
public static Encoding Password { get; set; }
|
||||
|
||||
/// <summary>
|
||||
/// Set this encoding when you want to force it for all encoding operations.
|
||||
/// </summary>
|
||||
public Encoding Forced { get; set; }
|
||||
|
||||
/// <summary>
|
||||
/// Set this when you want to use a custom method for all decoding operations.
|
||||
/// </summary>
|
||||
/// <returns>string Func(bytes, index, length)</returns>
|
||||
public Func<byte[], int, int, string> CustomDecoder { get; set; }
|
||||
|
||||
public ArchiveEncoding()
|
||||
static ArchiveEncoding()
|
||||
{
|
||||
Default = Encoding.UTF8;
|
||||
Password = Encoding.UTF8;
|
||||
}
|
||||
|
||||
public string Decode(byte[] bytes)
|
||||
{
|
||||
return Decode(bytes, 0, bytes.Length);
|
||||
}
|
||||
|
||||
public string Decode(byte[] bytes, int start, int length)
|
||||
{
|
||||
return GetDecoder().Invoke(bytes, start, length);
|
||||
}
|
||||
|
||||
public byte[] Encode(string str)
|
||||
{
|
||||
return GetEncoding().GetBytes(str);
|
||||
}
|
||||
|
||||
public Encoding GetEncoding()
|
||||
{
|
||||
return Forced ?? Default ?? Encoding.UTF8;
|
||||
}
|
||||
|
||||
public Func<byte[], int, int, string> GetDecoder()
|
||||
{
|
||||
return CustomDecoder ?? ((bytes, index, count) => (Default ?? Encoding.UTF8).GetString(bytes, index, count));
|
||||
}
|
||||
}
|
||||
}
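The ArchiveEncoding hunk above shows the instance-based class (with `Default`, `Password`, `Forced`, `CustomDecoder` and the `Decode`/`Encode` helpers) on one side and the older static class on the other. The instance-based design is what lets callers control how entry names are decoded through the `ArchiveEncoding` property on `OptionsBase` shown elsewhere in this diff. A hedged sketch of that usage, assuming the public `ZipArchive.Open(path, readerOptions)` overload and the `entry.Key` property:

```csharp
using System.Text;
using SharpCompress.Archives.Zip;
using SharpCompress.Common;
using SharpCompress.Readers;

class EncodingExample
{
    static void OpenShiftJisZip(string path)
    {
        // Force Shift-JIS (code page 932) for all entry-name decoding.
        // Encoding.GetEncoding(932) needs the full framework or the
        // System.Text.Encoding.CodePages package on .NET Core.
        var options = new ReaderOptions
        {
            ArchiveEncoding = new ArchiveEncoding { Forced = Encoding.GetEncoding(932) }
        };

        using (var archive = ZipArchive.Open(path, options))
        {
            foreach (var entry in archive.Entries)
            {
                System.Console.WriteLine(entry.Key); // decoded with the forced encoding
            }
        }
    }
}
```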
|
||||
@@ -12,7 +12,6 @@
|
||||
BCJ,
|
||||
BCJ2,
|
||||
LZip,
|
||||
Xz,
|
||||
Unknown
|
||||
}
|
||||
}
|
||||
@@ -4,17 +4,9 @@ namespace SharpCompress.Common
|
||||
{
|
||||
public abstract class FilePart
|
||||
{
|
||||
protected FilePart(ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
internal ArchiveEncoding ArchiveEncoding { get; }
|
||||
|
||||
internal abstract string FilePartName { get; }
|
||||
|
||||
internal abstract Stream GetCompressedStream();
|
||||
internal abstract Stream GetRawStream();
|
||||
internal bool Skipped { get; set; }
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.GZip
|
||||
{
|
||||
@@ -40,9 +39,9 @@ namespace SharpCompress.Common.GZip
|
||||
|
||||
internal override IEnumerable<FilePart> Parts => filePart.AsEnumerable<FilePart>();
|
||||
|
||||
internal static IEnumerable<GZipEntry> GetEntries(Stream stream, OptionsBase options)
|
||||
internal static IEnumerable<GZipEntry> GetEntries(Stream stream)
|
||||
{
|
||||
yield return new GZipEntry(new GZipFilePart(stream, options.ArchiveEncoding));
|
||||
yield return new GZipEntry(new GZipFilePart(stream));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,41 +1,38 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.Compressors;
|
||||
using SharpCompress.Compressors.Deflate;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.GZip
|
||||
{
|
||||
internal class GZipFilePart : FilePart
|
||||
{
|
||||
private string _name;
|
||||
private readonly Stream _stream;
|
||||
private string name;
|
||||
private readonly Stream stream;
|
||||
|
||||
internal GZipFilePart(Stream stream, ArchiveEncoding archiveEncoding)
|
||||
: base(archiveEncoding)
|
||||
internal GZipFilePart(Stream stream)
|
||||
{
|
||||
ReadAndValidateGzipHeader(stream);
|
||||
EntryStartPosition = stream.Position;
|
||||
this._stream = stream;
|
||||
this.stream = stream;
|
||||
}
|
||||
|
||||
internal long EntryStartPosition { get; }
|
||||
|
||||
internal DateTime? DateModified { get; private set; }
|
||||
|
||||
internal override string FilePartName => _name;
|
||||
internal override string FilePartName => name;
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
return new DeflateStream(_stream, CompressionMode.Decompress, CompressionLevel.Default, false);
|
||||
return new DeflateStream(stream, CompressionMode.Decompress, CompressionLevel.Default, false);
|
||||
}
|
||||
|
||||
internal override Stream GetRawStream()
|
||||
{
|
||||
return _stream;
|
||||
return stream;
|
||||
}
|
||||
|
||||
private void ReadAndValidateGzipHeader(Stream stream)
|
||||
@@ -69,16 +66,15 @@ namespace SharpCompress.Common.GZip
|
||||
|
||||
Int16 extraLength = (Int16)(header[0] + header[1] * 256);
|
||||
byte[] extra = new byte[extraLength];
|
||||
|
||||
if (!stream.ReadFully(extra))
|
||||
n = stream.Read(extra, 0, extra.Length);
|
||||
if (n != extraLength)
|
||||
{
|
||||
throw new ZlibException("Unexpected end-of-file reading GZIP header.");
|
||||
}
|
||||
n = extraLength;
|
||||
}
|
||||
if ((header[3] & 0x08) == 0x08)
|
||||
{
|
||||
_name = ReadZeroTerminatedString(stream);
|
||||
name = ReadZeroTerminatedString(stream);
|
||||
}
|
||||
if ((header[3] & 0x10) == 0x010)
|
||||
{
|
||||
@@ -90,7 +86,7 @@ namespace SharpCompress.Common.GZip
|
||||
}
|
||||
}
|
||||
|
||||
private string ReadZeroTerminatedString(Stream stream)
|
||||
private static string ReadZeroTerminatedString(Stream stream)
|
||||
{
|
||||
byte[] buf1 = new byte[1];
|
||||
var list = new List<byte>();
|
||||
@@ -113,8 +109,8 @@ namespace SharpCompress.Common.GZip
|
||||
}
|
||||
}
|
||||
while (!done);
|
||||
byte[] buffer = list.ToArray();
|
||||
return ArchiveEncoding.Decode(buffer);
|
||||
byte[] a = list.ToArray();
|
||||
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
|
||||
}
|
||||
}
|
||||
}
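The GZipFilePart hunk above reads and validates the fixed 10-byte gzip member header and then the optional extra/name/comment fields that the flag byte announces. For reference, here is a standalone sketch of that header layout per RFC 1952; this is general gzip knowledge, not SharpCompress API, and the helper names are mine.

```csharp
using System.Collections.Generic;
using System.IO;
using System.Text;

static class GZipHeader
{
    // Reads one gzip member header and returns the original file name, if present.
    public static string ReadMemberHeader(Stream stream)
    {
        byte[] header = new byte[10];
        if (stream.Read(header, 0, 10) != 10)
            throw new InvalidDataException("Truncated gzip header.");

        if (header[0] != 0x1F || header[1] != 0x8B)   // magic bytes
            throw new InvalidDataException("Not a gzip stream.");
        if (header[2] != 8)                           // CM: 8 = deflate
            throw new InvalidDataException("Unsupported compression method.");

        byte flags = header[3];
        // header[4..7] = MTIME (little-endian Unix time), header[8] = XFL, header[9] = OS.

        if ((flags & 0x04) != 0) // FEXTRA: 2-byte little-endian length, then that many bytes
        {
            int extraLength = stream.ReadByte() | (stream.ReadByte() << 8);
            for (int i = 0; i < extraLength; i++) stream.ReadByte();
        }

        string name = null;
        if ((flags & 0x08) != 0) // FNAME: zero-terminated original file name
            name = ReadZeroTerminated(stream);
        if ((flags & 0x10) != 0) // FCOMMENT: zero-terminated comment
            ReadZeroTerminated(stream);
        if ((flags & 0x02) != 0) // FHCRC: 2-byte CRC16 of the header
        {
            stream.ReadByte();
            stream.ReadByte();
        }
        return name;
    }

    private static string ReadZeroTerminated(Stream stream)
    {
        // The spec says Latin-1; decoded here with UTF-8, as SharpCompress's default does.
        var bytes = new List<byte>();
        int b;
        while ((b = stream.ReadByte()) > 0) bytes.Add((byte)b);
        return Encoding.UTF8.GetString(bytes.ToArray(), 0, bytes.Count);
    }
}
```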
|
||||
@@ -1,5 +1,4 @@
|
||||
|
||||
namespace SharpCompress.Common
|
||||
namespace SharpCompress.Common
|
||||
{
|
||||
public class OptionsBase
|
||||
{
|
||||
@@ -7,7 +6,5 @@ namespace SharpCompress.Common
|
||||
/// SharpCompress will keep the supplied streams open. Default is true.
|
||||
/// </summary>
|
||||
public bool LeaveStreamOpen { get; set; } = true;
|
||||
|
||||
public ArchiveEncoding ArchiveEncoding { get; set; } = new ArchiveEncoding();
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
using SharpCompress.IO;
|
||||
using System;
|
||||
using System.IO;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
@@ -52,50 +52,50 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
switch (HeaderType)
|
||||
{
|
||||
case HeaderType.FileHeader:
|
||||
{
|
||||
if (FileFlags.HasFlag(FileFlags.UNICODE))
|
||||
{
|
||||
if (FileFlags.HasFlag(FileFlags.UNICODE))
|
||||
int length = 0;
|
||||
while (length < fileNameBytes.Length
|
||||
&& fileNameBytes[length] != 0)
|
||||
{
|
||||
int length = 0;
|
||||
while (length < fileNameBytes.Length
|
||||
&& fileNameBytes[length] != 0)
|
||||
{
|
||||
length++;
|
||||
}
|
||||
if (length != nameSize)
|
||||
{
|
||||
length++;
|
||||
FileName = FileNameDecoder.Decode(fileNameBytes, length);
|
||||
}
|
||||
else
|
||||
{
|
||||
FileName = ArchiveEncoding.Decode(fileNameBytes);
|
||||
}
|
||||
length++;
|
||||
}
|
||||
if (length != nameSize)
|
||||
{
|
||||
length++;
|
||||
FileName = FileNameDecoder.Decode(fileNameBytes, length);
|
||||
}
|
||||
else
|
||||
{
|
||||
FileName = ArchiveEncoding.Decode(fileNameBytes);
|
||||
FileName = DecodeDefault(fileNameBytes);
|
||||
}
|
||||
FileName = ConvertPath(FileName, HostOS);
|
||||
}
|
||||
else
|
||||
{
|
||||
FileName = DecodeDefault(fileNameBytes);
|
||||
}
|
||||
FileName = ConvertPath(FileName, HostOS);
|
||||
}
|
||||
break;
|
||||
case HeaderType.NewSubHeader:
|
||||
{
|
||||
int datasize = HeaderSize - NEWLHD_SIZE - nameSize;
|
||||
if (FileFlags.HasFlag(FileFlags.SALT))
|
||||
{
|
||||
int datasize = HeaderSize - NEWLHD_SIZE - nameSize;
|
||||
if (FileFlags.HasFlag(FileFlags.SALT))
|
||||
{
|
||||
datasize -= SALT_SIZE;
|
||||
}
|
||||
if (datasize > 0)
|
||||
{
|
||||
SubData = reader.ReadBytes(datasize);
|
||||
}
|
||||
|
||||
if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes))
|
||||
{
|
||||
RecoverySectors = SubData[8] + (SubData[9] << 8)
|
||||
+ (SubData[10] << 16) + (SubData[11] << 24);
|
||||
}
|
||||
datasize -= SALT_SIZE;
|
||||
}
|
||||
if (datasize > 0)
|
||||
{
|
||||
SubData = reader.ReadBytes(datasize);
|
||||
}
|
||||
|
||||
if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes))
|
||||
{
|
||||
RecoverySectors = SubData[8] + (SubData[9] << 8)
|
||||
+ (SubData[10] << 16) + (SubData[11] << 24);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -118,6 +118,12 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
}
|
||||
}
|
||||
|
||||
//only the full .net framework will do other code pages than unicode/utf8
|
||||
private string DecodeDefault(byte[] bytes)
|
||||
{
|
||||
return ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length);
|
||||
}
|
||||
|
||||
private long UInt32To64(uint x, uint y)
|
||||
{
|
||||
long l = x;
|
||||
@@ -172,7 +178,6 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
}
|
||||
|
||||
internal long DataStartPosition { get; set; }
|
||||
|
||||
internal HostOS HostOS { get; private set; }
|
||||
|
||||
internal uint FileCRC { get; private set; }
|
||||
@@ -194,7 +199,6 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
internal FileFlags FileFlags => (FileFlags)Flags;
|
||||
|
||||
internal long CompressedSize { get; private set; }
|
||||
|
||||
internal long UncompressedSize { get; private set; }
|
||||
|
||||
internal string FileName { get; private set; }
|
||||
|
||||
@@ -18,9 +18,9 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
Flags == 0x1A21 &&
|
||||
HeaderSize == 0x07;
|
||||
|
||||
// Rar5 signature: 52 61 72 21 1A 07 01 00 (not supported yet)
|
||||
// Rar5 signature: 52 61 72 21 1A 07 10 00 (not supported yet)
|
||||
}
|
||||
|
||||
internal bool OldFormat { get; private set; }
|
||||
}
|
||||
}
|
||||
}
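The MarkHeader hunk above checks the legacy RAR marker fields (Flags == 0x1A21, HeaderSize == 0x07) and carries a comment about the unsupported RAR5 signature. Note that the two comment variants shown disagree; the marker published in the RAR 5.0 format notes ends in 01 00, not 10 00. A small, self-contained detection sketch based on those published markers (not SharpCompress code):

```csharp
using System.IO;
using System.Linq;

static class RarSignature
{
    // 52 61 72 21 1A 07 00    -> RAR 4.x ("Rar!\x1a\x07\x00")
    // 52 61 72 21 1A 07 01 00 -> RAR 5.x (per the published RAR 5.0 format notes)
    private static readonly byte[] Rar4 = { 0x52, 0x61, 0x72, 0x21, 0x1A, 0x07, 0x00 };
    private static readonly byte[] Rar5 = { 0x52, 0x61, 0x72, 0x21, 0x1A, 0x07, 0x01, 0x00 };

    public static string Detect(Stream stream)
    {
        byte[] buffer = new byte[8];
        int read = stream.Read(buffer, 0, buffer.Length);
        if (read >= Rar5.Length && buffer.Take(Rar5.Length).SequenceEqual(Rar5))
            return "rar5";
        if (read >= Rar4.Length && buffer.Take(Rar4.Length).SequenceEqual(Rar4))
            return "rar4";
        return "unknown";
    }
}
```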
|
||||
@@ -1,7 +1,6 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
@@ -18,16 +17,14 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
HeaderSize = baseHeader.HeaderSize;
|
||||
AdditionalSize = baseHeader.AdditionalSize;
|
||||
ReadBytes = baseHeader.ReadBytes;
|
||||
ArchiveEncoding = baseHeader.ArchiveEncoding;
|
||||
}
|
||||
|
||||
internal static RarHeader Create(RarCrcBinaryReader reader, ArchiveEncoding archiveEncoding)
|
||||
internal static RarHeader Create(RarCrcBinaryReader reader)
|
||||
{
|
||||
try
|
||||
{
|
||||
RarHeader header = new RarHeader();
|
||||
|
||||
header.ArchiveEncoding = archiveEncoding;
|
||||
reader.Mark();
|
||||
header.ReadStartFromReader(reader);
|
||||
header.ReadBytes += reader.CurrentReadByteCount;
|
||||
@@ -53,8 +50,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
}
|
||||
}
|
||||
|
||||
protected virtual void ReadFromReader(MarkingBinaryReader reader)
|
||||
{
|
||||
protected virtual void ReadFromReader(MarkingBinaryReader reader) {
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
@@ -80,11 +76,10 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
return header;
|
||||
}
|
||||
|
||||
private void VerifyHeaderCrc(ushort crc)
|
||||
{
|
||||
if (HeaderType != HeaderType.MarkHeader)
|
||||
private void VerifyHeaderCrc(ushort crc) {
|
||||
if (HeaderType != HeaderType.MarkHeader)
|
||||
{
|
||||
if (crc != HeadCRC)
|
||||
if (crc != HeadCRC)
|
||||
{
|
||||
throw new InvalidFormatException("rar header crc mismatch");
|
||||
}
|
||||
@@ -111,8 +106,6 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
|
||||
protected short HeaderSize { get; private set; }
|
||||
|
||||
internal ArchiveEncoding ArchiveEncoding { get; private set; }
|
||||
|
||||
/// <summary>
|
||||
/// This additional size of the header could be file data
|
||||
/// </summary>
|
||||
|
||||
@@ -117,7 +117,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
#if !NO_CRYPTO
|
||||
var reader = new RarCryptoBinaryReader(stream, Options.Password);
|
||||
|
||||
|
||||
if (IsEncrypted)
|
||||
{
|
||||
if (Options.Password == null)
|
||||
@@ -133,7 +133,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
|
||||
#endif
|
||||
|
||||
RarHeader header = RarHeader.Create(reader, Options.ArchiveEncoding);
|
||||
RarHeader header = RarHeader.Create(reader);
|
||||
if (header == null)
|
||||
{
|
||||
return null;
|
||||
@@ -141,110 +141,110 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
switch (header.HeaderType)
|
||||
{
|
||||
case HeaderType.ArchiveHeader:
|
||||
{
|
||||
var ah = header.PromoteHeader<ArchiveHeader>(reader);
|
||||
IsEncrypted = ah.HasPassword;
|
||||
return ah;
|
||||
}
|
||||
{
|
||||
var ah = header.PromoteHeader<ArchiveHeader>(reader);
|
||||
IsEncrypted = ah.HasPassword;
|
||||
return ah;
|
||||
}
|
||||
case HeaderType.MarkHeader:
|
||||
{
|
||||
return header.PromoteHeader<MarkHeader>(reader);
|
||||
}
|
||||
{
|
||||
return header.PromoteHeader<MarkHeader>(reader);
|
||||
}
|
||||
|
||||
case HeaderType.ProtectHeader:
|
||||
{
|
||||
ProtectHeader ph = header.PromoteHeader<ProtectHeader>(reader);
|
||||
|
||||
// skip the recovery record data, we do not use it.
|
||||
switch (StreamingMode)
|
||||
{
|
||||
ProtectHeader ph = header.PromoteHeader<ProtectHeader>(reader);
|
||||
|
||||
// skip the recovery record data, we do not use it.
|
||||
switch (StreamingMode)
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
reader.BaseStream.Position += ph.DataSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
reader.BaseStream.Skip(ph.DataSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
reader.BaseStream.Position += ph.DataSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
reader.BaseStream.Skip(ph.DataSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
|
||||
return ph;
|
||||
}
|
||||
|
||||
return ph;
|
||||
}
|
||||
|
||||
case HeaderType.NewSubHeader:
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
//skip the data because it's useless?
|
||||
reader.BaseStream.Skip(fh.CompressedSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
case HeaderType.FileHeader:
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize);
|
||||
if (fh.Salt == null)
|
||||
{
|
||||
fh.PackedStream = ms;
|
||||
}
|
||||
else
|
||||
{
|
||||
//skip the data because it's useless?
|
||||
reader.BaseStream.Skip(fh.CompressedSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
case HeaderType.FileHeader:
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize);
|
||||
if (fh.Salt == null)
|
||||
{
|
||||
fh.PackedStream = ms;
|
||||
}
|
||||
else
|
||||
{
|
||||
#if !NO_CRYPTO
|
||||
fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.Salt);
|
||||
fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.Salt);
|
||||
#else
|
||||
throw new NotSupportedException("RarCrypto not supported");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
case HeaderType.EndArchiveHeader:
|
||||
{
|
||||
return header.PromoteHeader<EndArchiveHeader>(reader);
|
||||
}
|
||||
{
|
||||
return header.PromoteHeader<EndArchiveHeader>(reader);
|
||||
}
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid Rar Header: " + header.HeaderType);
|
||||
}
|
||||
{
|
||||
throw new InvalidFormatException("Invalid Rar Header: " + header.HeaderType);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ namespace SharpCompress.Common.Rar
|
||||
internal abstract class RarFilePart : FilePart
|
||||
{
|
||||
internal RarFilePart(MarkHeader mh, FileHeader fh)
|
||||
: base(fh.ArchiveEncoding)
|
||||
{
|
||||
MarkHeader = mh;
|
||||
FileHeader = fh;
|
||||
|
||||
@@ -22,13 +22,6 @@ namespace SharpCompress.Common.SevenZip
|
||||
internal List<long> PackStreamStartPositions = new List<long>();
|
||||
internal List<int> FolderStartFileIndex = new List<int>();
|
||||
internal List<int> FileIndexToFolderIndexMap = new List<int>();
|
||||
|
||||
internal IPasswordProvider PasswordProvider { get; }
|
||||
|
||||
public ArchiveDatabase(IPasswordProvider passwordProvider)
|
||||
{
|
||||
PasswordProvider = passwordProvider;
|
||||
}
|
||||
|
||||
internal void Clear()
|
||||
{
|
||||
|
||||
@@ -182,7 +182,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
|
||||
private DateTime? TranslateTime(long? time)
|
||||
{
|
||||
if (time.HasValue && time.Value >= 0 && time.Value <= 2650467743999999999) //maximum Windows file time 31.12.9999
|
||||
if (time.HasValue)
|
||||
{
|
||||
return TranslateTime(time.Value);
|
||||
}
|
||||
@@ -1211,7 +1211,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
|
||||
public ArchiveDatabase ReadDatabase(IPasswordProvider pass)
|
||||
{
|
||||
var db = new ArchiveDatabase(pass);
|
||||
var db = new ArchiveDatabase();
|
||||
db.Clear();
|
||||
|
||||
db.MajorVersion = _header[6];
|
||||
@@ -1279,7 +1279,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
throw new InvalidOperationException();
|
||||
}
|
||||
|
||||
var dataVector = ReadAndDecodePackedStreams(db.StartPositionAfterHeader, db.PasswordProvider);
|
||||
var dataVector = ReadAndDecodePackedStreams(db.StartPositionAfterHeader, pass);
|
||||
|
||||
// compressed header without content is odd but ok
|
||||
if (dataVector.Count == 0)
|
||||
@@ -1301,7 +1301,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
}
|
||||
}
|
||||
|
||||
ReadHeader(db, db.PasswordProvider);
|
||||
ReadHeader(db, pass);
|
||||
}
|
||||
db.Fill();
|
||||
return db;
|
||||
@@ -1441,7 +1441,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
#endregion
|
||||
}
|
||||
|
||||
private Stream GetCachedDecoderStream(ArchiveDatabase _db, int folderIndex)
|
||||
private Stream GetCachedDecoderStream(ArchiveDatabase _db, int folderIndex, IPasswordProvider pw)
|
||||
{
|
||||
Stream s;
|
||||
if (!_cachedStreams.TryGetValue(folderIndex, out s))
|
||||
@@ -1456,13 +1456,13 @@ namespace SharpCompress.Common.SevenZip
|
||||
}
|
||||
|
||||
s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes.ToArray(), folderInfo,
|
||||
_db.PasswordProvider);
|
||||
pw);
|
||||
_cachedStreams.Add(folderIndex, s);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
public Stream OpenStream(ArchiveDatabase _db, int fileIndex)
|
||||
public Stream OpenStream(ArchiveDatabase _db, int fileIndex, IPasswordProvider pw)
|
||||
{
|
||||
int folderIndex = _db.FileIndexToFolderIndexMap[fileIndex];
|
||||
int numFilesInFolder = _db.NumUnpackStreamsVector[folderIndex];
|
||||
@@ -1479,12 +1479,12 @@ namespace SharpCompress.Common.SevenZip
|
||||
skipSize += _db.Files[firstFileIndex + i].Size;
|
||||
}
|
||||
|
||||
Stream s = GetCachedDecoderStream(_db, folderIndex);
|
||||
Stream s = GetCachedDecoderStream(_db, folderIndex, pw);
|
||||
s.Position = skipSize;
|
||||
return new ReadOnlySubStream(s, _db.Files[fileIndex].Size);
|
||||
}
|
||||
|
||||
public void Extract(ArchiveDatabase _db, int[] indices)
|
||||
public void Extract(ArchiveDatabase _db, int[] indices, IPasswordProvider pw)
|
||||
{
|
||||
int numItems;
|
||||
bool allFilesMode = (indices == null);
|
||||
@@ -1562,7 +1562,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
// TODO: If the decoding fails the last file may be extracted incompletely. Delete it?
|
||||
|
||||
Stream s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes.ToArray(),
|
||||
folderInfo, _db.PasswordProvider);
|
||||
folderInfo, pw);
|
||||
byte[] buffer = new byte[4 << 10];
|
||||
for (;;)
|
||||
{
|
||||
@@ -1588,4 +1588,4 @@ namespace SharpCompress.Common.SevenZip
|
||||
|
||||
#endregion
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,15 +7,14 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
internal class SevenZipFilePart : FilePart
|
||||
{
|
||||
private CompressionType? _type;
|
||||
private readonly Stream _stream;
|
||||
private readonly ArchiveDatabase _database;
|
||||
private CompressionType? type;
|
||||
private readonly Stream stream;
|
||||
private readonly ArchiveDatabase database;
|
||||
|
||||
internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry, ArchiveEncoding archiveEncoding)
|
||||
: base(archiveEncoding)
|
||||
internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry)
|
||||
{
|
||||
this._stream = stream;
|
||||
this._database = database;
|
||||
this.stream = stream;
|
||||
this.database = database;
|
||||
Index = index;
|
||||
Header = fileEntry;
|
||||
if (Header.HasStream)
|
||||
@@ -42,14 +41,14 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
return null;
|
||||
}
|
||||
var folderStream = _database.GetFolderStream(_stream, Folder, _database.PasswordProvider);
|
||||
var folderStream = database.GetFolderStream(stream, Folder, null);
|
||||
|
||||
int firstFileIndex = _database.FolderStartFileIndex[_database.Folders.IndexOf(Folder)];
|
||||
int firstFileIndex = database.FolderStartFileIndex[database.Folders.IndexOf(Folder)];
|
||||
int skipCount = Index - firstFileIndex;
|
||||
long skipSize = 0;
|
||||
for (int i = 0; i < skipCount; i++)
|
||||
{
|
||||
skipSize += _database.Files[firstFileIndex + i].Size;
|
||||
skipSize += database.Files[firstFileIndex + i].Size;
|
||||
}
|
||||
if (skipSize > 0)
|
||||
{
|
||||
@@ -62,11 +61,11 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_type == null)
|
||||
if (type == null)
|
||||
{
|
||||
_type = GetCompression();
|
||||
type = GetCompression();
|
||||
}
|
||||
return _type.Value;
|
||||
return type.Value;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,7 +84,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
var coder = Folder.Coders.First();
|
||||
switch (coder.MethodId.Id)
|
||||
{
|
||||
{
|
||||
case k_LZMA:
|
||||
case k_LZMA2:
|
||||
{
|
||||
|
||||
@@ -1,19 +0,0 @@
namespace SharpCompress.Common.Tar.Headers
{
    internal enum EntryType : byte
    {
        File = 0,
        OldFile = (byte)'0',
        HardLink = (byte)'1',
        SymLink = (byte)'2',
        CharDevice = (byte)'3',
        BlockDevice = (byte)'4',
        Directory = (byte)'5',
        Fifo = (byte)'6',
        LongLink = (byte)'K',
        LongName = (byte)'L',
        SparseFile = (byte)'S',
        VolumeHeader = (byte)'V',
        GlobalExtendedHeader = (byte)'g'
    }
}
@@ -1,275 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
using SharpCompress.Converters;
|
||||
|
||||
namespace SharpCompress.Common.Tar.Headers
|
||||
{
|
||||
internal class TarHeader
|
||||
{
|
||||
internal static readonly DateTime Epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
|
||||
public TarHeader(ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
internal string Name { get; set; }
|
||||
|
||||
//internal int Mode { get; set; }
|
||||
//internal int UserId { get; set; }
|
||||
//internal string UserName { get; set; }
|
||||
//internal int GroupId { get; set; }
|
||||
//internal string GroupName { get; set; }
|
||||
internal long Size { get; set; }
|
||||
internal DateTime LastModifiedTime { get; set; }
|
||||
internal EntryType EntryType { get; set; }
|
||||
internal Stream PackedStream { get; set; }
|
||||
internal ArchiveEncoding ArchiveEncoding { get; }
|
||||
|
||||
internal const int BlockSize = 512;
|
||||
|
||||
internal void Write(Stream output)
|
||||
{
|
||||
byte[] buffer = new byte[BlockSize];
|
||||
|
||||
WriteOctalBytes(511, buffer, 100, 8); // file mode
|
||||
WriteOctalBytes(0, buffer, 108, 8); // owner ID
|
||||
WriteOctalBytes(0, buffer, 116, 8); // group ID
|
||||
|
||||
//ArchiveEncoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
|
||||
if (Name.Length > 100)
|
||||
{
|
||||
// Set mock filename and filetype to indicate the next block is the actual name of the file
|
||||
WriteStringBytes("././@LongLink", buffer, 0, 100);
|
||||
buffer[156] = (byte)EntryType.LongName;
|
||||
WriteOctalBytes(Name.Length + 1, buffer, 124, 12);
|
||||
}
|
||||
else
|
||||
{
|
||||
WriteStringBytes(Name, buffer, 0, 100);
|
||||
WriteOctalBytes(Size, buffer, 124, 12);
|
||||
var time = (long)(LastModifiedTime.ToUniversalTime() - Epoch).TotalSeconds;
|
||||
WriteOctalBytes(time, buffer, 136, 12);
|
||||
buffer[156] = (byte)EntryType;
|
||||
|
||||
if (Size >= 0x1FFFFFFFF)
|
||||
{
|
||||
byte[] bytes = DataConverter.BigEndian.GetBytes(Size);
|
||||
var bytes12 = new byte[12];
|
||||
bytes.CopyTo(bytes12, 12 - bytes.Length);
|
||||
bytes12[0] |= 0x80;
|
||||
bytes12.CopyTo(buffer, 124);
|
||||
}
|
||||
}
|
||||
|
||||
int crc = RecalculateChecksum(buffer);
|
||||
WriteOctalBytes(crc, buffer, 148, 8);
|
||||
|
||||
output.Write(buffer, 0, buffer.Length);
|
||||
|
||||
if (Name.Length > 100)
|
||||
{
|
||||
WriteLongFilenameHeader(output);
|
||||
Name = Name.Substring(0, 100);
|
||||
Write(output);
|
||||
}
|
||||
}
|
||||
|
||||
private void WriteLongFilenameHeader(Stream output)
|
||||
{
|
||||
byte[] nameBytes = ArchiveEncoding.Encode(Name);
|
||||
output.Write(nameBytes, 0, nameBytes.Length);
|
||||
|
||||
// pad to multiple of BlockSize bytes, and make sure a terminating null is added
|
||||
int numPaddingBytes = BlockSize - (nameBytes.Length % BlockSize);
|
||||
if (numPaddingBytes == 0)
|
||||
{
|
||||
numPaddingBytes = BlockSize;
|
||||
}
|
||||
output.Write(new byte[numPaddingBytes], 0, numPaddingBytes);
|
||||
}
|
||||
|
||||
internal bool Read(BinaryReader reader)
|
||||
{
|
||||
var buffer = ReadBlock(reader);
|
||||
if (buffer.Length == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (ReadEntryType(buffer) == EntryType.LongName)
|
||||
{
|
||||
Name = ReadLongName(reader, buffer);
|
||||
buffer = ReadBlock(reader);
|
||||
}
|
||||
else
|
||||
{
|
||||
Name = ArchiveEncoding.Decode(buffer, 0, 100).TrimNulls();
|
||||
}
|
||||
|
||||
EntryType = ReadEntryType(buffer);
|
||||
Size = ReadSize(buffer);
|
||||
|
||||
//Mode = ReadASCIIInt32Base8(buffer, 100, 7);
|
||||
//UserId = ReadASCIIInt32Base8(buffer, 108, 7);
|
||||
//GroupId = ReadASCIIInt32Base8(buffer, 116, 7);
|
||||
long unixTimeStamp = ReadASCIIInt64Base8(buffer, 136, 11);
|
||||
LastModifiedTime = Epoch.AddSeconds(unixTimeStamp).ToLocalTime();
|
||||
|
||||
Magic = ArchiveEncoding.Decode(buffer, 257, 6).TrimNulls();
|
||||
|
||||
if (!string.IsNullOrEmpty(Magic)
|
||||
&& "ustar".Equals(Magic))
|
||||
{
|
||||
string namePrefix = ArchiveEncoding.Decode(buffer, 345, 157);
|
||||
namePrefix = namePrefix.TrimNulls();
|
||||
if (!string.IsNullOrEmpty(namePrefix))
|
||||
{
|
||||
Name = namePrefix + "/" + Name;
|
||||
}
|
||||
}
|
||||
if (EntryType != EntryType.LongName
|
||||
&& Name.Length == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private string ReadLongName(BinaryReader reader, byte[] buffer)
|
||||
{
|
||||
var size = ReadSize(buffer);
|
||||
var nameLength = (int)size;
|
||||
var nameBytes = reader.ReadBytes(nameLength);
|
||||
var remainingBytesToRead = BlockSize - (nameLength % BlockSize);
|
||||
|
||||
// Read the rest of the block and discard the data
|
||||
if (remainingBytesToRead < BlockSize)
|
||||
{
|
||||
reader.ReadBytes(remainingBytesToRead);
|
||||
}
|
||||
return ArchiveEncoding.Decode(nameBytes, 0, nameBytes.Length).TrimNulls();
|
||||
}
|
||||
|
||||
private static EntryType ReadEntryType(byte[] buffer)
|
||||
{
|
||||
return (EntryType)buffer[156];
|
||||
}
|
||||
|
||||
private long ReadSize(byte[] buffer)
|
||||
{
|
||||
if ((buffer[124] & 0x80) == 0x80) // if size in binary
|
||||
{
|
||||
return DataConverter.BigEndian.GetInt64(buffer, 0x80);
|
||||
}
|
||||
return ReadASCIIInt64Base8(buffer, 124, 11);
|
||||
}
|
||||
|
||||
private static byte[] ReadBlock(BinaryReader reader)
|
||||
{
|
||||
byte[] buffer = reader.ReadBytes(BlockSize);
|
||||
|
||||
if (buffer.Length != 0 && buffer.Length < BlockSize)
|
||||
{
|
||||
throw new InvalidOperationException("Buffer is invalid size");
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
|
||||
private static void WriteStringBytes(string name, byte[] buffer, int offset, int length)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < length - 1 && i < name.Length; ++i)
|
||||
{
|
||||
buffer[offset + i] = (byte)name[i];
|
||||
}
|
||||
|
||||
for (; i < length; ++i)
|
||||
{
|
||||
buffer[offset + i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
private static void WriteOctalBytes(long value, byte[] buffer, int offset, int length)
|
||||
{
|
||||
string val = Convert.ToString(value, 8);
|
||||
int shift = length - val.Length - 1;
|
||||
for (int i = 0; i < shift; i++)
|
||||
{
|
||||
buffer[offset + i] = (byte)' ';
|
||||
}
|
||||
for (int i = 0; i < val.Length; i++)
|
||||
{
|
||||
buffer[offset + i + shift] = (byte)val[i];
|
||||
}
|
||||
}
|
||||
|
||||
private static int ReadASCIIInt32Base8(byte[] buffer, int offset, int count)
|
||||
{
|
||||
string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
|
||||
if (string.IsNullOrEmpty(s))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
return Convert.ToInt32(s, 8);
|
||||
}
|
||||
|
||||
private static long ReadASCIIInt64Base8(byte[] buffer, int offset, int count)
|
||||
{
|
||||
string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
|
||||
if (string.IsNullOrEmpty(s))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
return Convert.ToInt64(s, 8);
|
||||
}
|
||||
|
||||
private static long ReadASCIIInt64(byte[] buffer, int offset, int count)
|
||||
{
|
||||
string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
|
||||
if (string.IsNullOrEmpty(s))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
return Convert.ToInt64(s);
|
||||
}
|
||||
|
||||
internal static int RecalculateChecksum(byte[] buf)
|
||||
{
|
||||
// Set default value for checksum. That is 8 spaces.
|
||||
Encoding.UTF8.GetBytes(" ").CopyTo(buf, 148);
|
||||
|
||||
// Calculate checksum
|
||||
int headerChecksum = 0;
|
||||
foreach (byte b in buf)
|
||||
{
|
||||
headerChecksum += b;
|
||||
}
|
||||
return headerChecksum;
|
||||
}
|
||||
|
||||
internal static int RecalculateAltChecksum(byte[] buf)
|
||||
{
|
||||
Encoding.UTF8.GetBytes(" ").CopyTo(buf, 148);
|
||||
int headerChecksum = 0;
|
||||
foreach (byte b in buf)
|
||||
{
|
||||
if ((b & 0x80) == 0x80)
|
||||
{
|
||||
headerChecksum -= b ^ 0x80;
|
||||
}
|
||||
else
|
||||
{
|
||||
headerChecksum += b;
|
||||
}
|
||||
}
|
||||
return headerChecksum;
|
||||
}
|
||||
|
||||
public long? DataStartPosition { get; set; }
|
||||
|
||||
public string Magic { get; set; }
|
||||
}
|
||||
}
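The checksum helpers above (RecalculateChecksum / RecalculateAltChecksum) treat the eight-byte checksum field at offset 148 as spaces and then sum every byte of the 512-byte header. Below is a minimal standalone sketch of that convention, for illustration only: the class and method names are invented, and the octal storage layout (six digits, a NUL and a space) is an assumption based on common tar practice rather than anything in this commit.

using System;
using System.Text;

static class TarChecksumSketch
{
    // Sum all 512 header bytes with the checksum field counted as spaces,
    // mirroring RecalculateChecksum above.
    public static int Compute(byte[] header512)
    {
        Encoding.UTF8.GetBytes("        ").CopyTo(header512, 148);
        int sum = 0;
        foreach (byte b in header512)
        {
            sum += b;
        }
        return sum;
    }

    // Assumption: store the value as six octal digits, then a NUL and a space.
    public static void Store(byte[] header512, int checksum)
    {
        string octal = Convert.ToString(checksum, 8).PadLeft(6, '0');
        Encoding.UTF8.GetBytes(octal).CopyTo(header512, 148);
        header512[154] = 0;
        header512[155] = (byte)' ';
    }
}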
src/SharpCompress/Common/Tar/TarBuffer.cs (new file, 541 lines)
@@ -0,0 +1,541 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
/// <summary>
|
||||
/// The TarBuffer class implements the tar archive concept
|
||||
/// of a buffered input stream. This concept goes back to the
|
||||
/// days of blocked tape drives and special io devices. In the
|
||||
/// C# universe, the only real function that this class
|
||||
/// performs is to ensure that files have the correct "record"
|
||||
/// size, or other tars will complain.
|
||||
/// <p>
|
||||
/// You should never have a need to access this class directly.
|
||||
/// TarBuffers are created by Tar IO Streams.
|
||||
/// </p>
|
||||
/// </summary>
|
||||
public class TarBuffer
|
||||
{
|
||||
|
||||
/* A quote from GNU tar man file on blocking and records
|
||||
A `tar' archive file contains a series of blocks. Each block
|
||||
contains `BLOCKSIZE' bytes. Although this format may be thought of as
|
||||
being on magnetic tape, other media are often used.
|
||||
|
||||
Each file archived is represented by a header block which describes
|
||||
the file, followed by zero or more blocks which give the contents of
|
||||
the file. At the end of the archive file there may be a block filled
|
||||
with binary zeros as an end-of-file marker. A reasonable system should
|
||||
write a block of zeros at the end, but must not assume that such a
|
||||
block exists when reading an archive.
|
||||
|
||||
The blocks may be "blocked" for physical I/O operations. Each
|
||||
record of N blocks is written with a single 'write ()'
|
||||
operation. On magnetic tapes, the result of such a write is a single
|
||||
record. When writing an archive, the last record of blocks should be
|
||||
written at the full size, with blocks after the zero block containing
|
||||
all zeros. When reading an archive, a reasonable system should
|
||||
properly handle an archive whose last record is shorter than the rest,
|
||||
or which contains garbage records after a zero block.
|
||||
*/
|
||||
|
||||
#region Constants
|
||||
/// <summary>
|
||||
/// The size of a block in a tar archive in bytes.
|
||||
/// </summary>
|
||||
/// <remarks>This is 512 bytes.</remarks>
|
||||
public const int BlockSize = 512;
|
||||
|
||||
/// <summary>
|
||||
/// The number of blocks in a default record.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// The default value is 20 blocks per record.
|
||||
/// </remarks>
|
||||
public const int DefaultBlockFactor = 20;
|
||||
|
||||
/// <summary>
|
||||
/// The size in bytes of a default record.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// The default size is 10KB.
|
||||
/// </remarks>
|
||||
public const int DefaultRecordSize = BlockSize * DefaultBlockFactor;
|
||||
#endregion
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size for this buffer
|
||||
/// </summary>
|
||||
/// <value>The record size in bytes.
|
||||
/// This is equal to the <see cref="BlockFactor"/> multiplied by the <see cref="BlockSize"/></value>
|
||||
public int RecordSize => recordSize;
|
||||
|
||||
/// <summary>
|
||||
/// Get the TAR Buffer's record size.
|
||||
/// </summary>
|
||||
/// <returns>The record size in bytes.
|
||||
/// This is equal to the <see cref="BlockFactor"/> multiplied by the <see cref="BlockSize"/></returns>
|
||||
[Obsolete("Use RecordSize property instead")]
|
||||
public int GetRecordSize()
|
||||
{
|
||||
return recordSize;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the Blocking factor for the buffer
|
||||
/// </summary>
|
||||
/// <value>This is the number of blocks in each record.</value>
|
||||
public int BlockFactor => blockFactor;
|
||||
|
||||
/// <summary>
|
||||
/// Get the TAR Buffer's block factor
|
||||
/// </summary>
|
||||
/// <returns>The block factor; the number of blocks per record.</returns>
|
||||
[Obsolete("Use BlockFactor property instead")]
|
||||
public int GetBlockFactor()
|
||||
{
|
||||
return blockFactor;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct a default TarBuffer
|
||||
/// </summary>
|
||||
protected TarBuffer()
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Create TarBuffer for reading with default BlockFactor
|
||||
/// </summary>
|
||||
/// <param name="inputStream">Stream to buffer</param>
|
||||
/// <returns>A new <see cref="TarBuffer"/> suitable for input.</returns>
|
||||
public static TarBuffer CreateInputTarBuffer(Stream inputStream)
|
||||
{
|
||||
if (inputStream == null) {
|
||||
throw new ArgumentNullException(nameof(inputStream));
|
||||
}
|
||||
|
||||
return CreateInputTarBuffer(inputStream, DefaultBlockFactor);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct TarBuffer for reading inputStream setting BlockFactor
|
||||
/// </summary>
|
||||
/// <param name="inputStream">Stream to buffer</param>
|
||||
/// <param name="blockFactor">Blocking factor to apply</param>
|
||||
/// <returns>A new <see cref="TarBuffer"/> suitable for input.</returns>
|
||||
public static TarBuffer CreateInputTarBuffer(Stream inputStream, int blockFactor)
|
||||
{
|
||||
if (inputStream == null) {
|
||||
throw new ArgumentNullException(nameof(inputStream));
|
||||
}
|
||||
|
||||
if (blockFactor <= 0) {
|
||||
throw new ArgumentOutOfRangeException(nameof(blockFactor), "Factor cannot be negative");
|
||||
}
|
||||
|
||||
var tarBuffer = new TarBuffer
|
||||
{
|
||||
inputStream = inputStream,
|
||||
outputStream = null
|
||||
};
|
||||
tarBuffer.Initialize(blockFactor);
|
||||
|
||||
return tarBuffer;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct TarBuffer for writing with default BlockFactor
|
||||
/// </summary>
|
||||
/// <param name="outputStream">output stream for buffer</param>
|
||||
/// <returns>A new <see cref="TarBuffer"/> suitable for output.</returns>
|
||||
public static TarBuffer CreateOutputTarBuffer(Stream outputStream)
|
||||
{
|
||||
if (outputStream == null) {
|
||||
throw new ArgumentNullException(nameof(outputStream));
|
||||
}
|
||||
|
||||
return CreateOutputTarBuffer(outputStream, DefaultBlockFactor);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct TarBuffer for writing Tar output to streams.
|
||||
/// </summary>
|
||||
/// <param name="outputStream">Output stream to write to.</param>
|
||||
/// <param name="blockFactor">Blocking factor to apply</param>
|
||||
/// <returns>A new <see cref="TarBuffer"/> suitable for output.</returns>
|
||||
public static TarBuffer CreateOutputTarBuffer(Stream outputStream, int blockFactor)
|
||||
{
|
||||
if (outputStream == null) {
|
||||
throw new ArgumentNullException(nameof(outputStream));
|
||||
}
|
||||
|
||||
if (blockFactor <= 0) {
|
||||
throw new ArgumentOutOfRangeException(nameof(blockFactor), "Factor cannot be negative");
|
||||
}
|
||||
|
||||
var tarBuffer = new TarBuffer();
|
||||
tarBuffer.inputStream = null;
|
||||
tarBuffer.outputStream = outputStream;
|
||||
tarBuffer.Initialize(blockFactor);
|
||||
|
||||
return tarBuffer;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Initialization common to all constructors.
|
||||
/// </summary>
|
||||
void Initialize(int archiveBlockFactor)
|
||||
{
|
||||
blockFactor = archiveBlockFactor;
|
||||
recordSize = archiveBlockFactor * BlockSize;
|
||||
recordBuffer = new byte[RecordSize];
|
||||
|
||||
if (inputStream != null) {
|
||||
currentRecordIndex = -1;
|
||||
currentBlockIndex = BlockFactor;
|
||||
} else {
|
||||
currentRecordIndex = 0;
|
||||
currentBlockIndex = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Determine if an archive block indicates End of Archive. End of
|
||||
/// archive is indicated by a block that consists entirely of null bytes.
|
||||
/// All remaining blocks for the record should also be nulls
|
||||
/// However some older tars only do a couple of null blocks (Old GNU tar for one)
|
||||
/// and also partial records
|
||||
/// </summary>
|
||||
/// <param name = "block">The data block to check.</param>
|
||||
/// <returns>Returns true if the block is an EOF block; false otherwise.</returns>
|
||||
[Obsolete("Use IsEndOfArchiveBlock instead")]
|
||||
public bool IsEOFBlock(byte[] block)
|
||||
{
|
||||
if (block == null) {
|
||||
throw new ArgumentNullException(nameof(block));
|
||||
}
|
||||
|
||||
if (block.Length != BlockSize) {
|
||||
throw new ArgumentException("block length is invalid");
|
||||
}
|
||||
|
||||
for (int i = 0; i < BlockSize; ++i) {
|
||||
if (block[i] != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Determine if an archive block indicates the End of an Archive has been reached.
|
||||
/// End of archive is indicated by a block that consists entirely of null bytes.
|
||||
/// All remaining blocks for the record should also be nulls
|
||||
/// However some older tars only do a couple of null blocks (Old GNU tar for one)
|
||||
/// and also partial records
|
||||
/// </summary>
|
||||
/// <param name = "block">The data block to check.</param>
|
||||
/// <returns>Returns true if the block is an EOF block; false otherwise.</returns>
|
||||
public static bool IsEndOfArchiveBlock(byte[] block)
|
||||
{
|
||||
if (block == null) {
|
||||
throw new ArgumentNullException(nameof(block));
|
||||
}
|
||||
|
||||
if (block.Length != BlockSize) {
|
||||
throw new ArgumentException("block length is invalid");
|
||||
}
|
||||
|
||||
for (int i = 0; i < BlockSize; ++i) {
|
||||
if (block[i] != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Skip over a block on the input stream.
|
||||
/// </summary>
|
||||
public void SkipBlock()
|
||||
{
|
||||
if (inputStream == null) {
|
||||
throw new TarException("no input stream defined");
|
||||
}
|
||||
|
||||
if (currentBlockIndex >= BlockFactor) {
|
||||
if (!ReadRecord()) {
|
||||
throw new TarException("Failed to read a record");
|
||||
}
|
||||
}
|
||||
|
||||
currentBlockIndex++;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Read a block from the input stream.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The block of data read.
|
||||
/// </returns>
|
||||
public byte[] ReadBlock()
|
||||
{
|
||||
if (inputStream == null) {
|
||||
throw new TarException("TarBuffer.ReadBlock - no input stream defined");
|
||||
}
|
||||
|
||||
if (currentBlockIndex >= BlockFactor) {
|
||||
if (!ReadRecord()) {
|
||||
throw new TarException("Failed to read a record");
|
||||
}
|
||||
}
|
||||
|
||||
byte[] result = new byte[BlockSize];
|
||||
|
||||
Array.Copy(recordBuffer, (currentBlockIndex * BlockSize), result, 0, BlockSize);
|
||||
currentBlockIndex++;
|
||||
return result;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Read a record from data stream.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// false if End-Of-File, else true.
|
||||
/// </returns>
|
||||
bool ReadRecord()
|
||||
{
|
||||
if (inputStream == null) {
|
||||
throw new TarException("no input stream stream defined");
|
||||
}
|
||||
|
||||
currentBlockIndex = 0;
|
||||
|
||||
int offset = 0;
|
||||
int bytesNeeded = RecordSize;
|
||||
|
||||
while (bytesNeeded > 0) {
|
||||
long numBytes = inputStream.Read(recordBuffer, offset, bytesNeeded);
|
||||
|
||||
//
|
||||
// NOTE
|
||||
// We have found EOF, and the record is not full!
|
||||
//
|
||||
// This is a broken archive. It does not follow the standard
|
||||
// blocking algorithm. However, because we are generous, and
|
||||
// it requires little effort, we will simply ignore the error
|
||||
// and continue as if the entire record were read. This does
|
||||
// not appear to break anything upstream. We used to return
|
||||
// false in this case.
|
||||
//
|
||||
// Thanks to 'Yohann.Roussel@alcatel.fr' for this fix.
|
||||
//
|
||||
if (numBytes <= 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
offset += (int)numBytes;
|
||||
bytesNeeded -= (int)numBytes;
|
||||
}
|
||||
|
||||
currentRecordIndex++;
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the current block number, within the current record, zero based.
|
||||
/// </summary>
|
||||
/// <remarks>Block numbers are zero based values</remarks>
|
||||
/// <seealso cref="RecordSize"/>
|
||||
public int CurrentBlock => currentBlockIndex;
|
||||
|
||||
/// <summary>
|
||||
/// Get/set flag indicating ownership of the underlying stream.
|
||||
/// When the flag is true <see cref="Close"></see> will close the underlying stream also.
|
||||
/// </summary>
|
||||
public bool IsStreamOwner {
|
||||
get => isStreamOwner_;
|
||||
set => isStreamOwner_ = value;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the current block number, within the current record, zero based.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The current zero based block number.
|
||||
/// </returns>
|
||||
/// <remarks>
|
||||
/// The absolute block number = (<see cref="GetCurrentRecordNum">record number</see> * <see cref="BlockFactor">block factor</see>) + <see cref="GetCurrentBlockNum">block number</see>.
|
||||
/// </remarks>
|
||||
[Obsolete("Use CurrentBlock property instead")]
|
||||
public int GetCurrentBlockNum()
|
||||
{
|
||||
return currentBlockIndex;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the current record number.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The current zero based record number.
|
||||
/// </returns>
|
||||
public int CurrentRecord => currentRecordIndex;
|
||||
|
||||
/// <summary>
|
||||
/// Get the current record number.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The current zero based record number.
|
||||
/// </returns>
|
||||
[Obsolete("Use CurrentRecord property instead")]
|
||||
public int GetCurrentRecordNum()
|
||||
{
|
||||
return currentRecordIndex;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write a block of data to the archive.
|
||||
/// </summary>
|
||||
/// <param name="block">
|
||||
/// The data to write to the archive.
|
||||
/// </param>
|
||||
public void WriteBlock(byte[] block)
|
||||
{
|
||||
if (block == null) {
|
||||
throw new ArgumentNullException(nameof(block));
|
||||
}
|
||||
|
||||
if (outputStream == null) {
|
||||
throw new TarException("TarBuffer.WriteBlock - no output stream defined");
|
||||
}
|
||||
|
||||
if (block.Length != BlockSize) {
|
||||
string errorText = string.Format("TarBuffer.WriteBlock - block to write has length '{0}' which is not the block size of '{1}'",
|
||||
block.Length, BlockSize);
|
||||
throw new TarException(errorText);
|
||||
}
|
||||
|
||||
if (currentBlockIndex >= BlockFactor) {
|
||||
WriteRecord();
|
||||
}
|
||||
|
||||
Array.Copy(block, 0, recordBuffer, (currentBlockIndex * BlockSize), BlockSize);
|
||||
currentBlockIndex++;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write an archive record to the archive, where the record may be
|
||||
/// inside of a larger array buffer. The buffer must be "offset plus
|
||||
/// record size" long.
|
||||
/// </summary>
|
||||
/// <param name="buffer">
|
||||
/// The buffer containing the record data to write.
|
||||
/// </param>
|
||||
/// <param name="offset">
|
||||
/// The offset of the record data within buffer.
|
||||
/// </param>
|
||||
public void WriteBlock(byte[] buffer, int offset)
|
||||
{
|
||||
if (buffer == null) {
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
|
||||
if (outputStream == null) {
|
||||
throw new TarException("TarBuffer.WriteBlock - no output stream stream defined");
|
||||
}
|
||||
|
||||
if ((offset < 0) || (offset >= buffer.Length)) {
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
|
||||
if ((offset + BlockSize) > buffer.Length) {
|
||||
string errorText = string.Format("TarBuffer.WriteBlock - record has length '{0}' with offset '{1}' which is less than the record size of '{2}'",
|
||||
buffer.Length, offset, recordSize);
|
||||
throw new TarException(errorText);
|
||||
}
|
||||
|
||||
if (currentBlockIndex >= BlockFactor) {
|
||||
WriteRecord();
|
||||
}
|
||||
|
||||
Array.Copy(buffer, offset, recordBuffer, (currentBlockIndex * BlockSize), BlockSize);
|
||||
|
||||
currentBlockIndex++;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write a TarBuffer record to the archive.
|
||||
/// </summary>
|
||||
void WriteRecord()
|
||||
{
|
||||
if (outputStream == null) {
|
||||
throw new TarException("TarBuffer.WriteRecord no output stream defined");
|
||||
}
|
||||
|
||||
outputStream.Write(recordBuffer, 0, RecordSize);
|
||||
outputStream.Flush();
|
||||
|
||||
currentBlockIndex = 0;
|
||||
currentRecordIndex++;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// WriteFinalRecord writes the current record buffer to output if any unwritten data is present.
|
||||
/// </summary>
|
||||
/// <remarks>Any trailing bytes are set to zero which is by definition correct behaviour
|
||||
/// for the end of a tar stream.</remarks>
|
||||
void WriteFinalRecord()
|
||||
{
|
||||
if (outputStream == null) {
|
||||
throw new TarException("TarBuffer.WriteFinalRecord no output stream defined");
|
||||
}
|
||||
|
||||
if (currentBlockIndex > 0) {
|
||||
int dataBytes = currentBlockIndex * BlockSize;
|
||||
Array.Clear(recordBuffer, dataBytes, RecordSize - dataBytes);
|
||||
WriteRecord();
|
||||
}
|
||||
|
||||
outputStream.Flush();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Close the TarBuffer. If this is an output buffer, also flush the
|
||||
/// current block before closing.
|
||||
/// </summary>
|
||||
public void Close()
|
||||
{
|
||||
if (outputStream != null) {
|
||||
WriteFinalRecord();
|
||||
|
||||
if (isStreamOwner_) {
|
||||
outputStream.Dispose();
|
||||
}
|
||||
outputStream = null;
|
||||
} else if (inputStream != null) {
|
||||
if (isStreamOwner_) {
|
||||
inputStream.Dispose();
|
||||
}
|
||||
inputStream = null;
|
||||
}
|
||||
}
|
||||
|
||||
#region Instance Fields
|
||||
Stream inputStream;
|
||||
Stream outputStream;
|
||||
|
||||
byte[] recordBuffer;
|
||||
int currentBlockIndex;
|
||||
int currentRecordIndex;
|
||||
|
||||
int recordSize = DefaultRecordSize;
|
||||
int blockFactor = DefaultBlockFactor;
|
||||
bool isStreamOwner_ = true;
|
||||
#endregion
|
||||
}
|
||||
}
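A hedged usage sketch of the TarBuffer added above: blocks are 512 bytes, a record is blockFactor blocks (20 x 512 = 10240 bytes by default), and the end of the archive is signalled by an all-zero block. The helper name below is invented; it only uses members shown in this file (CreateInputTarBuffer, ReadBlock, IsEndOfArchiveBlock, IsStreamOwner, Close) and assumes a well-formed archive that ends with a zero block.

using System.IO;
using SharpCompress.Common.Tar;

static class TarBufferSketch
{
    // Count the data blocks that appear before the end-of-archive marker.
    public static int CountBlocks(Stream archiveStream)
    {
        var buffer = TarBuffer.CreateInputTarBuffer(archiveStream, TarBuffer.DefaultBlockFactor);
        buffer.IsStreamOwner = false; // leave the caller's stream open
        int blocks = 0;
        byte[] block = buffer.ReadBlock();
        while (block != null && !TarBuffer.IsEndOfArchiveBlock(block))
        {
            blocks++;
            block = buffer.ReadBlock();
        }
        buffer.Close();
        return blocks;
    }
}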
|
||||
@@ -1,9 +1,7 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
@@ -17,6 +15,8 @@ namespace SharpCompress.Common.Tar
|
||||
CompressionType = type;
|
||||
}
|
||||
|
||||
internal TarHeader Header => filePart.Header;
|
||||
|
||||
public override CompressionType CompressionType { get; }
|
||||
|
||||
public override long Crc => 0;
|
||||
@@ -27,7 +27,7 @@ namespace SharpCompress.Common.Tar
|
||||
|
||||
public override long Size => filePart.Header.Size;
|
||||
|
||||
public override DateTime? LastModifiedTime => filePart.Header.LastModifiedTime;
|
||||
public override DateTime? LastModifiedTime => filePart.Header.ModTime;
|
||||
|
||||
public override DateTime? CreatedTime => null;
|
||||
|
||||
@@ -37,26 +37,27 @@ namespace SharpCompress.Common.Tar
|
||||
|
||||
public override bool IsEncrypted => false;
|
||||
|
||||
public override bool IsDirectory => filePart.Header.EntryType == EntryType.Directory;
|
||||
public override bool IsDirectory => filePart.Header.TypeFlag == TarHeader.LF_DIR;
|
||||
|
||||
public override bool IsSplit => false;
|
||||
|
||||
internal override IEnumerable<FilePart> Parts => filePart.AsEnumerable<FilePart>();
|
||||
|
||||
internal static IEnumerable<TarEntry> GetEntries(StreamingMode mode, Stream stream,
|
||||
CompressionType compressionType, ArchiveEncoding archiveEncoding)
|
||||
CompressionType compressionType)
|
||||
{
|
||||
foreach (TarHeader h in TarHeaderFactory.ReadHeader(mode, stream, archiveEncoding))
|
||||
using (var tarStream = new TarInputStream(stream))
|
||||
{
|
||||
if (h != null)
|
||||
TarHeader header = null;
|
||||
while ((header = tarStream.GetNextEntry()) != null)
|
||||
{
|
||||
if (mode == StreamingMode.Seekable)
|
||||
{
|
||||
yield return new TarEntry(new TarFilePart(h, stream), compressionType);
|
||||
yield return new TarEntry(new TarFilePart(header, stream), compressionType);
|
||||
}
|
||||
else
|
||||
{
|
||||
yield return new TarEntry(new TarFilePart(h, null), compressionType);
|
||||
yield return new TarEntry(new TarFilePart(header, null), compressionType);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
src/SharpCompress/Common/Tar/TarException.cs (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
/// <summary>
|
||||
/// TarException represents exceptions specific to Tar classes and code.
|
||||
/// </summary>
|
||||
public class TarException : ArchiveException
|
||||
{
|
||||
/// <summary>
|
||||
/// Initialise a new instance of <see cref="TarException" /> with its message string.
|
||||
/// </summary>
|
||||
/// <param name="message">A <see cref="string"/> that describes the error.</param>
|
||||
public TarException(string message)
|
||||
: base(message)
|
||||
{
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,17 +1,15 @@
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
internal class TarFilePart : FilePart
|
||||
{
|
||||
private readonly Stream _seekableStream;
|
||||
private readonly Stream seekableStream;
|
||||
|
||||
internal TarFilePart(TarHeader header, Stream seekableStream)
|
||||
: base(header.ArchiveEncoding)
|
||||
{
|
||||
this._seekableStream = seekableStream;
|
||||
this.seekableStream = seekableStream;
|
||||
Header = header;
|
||||
}
|
||||
|
||||
@@ -21,10 +19,10 @@ namespace SharpCompress.Common.Tar
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (_seekableStream != null)
|
||||
if (seekableStream != null)
|
||||
{
|
||||
_seekableStream.Position = Header.DataStartPosition.Value;
|
||||
return new ReadOnlySubStream(_seekableStream, Header.Size);
|
||||
seekableStream.Position = Header.DataStartPosition.Value;
|
||||
return new ReadOnlySubStream(seekableStream, Header.Size);
|
||||
}
|
||||
return Header.PackedStream;
|
||||
}
|
||||
|
||||
src/SharpCompress/Common/Tar/TarHeader.cs (new file, 1060 lines)
File diff suppressed because it is too large
@@ -1,64 +0,0 @@
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
internal static class TarHeaderFactory
|
||||
{
|
||||
internal static IEnumerable<TarHeader> ReadHeader(StreamingMode mode, Stream stream, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
TarHeader header = null;
|
||||
try
|
||||
{
|
||||
BinaryReader reader = new BinaryReader(stream);
|
||||
header = new TarHeader(archiveEncoding);
|
||||
|
||||
if (!header.Read(reader))
|
||||
{
|
||||
yield break;
|
||||
}
|
||||
switch (mode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
header.DataStartPosition = reader.BaseStream.Position;
|
||||
|
||||
//skip to nearest 512
|
||||
reader.BaseStream.Position += PadTo512(header.Size);
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
}
|
||||
catch
|
||||
{
|
||||
header = null;
|
||||
}
|
||||
yield return header;
|
||||
}
|
||||
}
|
||||
|
||||
private static long PadTo512(long size)
|
||||
{
|
||||
int zeros = (int)(size % 512);
|
||||
if (zeros == 0)
|
||||
{
|
||||
return size;
|
||||
}
|
||||
return 512 - zeros + size;
|
||||
}
|
||||
}
|
||||
}
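The PadTo512 helper in the removed factory above rounds an entry's size up to the next 512-byte boundary so a seekable reader can jump over the zero padding that follows the data. The same arithmetic as a tiny standalone sketch, with names invented for illustration.

static class TarPaddingSketch
{
    // Equivalent to PadTo512 above: round up to the next multiple of 512.
    public static long RoundUpTo512(long size)
    {
        long remainder = size % 512;
        return remainder == 0 ? size : size + (512 - remainder);
    }

    // Example: RoundUpTo512(1) == 512, RoundUpTo512(512) == 512, RoundUpTo512(513) == 1024.
}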
src/SharpCompress/Common/Tar/TarInputStream.cs (new file, 547 lines)
@@ -0,0 +1,547 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
/// <summary>
|
||||
/// The TarInputStream reads a UNIX tar archive as an InputStream.
|
||||
/// Methods are provided to position at each successive entry in
/// the archive, and then read each entry as a normal input stream
|
||||
/// using read().
|
||||
/// </summary>
|
||||
public class TarInputStream : Stream
|
||||
{
|
||||
#region Constructors
|
||||
|
||||
/// <summary>
|
||||
/// Construct a TarInputStream with default block factor
|
||||
/// </summary>
|
||||
/// <param name="inputStream">stream to source data from</param>
|
||||
public TarInputStream(Stream inputStream)
|
||||
: this(inputStream, TarBuffer.DefaultBlockFactor)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct a TarInputStream with user specified block factor
|
||||
/// </summary>
|
||||
/// <param name="inputStream">stream to source data from</param>
|
||||
/// <param name="blockFactor">block factor to apply to archive</param>
|
||||
public TarInputStream(Stream inputStream, int blockFactor)
|
||||
{
|
||||
this.inputStream = inputStream;
|
||||
tarBuffer = TarBuffer.CreateInputTarBuffer(inputStream, blockFactor);
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
/// <summary>
|
||||
/// Get/set flag indicating ownership of the underlying stream.
|
||||
/// When the flag is true <see cref="Close"></see> will close the underlying stream also.
|
||||
/// </summary>
|
||||
public bool IsStreamOwner { get => tarBuffer.IsStreamOwner; set => tarBuffer.IsStreamOwner = value; }
|
||||
|
||||
#region Stream Overrides
|
||||
|
||||
/// <summary>
|
||||
/// Gets a value indicating whether the current stream supports reading
|
||||
/// </summary>
|
||||
public override bool CanRead => inputStream.CanRead;
|
||||
|
||||
/// <summary>
|
||||
/// Gets a value indicating whether the current stream supports seeking
|
||||
/// This property always returns false.
|
||||
/// </summary>
|
||||
public override bool CanSeek => false;
|
||||
|
||||
/// <summary>
|
||||
/// Gets a value indicating if the stream supports writing.
|
||||
/// This property always returns false.
|
||||
/// </summary>
|
||||
public override bool CanWrite => false;
|
||||
|
||||
/// <summary>
|
||||
/// The length in bytes of the stream
|
||||
/// </summary>
|
||||
public override long Length => inputStream.Length;
|
||||
|
||||
/// <summary>
|
||||
/// Gets or sets the position within the stream.
|
||||
/// Setting the Position is not supported and throws a NotSupportedException
|
||||
/// </summary>
|
||||
/// <exception cref="NotSupportedException">Any attempt to set position</exception>
|
||||
public override long Position { get => inputStream.Position; set => throw new NotSupportedException("TarInputStream Seek not supported"); }
|
||||
|
||||
/// <summary>
|
||||
/// Flushes the baseInputStream
|
||||
/// </summary>
|
||||
public override void Flush()
|
||||
{
|
||||
inputStream.Flush();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Set the streams position. This operation is not supported and will throw a NotSupportedException
|
||||
/// </summary>
|
||||
/// <param name="offset">The offset relative to the origin to seek to.</param>
|
||||
/// <param name="origin">The <see cref="SeekOrigin"/> to start seeking from.</param>
|
||||
/// <returns>The new position in the stream.</returns>
|
||||
/// <exception cref="NotSupportedException">Any access</exception>
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
throw new NotSupportedException("TarInputStream Seek not supported");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the length of the stream
|
||||
/// This operation is not supported and will throw a NotSupportedException
|
||||
/// </summary>
|
||||
/// <param name="value">The new stream length.</param>
|
||||
/// <exception cref="NotSupportedException">Any access</exception>
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotSupportedException("TarInputStream SetLength not supported");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Writes a block of bytes to this stream using data from a buffer.
|
||||
/// This operation is not supported and will throw a NotSupportedException
|
||||
/// </summary>
|
||||
/// <param name="buffer">The buffer containing bytes to write.</param>
|
||||
/// <param name="offset">The offset in the buffer of the frist byte to write.</param>
|
||||
/// <param name="count">The number of bytes to write.</param>
|
||||
/// <exception cref="NotSupportedException">Any access</exception>
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
throw new NotSupportedException("TarInputStream Write not supported");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Writes a byte to the current position in the file stream.
|
||||
/// This operation is not supported and will throw a NotSupportedException
|
||||
/// </summary>
|
||||
/// <param name="value">The byte value to write.</param>
|
||||
/// <exception cref="NotSupportedException">Any access</exception>
|
||||
public override void WriteByte(byte value)
|
||||
{
|
||||
throw new NotSupportedException("TarInputStream WriteByte not supported");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Reads a byte from the current tar archive entry.
|
||||
/// </summary>
|
||||
/// <returns>A byte cast to an int; -1 if the at the end of the stream.</returns>
|
||||
public override int ReadByte()
|
||||
{
|
||||
byte[] oneByteBuffer = new byte[1];
|
||||
int num = Read(oneByteBuffer, 0, 1);
|
||||
if (num <= 0)
|
||||
{
|
||||
// return -1 to indicate that no byte was read.
|
||||
return -1;
|
||||
}
|
||||
return oneByteBuffer[0];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Reads bytes from the current tar archive entry.
|
||||
///
|
||||
/// This method is aware of the boundaries of the current
|
||||
/// entry in the archive and will deal with them appropriately
|
||||
/// </summary>
|
||||
/// <param name="buffer">
|
||||
/// The buffer into which to place bytes read.
|
||||
/// </param>
|
||||
/// <param name="offset">
|
||||
/// The offset at which to place bytes read.
|
||||
/// </param>
|
||||
/// <param name="count">
|
||||
/// The number of bytes to read.
|
||||
/// </param>
|
||||
/// <returns>
|
||||
/// The number of bytes read, or 0 at end of stream/EOF.
|
||||
/// </returns>
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
|
||||
int totalRead = 0;
|
||||
|
||||
if (entryOffset >= entrySize)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
long numToRead = count;
|
||||
|
||||
if ((numToRead + entryOffset) > entrySize)
|
||||
{
|
||||
numToRead = entrySize - entryOffset;
|
||||
}
|
||||
|
||||
if (readBuffer != null)
|
||||
{
|
||||
int sz = (numToRead > readBuffer.Length) ? readBuffer.Length : (int)numToRead;
|
||||
|
||||
Array.Copy(readBuffer, 0, buffer, offset, sz);
|
||||
|
||||
if (sz >= readBuffer.Length)
|
||||
{
|
||||
readBuffer = null;
|
||||
}
|
||||
else
|
||||
{
|
||||
int newLen = readBuffer.Length - sz;
|
||||
byte[] newBuf = new byte[newLen];
|
||||
Array.Copy(readBuffer, sz, newBuf, 0, newLen);
|
||||
readBuffer = newBuf;
|
||||
}
|
||||
|
||||
totalRead += sz;
|
||||
numToRead -= sz;
|
||||
offset += sz;
|
||||
}
|
||||
|
||||
while (numToRead > 0)
|
||||
{
|
||||
byte[] rec = tarBuffer.ReadBlock();
|
||||
if (rec == null)
|
||||
{
|
||||
// Unexpected EOF!
|
||||
throw new TarException("unexpected EOF with " + numToRead + " bytes unread");
|
||||
}
|
||||
|
||||
var sz = (int)numToRead;
|
||||
int recLen = rec.Length;
|
||||
|
||||
if (recLen > sz)
|
||||
{
|
||||
Array.Copy(rec, 0, buffer, offset, sz);
|
||||
readBuffer = new byte[recLen - sz];
|
||||
Array.Copy(rec, sz, readBuffer, 0, recLen - sz);
|
||||
}
|
||||
else
|
||||
{
|
||||
sz = recLen;
|
||||
Array.Copy(rec, 0, buffer, offset, recLen);
|
||||
}
|
||||
|
||||
totalRead += sz;
|
||||
numToRead -= sz;
|
||||
offset += sz;
|
||||
}
|
||||
|
||||
entryOffset += totalRead;
|
||||
|
||||
return totalRead;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Closes this stream. Calls the TarBuffer's close() method.
|
||||
/// The underlying stream is closed by the TarBuffer.
|
||||
/// </summary>
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (disposing)
|
||||
{
|
||||
tarBuffer.Close();
|
||||
}
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size being used by this stream's TarBuffer.
|
||||
/// </summary>
|
||||
public int RecordSize => tarBuffer.RecordSize;
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size being used by this stream's TarBuffer.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// TarBuffer record size.
|
||||
/// </returns>
|
||||
[Obsolete("Use RecordSize property instead")]
|
||||
public int GetRecordSize()
|
||||
{
|
||||
return tarBuffer.RecordSize;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the available data that can be read from the current
|
||||
/// entry in the archive. This does not indicate how much data
|
||||
/// is left in the entire archive, only in the current entry.
|
||||
/// This value is determined from the entry's size header field
|
||||
/// and the amount of data already read from the current entry.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The number of available bytes for the current entry.
|
||||
/// </returns>
|
||||
public long Available => entrySize - entryOffset;
|
||||
|
||||
/// <summary>
|
||||
/// Skip bytes in the input buffer. This skips bytes in the
|
||||
/// current entry's data, not the entire archive, and will
|
||||
/// stop at the end of the current entry's data if the number
|
||||
/// to skip extends beyond that point.
|
||||
/// </summary>
|
||||
/// <param name="skipCount">
|
||||
/// The number of bytes to skip.
|
||||
/// </param>
|
||||
public void Skip(long skipCount)
|
||||
{
|
||||
// TODO: REVIEW efficiency of TarInputStream.Skip
|
||||
// This is horribly inefficient, but it ensures that we
|
||||
// properly skip over bytes via the TarBuffer...
|
||||
//
|
||||
byte[] skipBuf = new byte[8 * 1024];
|
||||
|
||||
for (long num = skipCount; num > 0;)
|
||||
{
|
||||
int toRead = num > skipBuf.Length ? skipBuf.Length : (int)num;
|
||||
int numRead = Read(skipBuf, 0, toRead);
|
||||
|
||||
if (numRead == -1)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
num -= numRead;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Return a value of true if marking is supported; false otherwise.
|
||||
/// </summary>
|
||||
/// <remarks>Currently marking is not supported, the return value is always false.</remarks>
|
||||
public bool IsMarkSupported => false;
|
||||
|
||||
/// <summary>
|
||||
/// Since we do not support marking just yet, we do nothing.
|
||||
/// </summary>
|
||||
/// <param name ="markLimit">
|
||||
/// The limit to mark.
|
||||
/// </param>
|
||||
public void Mark(int markLimit)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Since we do not support marking just yet, we do nothing.
|
||||
/// </summary>
|
||||
public void Reset()
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the next entry in this tar archive. This will skip
|
||||
/// over any remaining data in the current entry, if there
|
||||
/// is one, and place the input stream at the header of the
|
||||
/// next entry, and read the header and instantiate a new
|
||||
/// TarEntry from the header bytes and return that entry.
|
||||
/// If there are no more entries in the archive, null will
|
||||
/// be returned to indicate that the end of the archive has
|
||||
/// been reached.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The next TarEntry in the archive, or null.
|
||||
/// </returns>
|
||||
public TarHeader GetNextEntry()
|
||||
{
|
||||
if (hasHitEOF)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
if (currentEntry != null)
|
||||
{
|
||||
SkipToNextEntry();
|
||||
}
|
||||
|
||||
byte[] headerBuf = tarBuffer.ReadBlock();
|
||||
|
||||
if (headerBuf == null)
|
||||
{
|
||||
hasHitEOF = true;
|
||||
}
|
||||
else
|
||||
hasHitEOF |= TarBuffer.IsEndOfArchiveBlock(headerBuf);
|
||||
|
||||
if (hasHitEOF)
|
||||
{
|
||||
currentEntry = null;
|
||||
}
|
||||
else
|
||||
{
|
||||
try
|
||||
{
|
||||
var header = new TarHeader();
|
||||
header.ParseBuffer(headerBuf);
|
||||
if (!header.IsChecksumValid)
|
||||
{
|
||||
throw new TarException("Header checksum is invalid");
|
||||
}
|
||||
this.entryOffset = 0;
|
||||
this.entrySize = header.Size;
|
||||
|
||||
StringBuilder longName = null;
|
||||
|
||||
if (header.TypeFlag == TarHeader.LF_GNU_LONGNAME)
|
||||
{
|
||||
byte[] nameBuffer = new byte[TarBuffer.BlockSize];
|
||||
long numToRead = this.entrySize;
|
||||
|
||||
longName = new StringBuilder();
|
||||
|
||||
while (numToRead > 0)
|
||||
{
|
||||
int numRead = this.Read(nameBuffer, 0, (numToRead > nameBuffer.Length ? nameBuffer.Length : (int)numToRead));
|
||||
|
||||
if (numRead == -1)
|
||||
{
|
||||
throw new TarException("Failed to read long name entry");
|
||||
}
|
||||
|
||||
longName.Append(TarHeader.ParseName(nameBuffer, 0, numRead).ToString());
|
||||
numToRead -= numRead;
|
||||
}
|
||||
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag == TarHeader.LF_GHDR)
|
||||
{
|
||||
// POSIX global extended header
|
||||
// Ignore things we don't understand completely for now
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag == TarHeader.LF_XHDR)
|
||||
{
|
||||
// POSIX extended header
|
||||
// Ignore things we don't understand completely for now
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag == TarHeader.LF_GNU_VOLHDR)
|
||||
{
|
||||
// TODO: could show volume name when verbose
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag != TarHeader.LF_NORMAL &&
|
||||
header.TypeFlag != TarHeader.LF_OLDNORM &&
|
||||
header.TypeFlag != TarHeader.LF_LINK &&
|
||||
header.TypeFlag != TarHeader.LF_SYMLINK &&
|
||||
header.TypeFlag != TarHeader.LF_DIR)
|
||||
{
|
||||
// Ignore things we don't understand completely for now
|
||||
SkipToNextEntry();
|
||||
headerBuf = tarBuffer.ReadBlock();
|
||||
}
|
||||
currentEntry = new TarHeader();
|
||||
|
||||
if (longName != null)
|
||||
{
|
||||
currentEntry.Name = longName.ToString();
|
||||
}
|
||||
|
||||
// Magic was checked here for 'ustar' but there are multiple valid possibilities
|
||||
// so this is not done anymore.
|
||||
|
||||
entryOffset = 0;
|
||||
|
||||
// TODO: Review How do we resolve this discrepancy?!
|
||||
entrySize = this.currentEntry.Size;
|
||||
}
|
||||
catch (TarException ex)
|
||||
{
|
||||
entrySize = 0;
|
||||
entryOffset = 0;
|
||||
currentEntry = null;
|
||||
string errorText = $"Bad header in record {tarBuffer.CurrentRecord} block {tarBuffer.CurrentBlock} {ex.Message}";
|
||||
throw new TarException(errorText);
|
||||
}
|
||||
}
|
||||
return currentEntry;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Copies the contents of the current tar archive entry directly into
|
||||
/// an output stream.
|
||||
/// </summary>
|
||||
/// <param name="outputStream">
|
||||
/// The OutputStream into which to write the entry's data.
|
||||
/// </param>
|
||||
public void CopyEntryContents(Stream outputStream)
|
||||
{
|
||||
byte[] tempBuffer = new byte[32 * 1024];
|
||||
|
||||
while (true)
|
||||
{
|
||||
int numRead = Read(tempBuffer, 0, tempBuffer.Length);
|
||||
if (numRead <= 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
outputStream.Write(tempBuffer, 0, numRead);
|
||||
}
|
||||
}
|
||||
|
||||
private void SkipToNextEntry()
|
||||
{
|
||||
long numToSkip = entrySize - entryOffset;
|
||||
|
||||
if (numToSkip > 0)
|
||||
{
|
||||
Skip(numToSkip);
|
||||
}
|
||||
|
||||
readBuffer = null;
|
||||
}
|
||||
|
||||
#region Instance Fields
|
||||
|
||||
/// <summary>
|
||||
/// Flag set when last block has been read
|
||||
/// </summary>
|
||||
protected bool hasHitEOF;
|
||||
|
||||
/// <summary>
|
||||
/// Size of this entry as recorded in header
|
||||
/// </summary>
|
||||
protected long entrySize;
|
||||
|
||||
/// <summary>
|
||||
/// Number of bytes read for this entry so far
|
||||
/// </summary>
|
||||
protected long entryOffset;
|
||||
|
||||
/// <summary>
|
||||
/// Buffer used with calls to <code>Read()</code>
|
||||
/// </summary>
|
||||
protected byte[] readBuffer;
|
||||
|
||||
/// <summary>
|
||||
/// Working buffer
|
||||
/// </summary>
|
||||
protected TarBuffer tarBuffer;
|
||||
|
||||
/// <summary>
|
||||
/// Current entry being read
|
||||
/// </summary>
|
||||
private TarHeader currentEntry;
|
||||
|
||||
/// <summary>
|
||||
/// Stream used as the source of input data.
|
||||
/// </summary>
|
||||
private readonly Stream inputStream;
|
||||
|
||||
#endregion
|
||||
}
|
||||
}
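A hedged usage sketch of the reader added above: GetNextEntry walks the headers (handling GNU long names and skipping extension headers internally) and CopyEntryContents drains the current entry's data. Only members shown in this file are used; the wrapper class name is invented, and TarHeader.Name is assumed to be populated by the suppressed TarHeader.cs.

using System.Collections.Generic;
using System.IO;
using SharpCompress.Common.Tar;

static class TarInputStreamSketch
{
    // Read every entry of a tar stream into memory, keyed by entry name.
    public static Dictionary<string, byte[]> ReadAll(Stream archiveStream)
    {
        var result = new Dictionary<string, byte[]>();
        using (var tar = new TarInputStream(archiveStream))
        {
            tar.IsStreamOwner = false; // leave the caller's stream open
            TarHeader header;
            while ((header = tar.GetNextEntry()) != null)
            {
                using (var entryData = new MemoryStream())
                {
                    tar.CopyEntryContents(entryData);
                    result[header.Name] = entryData.ToArray();
                }
            }
        }
        return result;
    }
}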
src/SharpCompress/Common/Tar/TarOutputStream.cs (new file, 417 lines)
@@ -0,0 +1,417 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
/// <summary>
|
||||
/// The TarOutputStream writes a UNIX tar archive as an OutputStream.
|
||||
/// Methods are provided to put entries, and then write their contents
|
||||
/// by writing to this stream using write().
|
||||
/// </summary>
|
||||
/// public
|
||||
public class TarOutputStream : Stream
|
||||
{
|
||||
#region Constructors
|
||||
/// <summary>
|
||||
/// Construct TarOutputStream using default block factor
|
||||
/// </summary>
|
||||
/// <param name="outputStream">stream to write to</param>
|
||||
public TarOutputStream(Stream outputStream)
|
||||
: this(outputStream, TarBuffer.DefaultBlockFactor)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Construct TarOutputStream with user specified block factor
|
||||
/// </summary>
|
||||
/// <param name="outputStream">stream to write to</param>
|
||||
/// <param name="blockFactor">blocking factor</param>
|
||||
public TarOutputStream(Stream outputStream, int blockFactor)
|
||||
{
|
||||
if (outputStream == null) {
|
||||
throw new ArgumentNullException(nameof(outputStream));
|
||||
}
|
||||
|
||||
this.outputStream = outputStream;
|
||||
buffer = TarBuffer.CreateOutputTarBuffer(outputStream, blockFactor);
|
||||
|
||||
assemblyBuffer = new byte[TarBuffer.BlockSize];
|
||||
blockBuffer = new byte[TarBuffer.BlockSize];
|
||||
}
|
||||
#endregion
|
||||
|
||||
/// <summary>
|
||||
/// Get/set flag indicating ownership of the underlying stream.
|
||||
/// When the flag is true <see cref="Close"></see> will close the underlying stream also.
|
||||
/// </summary>
|
||||
public bool IsStreamOwner {
|
||||
get => buffer.IsStreamOwner;
|
||||
set => buffer.IsStreamOwner = value;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// true if the stream supports reading; otherwise, false.
|
||||
/// </summary>
|
||||
public override bool CanRead => outputStream.CanRead;
|
||||
|
||||
/// <summary>
|
||||
/// true if the stream supports seeking; otherwise, false.
|
||||
/// </summary>
|
||||
public override bool CanSeek => outputStream.CanSeek;
|
||||
|
||||
/// <summary>
|
||||
/// true if stream supports writing; otherwise, false.
|
||||
/// </summary>
|
||||
public override bool CanWrite => outputStream.CanWrite;
|
||||
|
||||
/// <summary>
|
||||
/// length of stream in bytes
|
||||
/// </summary>
|
||||
public override long Length => outputStream.Length;
|
||||
|
||||
/// <summary>
|
||||
/// gets or sets the position within the current stream.
|
||||
/// </summary>
|
||||
public override long Position {
|
||||
get => outputStream.Position;
|
||||
set => outputStream.Position = value;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// set the position within the current stream
|
||||
/// </summary>
|
||||
/// <param name="offset">The offset relative to the <paramref name="origin"/> to seek to</param>
|
||||
/// <param name="origin">The <see cref="SeekOrigin"/> to seek from.</param>
|
||||
/// <returns>The new position in the stream.</returns>
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
return outputStream.Seek(offset, origin);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Set the length of the current stream
|
||||
/// </summary>
|
||||
/// <param name="value">The new stream length.</param>
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
outputStream.SetLength(value);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Read a byte from the stream and advance the position within the stream
|
||||
/// by one byte or returns -1 if at the end of the stream.
|
||||
/// </summary>
|
||||
/// <returns>The byte value or -1 if at end of stream</returns>
|
||||
public override int ReadByte()
|
||||
{
|
||||
return outputStream.ReadByte();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// read bytes from the current stream and advance the position within the
|
||||
/// stream by the number of bytes read.
|
||||
/// </summary>
|
||||
/// <param name="buffer">The buffer to store read bytes in.</param>
|
||||
/// <param name="offset">The index into the buffer to being storing bytes at.</param>
|
||||
/// <param name="count">The desired number of bytes to read.</param>
|
||||
/// <returns>The total number of bytes read, or zero if at the end of the stream.
|
||||
/// The number of bytes may be less than the <paramref name="count">count</paramref>
|
||||
/// requested if data is not available.</returns>
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
return outputStream.Read(buffer, offset, count);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// All buffered data is written to destination
|
||||
/// </summary>
|
||||
public override void Flush()
|
||||
{
|
||||
outputStream.Flush();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ends the TAR archive without closing the underlying OutputStream.
|
||||
/// The result is that the EOF block of nulls is written.
|
||||
/// </summary>
|
||||
public void Finish()
|
||||
{
|
||||
if (IsEntryOpen) {
|
||||
CloseEntry();
|
||||
}
|
||||
WriteEofBlock();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ends the TAR archive and closes the underlying OutputStream.
|
||||
/// </summary>
|
||||
/// <remarks>This means that Finish() is called followed by calling the
|
||||
/// TarBuffer's Close().</remarks>
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (!isClosed) {
|
||||
isClosed = true;
|
||||
Finish();
|
||||
buffer.Close();
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size being used by this stream's TarBuffer.
|
||||
/// </summary>
|
||||
public int RecordSize => buffer.RecordSize;
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size being used by this stream's TarBuffer.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The TarBuffer record size.
|
||||
/// </returns>
|
||||
[Obsolete("Use RecordSize property instead")]
|
||||
public int GetRecordSize()
|
||||
{
|
||||
return buffer.RecordSize;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get a value indicating whether an entry is open, requiring more data to be written.
|
||||
/// </summary>
|
||||
bool IsEntryOpen => (currBytes < currSize);
|
||||
|
||||
/// <summary>
|
||||
/// Put an entry on the output stream. This writes the entry's
|
||||
/// header and positions the output stream for writing
|
||||
/// the contents of the entry. Once this method is called, the
|
||||
/// stream is ready for calls to write() to write the entry's
|
||||
/// contents. Once the contents are written, closeEntry()
|
||||
/// <B>MUST</B> be called to ensure that all buffered data
|
||||
/// is completely written to the output stream.
|
||||
/// </summary>
|
||||
/// <param name="entry">
|
||||
/// The TarEntry to be written to the archive.
|
||||
/// </param>
|
||||
public void PutNextEntry(TarEntry entry)
|
||||
{
|
||||
if (entry == null) {
|
||||
throw new ArgumentNullException(nameof(entry));
|
||||
}
|
||||
|
||||
if (entry.TarHeader.Name.Length > TarHeader.NAMELEN) {
|
||||
var longHeader = new TarHeader();
|
||||
longHeader.TypeFlag = TarHeader.LF_GNU_LONGNAME;
|
||||
longHeader.Name = longHeader.Name + "././@LongLink";
|
||||
longHeader.Mode = 420;//644 by default
|
||||
longHeader.UserId = entry.UserId;
|
||||
longHeader.GroupId = entry.GroupId;
|
||||
longHeader.GroupName = entry.GroupName;
|
||||
longHeader.UserName = entry.UserName;
|
||||
longHeader.LinkName = "";
|
||||
longHeader.Size = entry.TarHeader.Name.Length + 1; // Plus one to avoid dropping last char
|
||||
|
||||
longHeader.WriteHeader(blockBuffer);
|
||||
buffer.WriteBlock(blockBuffer); // Add special long filename header block
|
||||
|
||||
int nameCharIndex = 0;
|
||||
|
||||
while (nameCharIndex < entry.TarHeader.Name.Length + 1 /* we've allocated one for the null char, now we must make sure it gets written out */) {
|
||||
Array.Clear(blockBuffer, 0, blockBuffer.Length);
|
||||
TarHeader.GetAsciiBytes(entry.TarHeader.Name, nameCharIndex, this.blockBuffer, 0, TarBuffer.BlockSize); // This func handles OK the extra char out of string length
|
||||
nameCharIndex += TarBuffer.BlockSize;
|
||||
buffer.WriteBlock(blockBuffer);
|
||||
}
|
||||
}
|
||||
|
||||
entry.WriteEntryHeader(blockBuffer);
|
||||
buffer.WriteBlock(blockBuffer);
|
||||
|
||||
currBytes = 0;
|
||||
|
||||
currSize = entry.IsDirectory ? 0 : entry.Size;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Close an entry. This method MUST be called for all file
|
||||
/// entries that contain data. The reason is that we must
|
||||
/// buffer data written to the stream in order to satisfy
|
||||
/// the buffer's block based writes. Thus, there may be
|
||||
/// data fragments still being assembled that must be written
|
||||
/// to the output stream before this entry is closed and the
|
||||
/// next entry written.
|
||||
/// </summary>
|
||||
public void CloseEntry()
|
||||
{
|
||||
if (assemblyBufferLength > 0) {
|
||||
Array.Clear(assemblyBuffer, assemblyBufferLength, assemblyBuffer.Length - assemblyBufferLength);
|
||||
|
||||
buffer.WriteBlock(assemblyBuffer);
|
||||
|
||||
currBytes += assemblyBufferLength;
|
||||
assemblyBufferLength = 0;
|
||||
}
|
||||
|
||||
if (currBytes < currSize) {
|
||||
string errorText = string.Format(
|
||||
"Entry closed at '{0}' before the '{1}' bytes specified in the header were written",
|
||||
currBytes, currSize);
|
||||
throw new TarException(errorText);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Writes a byte to the current tar archive entry.
|
||||
/// This method simply calls Write(byte[], int, int).
|
||||
/// </summary>
|
||||
/// <param name="value">
|
||||
/// The byte to be written.
|
||||
/// </param>
|
||||
public override void WriteByte(byte value)
|
||||
{
|
||||
Write(new byte[] { value }, 0, 1);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Writes bytes to the current tar archive entry. This method
|
||||
/// is aware of the current entry and will throw an exception if
|
||||
/// you attempt to write bytes past the length specified for the
|
||||
/// current entry. The method is also (painfully) aware of the
|
||||
/// record buffering required by TarBuffer, and manages buffers
|
||||
/// that are not a multiple of recordsize in length, including
|
||||
/// assembling records from small buffers.
|
||||
/// </summary>
|
||||
/// <param name = "buffer">
|
||||
/// The buffer to write to the archive.
|
||||
/// </param>
|
||||
/// <param name = "offset">
|
||||
/// The offset in the buffer from which to get bytes.
|
||||
/// </param>
|
||||
/// <param name = "count">
|
||||
/// The number of bytes to write.
|
||||
/// </param>
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (buffer == null) {
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
|
||||
if (offset < 0) {
|
||||
throw new ArgumentOutOfRangeException(nameof(offset), "Cannot be negative");
|
||||
}
|
||||
|
||||
if (buffer.Length - offset < count) {
|
||||
throw new ArgumentException("offset and count combination is invalid");
|
||||
}
|
||||
|
||||
if (count < 0) {
|
||||
throw new ArgumentOutOfRangeException(nameof(count), "Cannot be negative");
|
||||
}
|
||||
|
||||
if ((currBytes + count) > currSize) {
|
||||
string errorText = string.Format("request to write '{0}' bytes exceeds size in header of '{1}' bytes",
|
||||
count, this.currSize);
|
||||
throw new ArgumentOutOfRangeException(nameof(count), errorText);
|
||||
}
|
||||
|
||||
//
|
||||
// We have to deal with assembly!!!
|
||||
// The programmer can be writing little 32 byte chunks for all
|
||||
// we know, and we must assemble complete blocks for writing.
|
||||
// TODO REVIEW Maybe this should be in TarBuffer? Could that help to
|
||||
// eliminate some of the buffer copying.
|
||||
//
|
||||
if (assemblyBufferLength > 0) {
|
||||
if ((assemblyBufferLength + count) >= blockBuffer.Length) {
|
||||
int aLen = blockBuffer.Length - assemblyBufferLength;
|
||||
|
||||
Array.Copy(assemblyBuffer, 0, blockBuffer, 0, assemblyBufferLength);
|
||||
Array.Copy(buffer, offset, blockBuffer, assemblyBufferLength, aLen);
|
||||
|
||||
this.buffer.WriteBlock(blockBuffer);
|
||||
|
||||
currBytes += blockBuffer.Length;
|
||||
|
||||
offset += aLen;
|
||||
count -= aLen;
|
||||
|
||||
assemblyBufferLength = 0;
|
||||
} else {
|
||||
Array.Copy(buffer, offset, assemblyBuffer, assemblyBufferLength, count);
|
||||
offset += count;
|
||||
assemblyBufferLength += count;
|
||||
count -= count;
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// When we get here we have EITHER:
|
||||
// o An empty "assembly" buffer.
|
||||
// o No bytes to write (count == 0)
|
||||
//
|
||||
while (count > 0) {
|
||||
if (count < blockBuffer.Length) {
|
||||
Array.Copy(buffer, offset, assemblyBuffer, assemblyBufferLength, count);
|
||||
assemblyBufferLength += count;
|
||||
break;
|
||||
}
|
||||
|
||||
this.buffer.WriteBlock(buffer, offset);
|
||||
|
||||
int bufferLength = blockBuffer.Length;
|
||||
currBytes += bufferLength;
|
||||
count -= bufferLength;
|
||||
offset += bufferLength;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write an EOF (end of archive) block to the tar archive.
|
||||
/// An EOF block consists of all zeros.
|
||||
/// </summary>
|
||||
void WriteEofBlock()
|
||||
{
|
||||
Array.Clear(blockBuffer, 0, blockBuffer.Length);
|
||||
buffer.WriteBlock(blockBuffer);
|
||||
}
|
||||
|
||||
#region Instance Fields
|
||||
/// <summary>
|
||||
/// bytes written for this entry so far
|
||||
/// </summary>
|
||||
long currBytes;
|
||||
|
||||
/// <summary>
|
||||
/// current 'Assembly' buffer length
|
||||
/// </summary>
|
||||
int assemblyBufferLength;
|
||||
|
||||
/// <summary>
|
||||
/// Flag indicating whether this instance has been closed or not.
|
||||
/// </summary>
|
||||
bool isClosed;
|
||||
|
||||
/// <summary>
|
||||
/// Size for the current entry
|
||||
/// </summary>
|
||||
protected long currSize;
|
||||
|
||||
/// <summary>
|
||||
/// single block working buffer
|
||||
/// </summary>
|
||||
protected byte[] blockBuffer;
|
||||
|
||||
/// <summary>
|
||||
/// 'Assembly' buffer used to assemble data before writing
|
||||
/// </summary>
|
||||
protected byte[] assemblyBuffer;
|
||||
|
||||
/// <summary>
|
||||
/// TarBuffer used to provide correct blocking factor
|
||||
/// </summary>
|
||||
protected TarBuffer buffer;
|
||||
|
||||
/// <summary>
|
||||
/// the destination stream for the archive contents
|
||||
/// </summary>
|
||||
protected Stream outputStream;
|
||||
#endregion
|
||||
}
|
||||
}
|
||||
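The `Write` override above assembles arbitrary-sized caller writes into complete 512-byte tar blocks before handing them to the `TarBuffer`. A minimal standalone sketch of that assembly idea, not part of the diff (the `BlockAssembler` type and its names are assumptions for illustration only):

```csharp
using System;
using System.IO;

// Illustration of the block-assembly behaviour used by the tar writer above:
// partial writes accumulate in an assembly buffer until a full 512-byte block
// exists, and only whole blocks are ever pushed to the output stream.
internal sealed class BlockAssembler
{
    private const int BlockSize = 512;                  // tar record size
    private readonly byte[] _assembly = new byte[BlockSize];
    private int _assemblyLength;                        // bytes currently buffered
    private readonly Stream _output;

    public BlockAssembler(Stream output)
    {
        _output = output;
    }

    public void Write(byte[] buffer, int offset, int count)
    {
        while (count > 0)
        {
            int toCopy = Math.Min(BlockSize - _assemblyLength, count);
            Array.Copy(buffer, offset, _assembly, _assemblyLength, toCopy);
            _assemblyLength += toCopy;
            offset += toCopy;
            count -= toCopy;

            if (_assemblyLength == BlockSize)
            {
                _output.Write(_assembly, 0, BlockSize); // emit one complete block
                _assemblyLength = 0;
            }
        }
    }
}
```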
@@ -1,90 +0,0 @@
using System;
using System.IO;

namespace SharpCompress.Common.Tar
{
    internal class TarReadOnlySubStream : Stream
    {
        private bool isDisposed;
        private long amountRead;

        public TarReadOnlySubStream(Stream stream, long bytesToRead)
        {
            Stream = stream;
            BytesLeftToRead = bytesToRead;
        }

        protected override void Dispose(bool disposing)
        {
            if (isDisposed)
            {
                return;
            }
            isDisposed = true;
            if (disposing)
            {
                long skipBytes = amountRead % 512;
                if (skipBytes == 0)
                {
                    return;
                }
                skipBytes = 512 - skipBytes;
                if (skipBytes == 0)
                {
                    return;
                }
                var buffer = new byte[skipBytes];
                Stream.ReadFully(buffer);
            }
        }

        private long BytesLeftToRead { get; set; }

        public Stream Stream { get; }

        public override bool CanRead => true;

        public override bool CanSeek => false;

        public override bool CanWrite => false;

        public override void Flush()
        {
            throw new NotSupportedException();
        }

        public override long Length => throw new NotSupportedException();

        public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }

        public override int Read(byte[] buffer, int offset, int count)
        {
            if (BytesLeftToRead < count)
            {
                count = (int)BytesLeftToRead;
            }
            int read = Stream.Read(buffer, offset, count);
            if (read > 0)
            {
                BytesLeftToRead -= read;
                amountRead += read;
            }
            return read;
        }

        public override long Seek(long offset, SeekOrigin origin)
        {
            throw new NotSupportedException();
        }

        public override void SetLength(long value)
        {
            throw new NotSupportedException();
        }

        public override void Write(byte[] buffer, int offset, int count)
        {
            throw new NotSupportedException();
        }
    }
}
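`TarReadOnlySubStream.Dispose` above skips ahead to the next 512-byte boundary once an entry's data has been consumed, so the enclosing reader lands on the next tar header. The arithmetic it relies on, as a small sketch (illustrative helper, not library API):

```csharp
// Bytes of zero padding that follow an entry whose data occupied `amountRead`
// bytes: tar rounds every entry up to a whole 512-byte block.
static long TarPaddingToSkip(long amountRead)
{
    long remainder = amountRead % 512;
    return remainder == 0 ? 0 : 512 - remainder;
}

// TarPaddingToSkip(500)  == 12
// TarPaddingToSkip(1024) == 0
```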
@@ -6,8 +6,8 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal class DirectoryEntryHeader : ZipFileEntry
|
||||
{
|
||||
public DirectoryEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
: base(ZipHeaderType.DirectoryEntry, archiveEncoding)
|
||||
public DirectoryEntryHeader()
|
||||
: base(ZipHeaderType.DirectoryEntry)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -31,10 +31,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
RelativeOffsetOfEntryHeader = reader.ReadUInt32();
|
||||
|
||||
byte[] name = reader.ReadBytes(nameLength);
|
||||
Name = ArchiveEncoding.Decode(name);
|
||||
Name = DecodeString(name);
|
||||
byte[] extra = reader.ReadBytes(extraLength);
|
||||
byte[] comment = reader.ReadBytes(commentLength);
|
||||
Comment = ArchiveEncoding.Decode(comment);
|
||||
Comment = DecodeString(comment);
|
||||
LoadExtra(extra);
|
||||
|
||||
var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);
|
||||
|
||||
@@ -5,7 +5,6 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
[Flags]
|
||||
internal enum HeaderFlags : ushort
|
||||
{
|
||||
None = 0,
|
||||
Encrypted = 1, // http://www.pkware.com/documents/casestudies/APPNOTE.TXT
|
||||
Bit1 = 2,
|
||||
Bit2 = 4,
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal class LocalEntryHeader : ZipFileEntry
|
||||
{
|
||||
public LocalEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
: base(ZipHeaderType.LocalEntry, archiveEncoding)
|
||||
public LocalEntryHeader()
|
||||
: base(ZipHeaderType.LocalEntry)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -25,7 +24,7 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
ushort extraLength = reader.ReadUInt16();
|
||||
byte[] name = reader.ReadBytes(nameLength);
|
||||
byte[] extra = reader.ReadBytes(extraLength);
|
||||
Name = ArchiveEncoding.Decode(name);
|
||||
Name = DecodeString(name);
|
||||
LoadExtra(extra);
|
||||
|
||||
var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);
|
||||
|
||||
@@ -8,11 +8,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal abstract class ZipFileEntry : ZipHeader
|
||||
{
|
||||
protected ZipFileEntry(ZipHeaderType type, ArchiveEncoding archiveEncoding)
|
||||
protected ZipFileEntry(ZipHeaderType type)
|
||||
: base(type)
|
||||
{
|
||||
Extra = new List<ExtraData>();
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
internal bool IsDirectory
|
||||
@@ -30,10 +29,27 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
&& Name.EndsWith("\\");
|
||||
}
|
||||
}
|
||||
|
||||
internal Stream PackedStream { get; set; }
|
||||
|
||||
internal ArchiveEncoding ArchiveEncoding { get; }
|
||||
protected string DecodeString(byte[] str)
|
||||
{
|
||||
if (FlagUtility.HasFlag(Flags, HeaderFlags.UTF8))
|
||||
{
|
||||
return Encoding.UTF8.GetString(str, 0, str.Length);
|
||||
}
|
||||
|
||||
return ArchiveEncoding.Default.GetString(str, 0, str.Length);
|
||||
}
|
||||
|
||||
protected byte[] EncodeString(string str)
|
||||
{
|
||||
if (FlagUtility.HasFlag(Flags, HeaderFlags.UTF8))
|
||||
{
|
||||
return Encoding.UTF8.GetBytes(str);
|
||||
}
|
||||
return ArchiveEncoding.Default.GetBytes(str);
|
||||
}
|
||||
|
||||
internal Stream PackedStream { get; set; }
|
||||
|
||||
internal string Name { get; set; }
|
||||
|
||||
@@ -48,7 +64,7 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
internal long UncompressedSize { get; set; }
|
||||
|
||||
internal List<ExtraData> Extra { get; set; }
|
||||
|
||||
|
||||
public string Password { get; set; }
|
||||
|
||||
internal PkwareTraditionalEncryptionData ComposeEncryptionData(Stream archiveStream)
|
||||
@@ -59,10 +75,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
}
|
||||
|
||||
var buffer = new byte[12];
|
||||
archiveStream.ReadFully(buffer);
|
||||
archiveStream.Read(buffer, 0, 12);
|
||||
|
||||
PkwareTraditionalEncryptionData encryptionData = PkwareTraditionalEncryptionData.ForRead(Password, this, buffer);
|
||||
|
||||
|
||||
return encryptionData;
|
||||
}
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
throw new ArgumentNullException("buffer");
|
||||
}
|
||||
|
||||
byte[] temp = new byte[count];
|
||||
|
||||
@@ -9,11 +9,9 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
private static readonly CRC32 crc32 = new CRC32();
|
||||
private readonly UInt32[] _Keys = {0x12345678, 0x23456789, 0x34567890};
|
||||
private readonly ArchiveEncoding _archiveEncoding;
|
||||
|
||||
private PkwareTraditionalEncryptionData(string password, ArchiveEncoding archiveEncoding)
|
||||
private PkwareTraditionalEncryptionData(string password)
|
||||
{
|
||||
_archiveEncoding = archiveEncoding;
|
||||
Initialize(password);
|
||||
}
|
||||
|
||||
@@ -29,7 +27,7 @@ namespace SharpCompress.Common.Zip
|
||||
public static PkwareTraditionalEncryptionData ForRead(string password, ZipFileEntry header,
|
||||
byte[] encryptionHeader)
|
||||
{
|
||||
var encryptor = new PkwareTraditionalEncryptionData(password, header.ArchiveEncoding);
|
||||
var encryptor = new PkwareTraditionalEncryptionData(password);
|
||||
byte[] plainTextHeader = encryptor.Decrypt(encryptionHeader, encryptionHeader.Length);
|
||||
if (plainTextHeader[11] != (byte)((header.Crc >> 24) & 0xff))
|
||||
{
|
||||
@@ -49,7 +47,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
if (length > cipherText.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(length),
|
||||
throw new ArgumentOutOfRangeException("length",
|
||||
"Bad length during Decryption: the length parameter must be smaller than or equal to the size of the destination array.");
|
||||
}
|
||||
|
||||
@@ -72,7 +70,7 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (length > plainText.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(length),
|
||||
throw new ArgumentOutOfRangeException("length",
|
||||
"Bad length during Encryption: The length parameter must be smaller than or equal to the size of the destination array.");
|
||||
}
|
||||
|
||||
@@ -95,12 +93,17 @@ namespace SharpCompress.Common.Zip
|
||||
}
|
||||
}
|
||||
|
||||
internal byte[] StringToByteArray(string value)
|
||||
internal static byte[] StringToByteArray(string value, Encoding encoding)
|
||||
{
|
||||
byte[] a = _archiveEncoding.Password.GetBytes(value);
|
||||
byte[] a = encoding.GetBytes(value);
|
||||
return a;
|
||||
}
|
||||
|
||||
internal static byte[] StringToByteArray(string value)
|
||||
{
|
||||
return StringToByteArray(value, ArchiveEncoding.Password);
|
||||
}
|
||||
|
||||
private void UpdateKeys(byte byteValue)
|
||||
{
|
||||
_Keys[0] = (UInt32)crc32.ComputeCrc32((int)_Keys[0], byteValue);
|
||||
|
||||
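The `UpdateKeys` method whose first line appears above is the PKWARE "traditional" (ZipCrypto) key schedule: three 32-bit keys advanced for every plaintext byte using CRC-32 and a multiply-add. A hedged sketch of the standard algorithm from the PKWARE APPNOTE; the `crc32Byte` delegate stands in for the `crc32.ComputeCrc32` call used in the diff and is an assumption here:

```csharp
// Classic ZipCrypto key update. The keys start at 0x12345678, 0x23456789,
// 0x34567890 (as in the _Keys field above) and are advanced per plaintext byte.
static void UpdateZipCryptoKeys(uint[] keys, byte b, Func<uint, byte, uint> crc32Byte)
{
    keys[0] = crc32Byte(keys[0], b);                         // fold the byte into key0 via CRC-32
    keys[1] = keys[1] + (keys[0] & 0xFF);
    keys[1] = keys[1] * 134775813 + 1;                       // linear congruential step
    keys[2] = crc32Byte(keys[2], (byte)(keys[1] >> 24));     // fold key1's top byte into key2
}
```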
@@ -5,21 +5,21 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class SeekableZipFilePart : ZipFilePart
|
||||
{
|
||||
private bool _isLocalHeaderLoaded;
|
||||
private readonly SeekableZipHeaderFactory _headerFactory;
|
||||
private bool isLocalHeaderLoaded;
|
||||
private readonly SeekableZipHeaderFactory headerFactory;
|
||||
|
||||
internal SeekableZipFilePart(SeekableZipHeaderFactory headerFactory, DirectoryEntryHeader header, Stream stream)
|
||||
: base(header, stream)
|
||||
{
|
||||
this._headerFactory = headerFactory;
|
||||
this.headerFactory = headerFactory;
|
||||
}
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (!_isLocalHeaderLoaded)
|
||||
if (!isLocalHeaderLoaded)
|
||||
{
|
||||
LoadLocalHeader();
|
||||
_isLocalHeaderLoaded = true;
|
||||
isLocalHeaderLoaded = true;
|
||||
}
|
||||
return base.GetCompressedStream();
|
||||
}
|
||||
@@ -29,7 +29,7 @@ namespace SharpCompress.Common.Zip
|
||||
private void LoadLocalHeader()
|
||||
{
|
||||
bool hasData = Header.HasData;
|
||||
Header = _headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader);
|
||||
Header = headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader);
|
||||
Header.HasData = hasData;
|
||||
}
|
||||
|
||||
|
||||
@@ -3,17 +3,16 @@ using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class SeekableZipHeaderFactory : ZipHeaderFactory
|
||||
{
|
||||
private const int MAX_ITERATIONS_FOR_DIRECTORY_HEADER = 4096;
|
||||
private bool _zip64;
|
||||
private bool zip64;
|
||||
|
||||
internal SeekableZipHeaderFactory(string password, ArchiveEncoding archiveEncoding)
|
||||
: base(StreamingMode.Seekable, password, archiveEncoding)
|
||||
internal SeekableZipHeaderFactory(string password)
|
||||
: base(StreamingMode.Seekable, password)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -27,14 +26,14 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (entry.IsZip64)
|
||||
{
|
||||
_zip64 = true;
|
||||
zip64 = true;
|
||||
SeekBackToHeader(stream, reader, ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR);
|
||||
var zip64Locator = new Zip64DirectoryEndLocatorHeader();
|
||||
zip64Locator.Read(reader);
|
||||
|
||||
stream.Seek(zip64Locator.RelativeOffsetOfTheEndOfDirectoryRecord, SeekOrigin.Begin);
|
||||
uint zip64Signature = reader.ReadUInt32();
|
||||
if (zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
|
||||
if(zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
|
||||
throw new ArchiveException("Failed to locate the Zip64 Header");
|
||||
|
||||
var zip64Entry = new Zip64DirectoryEndHeader();
|
||||
@@ -51,7 +50,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
stream.Position = position;
|
||||
uint signature = reader.ReadUInt32();
|
||||
var directoryEntryHeader = ReadHeader(signature, reader, _zip64) as DirectoryEntryHeader;
|
||||
var directoryEntryHeader = ReadHeader(signature, reader, zip64) as DirectoryEntryHeader;
|
||||
position = stream.Position;
|
||||
if (directoryEntryHeader == null)
|
||||
{
|
||||
@@ -92,7 +91,7 @@ namespace SharpCompress.Common.Zip
|
||||
stream.Seek(directoryEntryHeader.RelativeOffsetOfEntryHeader, SeekOrigin.Begin);
|
||||
BinaryReader reader = new BinaryReader(stream);
|
||||
uint signature = reader.ReadUInt32();
|
||||
var localEntryHeader = ReadHeader(signature, reader, _zip64) as LocalEntryHeader;
|
||||
var localEntryHeader = ReadHeader(signature, reader, zip64) as LocalEntryHeader;
|
||||
if (localEntryHeader == null)
|
||||
{
|
||||
throw new InvalidOperationException();
|
||||
|
||||
@@ -39,20 +39,19 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
return new BinaryReader(rewindableStream);
|
||||
}
|
||||
if (Header.HasData && !Skipped)
|
||||
if (Header.HasData)
|
||||
{
|
||||
if (decompressionStream == null)
|
||||
{
|
||||
decompressionStream = GetCompressedStream();
|
||||
}
|
||||
decompressionStream.Skip();
|
||||
decompressionStream.SkipAll();
|
||||
|
||||
DeflateStream deflateStream = decompressionStream as DeflateStream;
|
||||
if (deflateStream != null)
|
||||
{
|
||||
rewindableStream.Rewind(deflateStream.InputBuffer);
|
||||
}
|
||||
Skipped = true;
|
||||
}
|
||||
var reader = new BinaryReader(rewindableStream);
|
||||
decompressionStream = null;
|
||||
|
||||
@@ -2,14 +2,13 @@
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class StreamingZipHeaderFactory : ZipHeaderFactory
|
||||
{
|
||||
internal StreamingZipHeaderFactory(string password, ArchiveEncoding archiveEncoding)
|
||||
: base(StreamingMode.Streaming, password, archiveEncoding)
|
||||
internal StreamingZipHeaderFactory(string password)
|
||||
: base(StreamingMode.Streaming, password)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -78,7 +78,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
//read out last 10 auth bytes
|
||||
var ten = new byte[10];
|
||||
stream.ReadFully(ten);
|
||||
stream.Read(ten, 0, 10);
|
||||
stream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@ namespace SharpCompress.Common.Zip
|
||||
internal abstract class ZipFilePart : FilePart
|
||||
{
|
||||
internal ZipFilePart(ZipFileEntry header, Stream stream)
|
||||
: base(header.ArchiveEncoding)
|
||||
{
|
||||
Header = header;
|
||||
header.Part = this;
|
||||
@@ -89,7 +88,7 @@ namespace SharpCompress.Common.Zip
|
||||
case ZipCompressionMethod.PPMd:
|
||||
{
|
||||
var props = new byte[2];
|
||||
stream.ReadFully(props);
|
||||
stream.Read(props, 0, props.Length);
|
||||
return new PpmdStream(new PpmdProperties(props), stream, false);
|
||||
}
|
||||
case ZipCompressionMethod.WinzipAes:
|
||||
@@ -176,6 +175,7 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return plainStream;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ using System.Linq;
|
||||
#endif
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
@@ -24,13 +23,11 @@ namespace SharpCompress.Common.Zip
|
||||
protected LocalEntryHeader lastEntryHeader;
|
||||
private readonly string password;
|
||||
private readonly StreamingMode mode;
|
||||
private readonly ArchiveEncoding archiveEncoding;
|
||||
|
||||
protected ZipHeaderFactory(StreamingMode mode, string password, ArchiveEncoding archiveEncoding)
|
||||
protected ZipHeaderFactory(StreamingMode mode, string password)
|
||||
{
|
||||
this.mode = mode;
|
||||
this.password = password;
|
||||
this.archiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
protected ZipHeader ReadHeader(uint headerBytes, BinaryReader reader, bool zip64 = false)
|
||||
@@ -39,7 +36,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
case ENTRY_HEADER_BYTES:
|
||||
{
|
||||
var entryHeader = new LocalEntryHeader(archiveEncoding);
|
||||
var entryHeader = new LocalEntryHeader();
|
||||
entryHeader.Read(reader);
|
||||
LoadHeader(entryHeader, reader.BaseStream);
|
||||
|
||||
@@ -48,48 +45,48 @@ namespace SharpCompress.Common.Zip
|
||||
}
|
||||
case DIRECTORY_START_HEADER_BYTES:
|
||||
{
|
||||
var entry = new DirectoryEntryHeader(archiveEncoding);
|
||||
var entry = new DirectoryEntryHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case POST_DATA_DESCRIPTOR:
|
||||
{
|
||||
if (FlagUtility.HasFlag(lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor))
|
||||
{
|
||||
if (FlagUtility.HasFlag(lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor))
|
||||
{
|
||||
lastEntryHeader.Crc = reader.ReadUInt32();
|
||||
lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
}
|
||||
else
|
||||
{
|
||||
reader.ReadBytes(zip64 ? 20 : 12);
|
||||
}
|
||||
return null;
|
||||
lastEntryHeader.Crc = reader.ReadUInt32();
|
||||
lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
}
|
||||
else
|
||||
{
|
||||
reader.ReadBytes(zip64 ? 20 : 12);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
case DIGITAL_SIGNATURE:
|
||||
return null;
|
||||
case DIRECTORY_END_HEADER_BYTES:
|
||||
{
|
||||
var entry = new DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case SPLIT_ARCHIVE_HEADER_BYTES:
|
||||
{
|
||||
return new SplitHeader();
|
||||
}
|
||||
{
|
||||
return new SplitHeader();
|
||||
}
|
||||
case ZIP64_END_OF_CENTRAL_DIRECTORY:
|
||||
{
|
||||
var entry = new Zip64DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new Zip64DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR:
|
||||
{
|
||||
var entry = new Zip64DirectoryEndLocatorHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new Zip64DirectoryEndLocatorHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
default:
|
||||
throw new NotSupportedException("Unknown header: " + headerBytes);
|
||||
}
|
||||
@@ -168,22 +165,22 @@ namespace SharpCompress.Common.Zip
|
||||
switch (mode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
entryHeader.DataStartPosition = stream.Position;
|
||||
stream.Position += entryHeader.CompressedSize;
|
||||
break;
|
||||
}
|
||||
{
|
||||
entryHeader.DataStartPosition = stream.Position;
|
||||
stream.Position += entryHeader.CompressedSize;
|
||||
break;
|
||||
}
|
||||
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
entryHeader.PackedStream = stream;
|
||||
break;
|
||||
}
|
||||
{
|
||||
entryHeader.PackedStream = stream;
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
|
||||
//}
|
||||
|
||||
@@ -105,19 +105,19 @@ namespace SharpCompress.Compressors.ADC
|
||||
}
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
throw new ArgumentNullException("buffer");
|
||||
}
|
||||
if (count < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
}
|
||||
if (offset < buffer.GetLowerBound(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
}
|
||||
if ((offset + count) > buffer.GetLength(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
}
|
||||
|
||||
int size = -1;
|
||||
|
||||
@@ -99,7 +99,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// </summary>
|
||||
/// <param name="input">The stream over which to calculate the CRC32</param>
|
||||
/// <returns>the CRC32 calculation</returns>
|
||||
public UInt32 GetCrc32(Stream input)
|
||||
public Int32 GetCrc32(Stream input)
|
||||
{
|
||||
return GetCrc32AndCopy(input, null);
|
||||
}
|
||||
@@ -111,7 +111,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <param name="input">The stream over which to calculate the CRC32</param>
|
||||
/// <param name="output">The stream into which to deflate the input</param>
|
||||
/// <returns>the CRC32 calculation</returns>
|
||||
public UInt32 GetCrc32AndCopy(Stream input, Stream output)
|
||||
public Int32 GetCrc32AndCopy(Stream input, Stream output)
|
||||
{
|
||||
if (input == null)
|
||||
{
|
||||
@@ -143,7 +143,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
TotalBytesRead += count;
|
||||
}
|
||||
|
||||
return ~runningCrc32Result;
|
||||
return (Int32)(~runningCrc32Result);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,7 +26,6 @@
|
||||
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -37,10 +36,9 @@ namespace SharpCompress.Compressors.Deflate
|
||||
|
||||
public DeflateStream(Stream stream, CompressionMode mode,
|
||||
CompressionLevel level = CompressionLevel.Default,
|
||||
bool leaveOpen = false,
|
||||
Encoding forceEncoding = null)
|
||||
bool leaveOpen = false)
|
||||
{
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen, forceEncoding);
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen);
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
|
||||
@@ -30,45 +30,41 @@ using System;
|
||||
using System.IO;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
public class GZipStream : Stream
|
||||
{
|
||||
internal static readonly DateTime UNIX_EPOCH = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
internal static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
|
||||
public DateTime? LastModified { get; set; }
|
||||
|
||||
private string _comment;
|
||||
private string _fileName;
|
||||
private string comment;
|
||||
private string fileName;
|
||||
|
||||
internal ZlibBaseStream BaseStream;
|
||||
private bool _disposed;
|
||||
private bool _firstReadDone;
|
||||
private int _headerByteCount;
|
||||
|
||||
private readonly Encoding _encoding;
|
||||
private bool disposed;
|
||||
private bool firstReadDone;
|
||||
private int headerByteCount;
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode)
|
||||
: this(stream, mode, CompressionLevel.Default, false, Encoding.UTF8)
|
||||
: this(stream, mode, CompressionLevel.Default, false)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level)
|
||||
: this(stream, mode, level, false, Encoding.UTF8)
|
||||
: this(stream, mode, level, false)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen, Encoding.UTF8)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen, Encoding encoding)
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
|
||||
{
|
||||
BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen, encoding);
|
||||
_encoding = encoding;
|
||||
BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen);
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
@@ -78,7 +74,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
get => (BaseStream._flushMode);
|
||||
set
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -91,7 +87,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
get => BaseStream._bufferSize;
|
||||
set
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -127,7 +123,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -153,7 +149,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -183,7 +179,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Writer)
|
||||
{
|
||||
return BaseStream._z.TotalBytesOut + _headerByteCount;
|
||||
return BaseStream._z.TotalBytesOut + headerByteCount;
|
||||
}
|
||||
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Reader)
|
||||
{
|
||||
@@ -206,14 +202,14 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
try
|
||||
{
|
||||
if (!_disposed)
|
||||
if (!disposed)
|
||||
{
|
||||
if (disposing && (BaseStream != null))
|
||||
{
|
||||
BaseStream.Dispose();
|
||||
Crc32 = BaseStream.Crc32;
|
||||
}
|
||||
_disposed = true;
|
||||
disposed = true;
|
||||
}
|
||||
}
|
||||
finally
|
||||
@@ -227,7 +223,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// </summary>
|
||||
public override void Flush()
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -267,7 +263,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <returns>the number of bytes actually read</returns>
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -276,9 +272,9 @@ namespace SharpCompress.Compressors.Deflate
|
||||
// Console.WriteLine("GZipStream::Read(buffer, off({0}), c({1}) = {2}", offset, count, n);
|
||||
// Console.WriteLine( Util.FormatByteArray(buffer, offset, n) );
|
||||
|
||||
if (!_firstReadDone)
|
||||
if (!firstReadDone)
|
||||
{
|
||||
_firstReadDone = true;
|
||||
firstReadDone = true;
|
||||
FileName = BaseStream._GzipFileName;
|
||||
Comment = BaseStream._GzipComment;
|
||||
}
|
||||
@@ -329,7 +325,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <param name="count">the number of bytes to write.</param>
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -339,7 +335,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
if (BaseStream._wantCompress)
|
||||
{
|
||||
// first write in compression, therefore, emit the GZIP header
|
||||
_headerByteCount = EmitHeader();
|
||||
headerByteCount = EmitHeader();
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -350,56 +346,56 @@ namespace SharpCompress.Compressors.Deflate
|
||||
BaseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
#endregion Stream methods
|
||||
#endregion
|
||||
|
||||
public String Comment
|
||||
{
|
||||
get => _comment;
|
||||
get => comment;
|
||||
set
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
_comment = value;
|
||||
comment = value;
|
||||
}
|
||||
}
|
||||
|
||||
public string FileName
|
||||
{
|
||||
get => _fileName;
|
||||
get => fileName;
|
||||
set
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
_fileName = value;
|
||||
if (_fileName == null)
|
||||
fileName = value;
|
||||
if (fileName == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (_fileName.IndexOf("/") != -1)
|
||||
if (fileName.IndexOf("/") != -1)
|
||||
{
|
||||
_fileName = _fileName.Replace("/", "\\");
|
||||
fileName = fileName.Replace("/", "\\");
|
||||
}
|
||||
if (_fileName.EndsWith("\\"))
|
||||
if (fileName.EndsWith("\\"))
|
||||
{
|
||||
throw new InvalidOperationException("Illegal filename");
|
||||
}
|
||||
|
||||
var index = _fileName.IndexOf("\\");
|
||||
var index = fileName.IndexOf("\\");
|
||||
if (index != -1)
|
||||
{
|
||||
// trim any leading path
|
||||
int length = _fileName.Length;
|
||||
int length = fileName.Length;
|
||||
int num = length;
|
||||
while (--num >= 0)
|
||||
{
|
||||
char c = _fileName[num];
|
||||
char c = fileName[num];
|
||||
if (c == '\\')
|
||||
{
|
||||
_fileName = _fileName.Substring(num + 1, length - num - 1);
|
||||
fileName = fileName.Substring(num + 1, length - num - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -410,10 +406,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
|
||||
private int EmitHeader()
|
||||
{
|
||||
byte[] commentBytes = (Comment == null) ? null
|
||||
: _encoding.GetBytes(Comment);
|
||||
byte[] filenameBytes = (FileName == null) ? null
|
||||
: _encoding.GetBytes(FileName);
|
||||
byte[] commentBytes = (Comment == null) ? null : ArchiveEncoding.Default.GetBytes(Comment);
|
||||
byte[] filenameBytes = (FileName == null) ? null : ArchiveEncoding.Default.GetBytes(FileName);
|
||||
|
||||
int cbLength = (Comment == null) ? 0 : commentBytes.Length + 1;
|
||||
int fnLength = (FileName == null) ? 0 : filenameBytes.Length + 1;
|
||||
@@ -446,7 +440,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
LastModified = DateTime.Now;
|
||||
}
|
||||
TimeSpan delta = LastModified.Value - UNIX_EPOCH;
|
||||
TimeSpan delta = LastModified.Value - UnixEpoch;
|
||||
var timet = (Int32)delta.TotalSeconds;
|
||||
DataConverter.LittleEndian.PutBytes(header, i, timet);
|
||||
i += 4;
|
||||
|
||||
@@ -418,7 +418,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
internal sealed class Adler
|
||||
{
|
||||
// largest prime smaller than 65536
|
||||
private static readonly uint BASE = 65521U;
|
||||
private static readonly int BASE = 65521;
|
||||
|
||||
// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
|
||||
private static readonly int NMAX = 5552;
|
||||
@@ -430,8 +430,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
return 1;
|
||||
}
|
||||
|
||||
uint s1 = adler & 0xffffU;
|
||||
uint s2 = (adler >> 16) & 0xffffU;
|
||||
int s1 = (int)(adler & 0xffff);
|
||||
int s2 = (int)((adler >> 16) & 0xffff);
|
||||
|
||||
while (len > 0)
|
||||
{
|
||||
@@ -486,7 +486,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
s1 %= BASE;
|
||||
s2 %= BASE;
|
||||
}
|
||||
return (s2 << 16) | s1;
|
||||
return (uint)((s2 << 16) | s1);
|
||||
}
|
||||
}
|
||||
}
|
||||
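The `Adler` class above computes Adler-32: two running sums reduced modulo 65521 (the largest prime below 2^16), with `NMAX` bounding how many bytes may be accumulated before a reduction is needed to avoid 32-bit overflow. A compact reference version, illustrative only and without the NMAX batching used in the diff:

```csharp
// Straightforward Adler-32: s1 sums the bytes, s2 sums the successive s1 values,
// both modulo 65521; the result packs s2 into the high 16 bits and s1 into the low.
static uint Adler32(byte[] data)
{
    const uint Base = 65521;
    uint s1 = 1, s2 = 0;
    foreach (byte b in data)
    {
        s1 = (s1 + b) % Base;
        s2 = (s2 + s1) % Base;
    }
    return (s2 << 16) | s1;
}
```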
@@ -1,20 +1,20 @@
|
||||
// ZlibBaseStream.cs
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
|
||||
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
|
||||
// All rights reserved.
|
||||
//
|
||||
// This code module is part of DotNetZip, a zipfile class library.
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// This code is licensed under the Microsoft Public License.
|
||||
// This code is licensed under the Microsoft Public License.
|
||||
// See the file License.txt for the license details.
|
||||
// More info on: http://dotnetzip.codeplex.com
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// last saved (in emacs):
|
||||
// last saved (in emacs):
|
||||
// Time-stamp: <2009-October-28 15:45:15>
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
@@ -28,9 +28,7 @@ using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -65,8 +63,6 @@ namespace SharpCompress.Compressors.Deflate
|
||||
protected internal DateTime _GzipMtime;
|
||||
protected internal int _gzipHeaderByteCount;
|
||||
|
||||
private readonly Encoding _encoding;
|
||||
|
||||
internal int Crc32
|
||||
{
|
||||
get
|
||||
@@ -83,8 +79,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
CompressionMode compressionMode,
|
||||
CompressionLevel level,
|
||||
ZlibStreamFlavor flavor,
|
||||
bool leaveOpen,
|
||||
Encoding encoding)
|
||||
bool leaveOpen)
|
||||
{
|
||||
_flushMode = FlushType.None;
|
||||
|
||||
@@ -95,8 +90,6 @@ namespace SharpCompress.Compressors.Deflate
|
||||
_flavor = flavor;
|
||||
_level = level;
|
||||
|
||||
_encoding = encoding;
|
||||
|
||||
// workitem 7159
|
||||
if (flavor == ZlibStreamFlavor.GZIP)
|
||||
{
|
||||
@@ -424,8 +417,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
}
|
||||
}
|
||||
while (!done);
|
||||
byte[] buffer = list.ToArray();
|
||||
return _encoding.GetString(buffer, 0, buffer.Length);
|
||||
byte[] a = list.ToArray();
|
||||
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
|
||||
}
|
||||
|
||||
private int _ReadAndValidateGzipHeader()
|
||||
@@ -534,19 +527,19 @@ namespace SharpCompress.Compressors.Deflate
|
||||
}
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
throw new ArgumentNullException("buffer");
|
||||
}
|
||||
if (count < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
}
|
||||
if (offset < buffer.GetLowerBound(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
}
|
||||
if ((offset + count) > buffer.GetLength(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
}
|
||||
|
||||
int rc = 0;
|
||||
@@ -599,7 +592,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK);
|
||||
|
||||
// workitem 8557
|
||||
// is there more room in output?
|
||||
// is there more room in output?
|
||||
if (_z.AvailableBytesOut > 0)
|
||||
{
|
||||
if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0)
|
||||
|
||||
@@ -27,7 +27,6 @@
|
||||
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -37,23 +36,23 @@ namespace SharpCompress.Compressors.Deflate
|
||||
private bool _disposed;
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode)
|
||||
: this(stream, mode, CompressionLevel.Default, false, Encoding.UTF8)
|
||||
: this(stream, mode, CompressionLevel.Default, false)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level)
|
||||
: this(stream, mode, level, false, Encoding.UTF8)
|
||||
: this(stream, mode, level, false)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, bool leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen, Encoding.UTF8)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen, Encoding encoding)
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
|
||||
{
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen, encoding);
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen);
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
@@ -327,6 +326,6 @@ namespace SharpCompress.Compressors.Deflate
|
||||
_baseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
#endregion System.IO.Stream methods
|
||||
#endregion
|
||||
}
|
||||
}
|
||||
@@ -58,7 +58,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
if (index < 0 || index >= Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(index));
|
||||
throw new ArgumentOutOfRangeException("index");
|
||||
}
|
||||
|
||||
return (mBits[index >> 5] & (1u << (index & 31))) != 0;
|
||||
@@ -69,7 +69,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
if (index < 0 || index >= Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(index));
|
||||
throw new ArgumentOutOfRangeException("index");
|
||||
}
|
||||
|
||||
mBits[index >> 5] |= 1u << (index & 31);
|
||||
@@ -79,7 +79,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
if (index < 0 || index >= Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(index));
|
||||
throw new ArgumentOutOfRangeException("index");
|
||||
}
|
||||
|
||||
uint bits = mBits[index >> 5];
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using SharpCompress.Converters;
|
||||
using SharpCompress.Crypto;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
@@ -17,62 +14,29 @@ namespace SharpCompress.Compressors.LZMA
|
||||
public class LZipStream : Stream
|
||||
{
|
||||
private readonly Stream stream;
|
||||
private readonly CountingWritableSubStream rawStream;
|
||||
private bool disposed;
|
||||
private readonly bool leaveOpen;
|
||||
private bool finished;
|
||||
|
||||
private long writeCount;
|
||||
|
||||
public LZipStream(Stream stream, CompressionMode mode, bool leaveOpen = false)
|
||||
public LZipStream(Stream stream, CompressionMode mode)
|
||||
: this(stream, mode, false)
|
||||
{
|
||||
Mode = mode;
|
||||
this.leaveOpen = leaveOpen;
|
||||
|
||||
if (mode == CompressionMode.Decompress)
|
||||
{
|
||||
int dSize = ValidateAndReadSize(stream);
|
||||
if (dSize == 0)
|
||||
{
|
||||
throw new IOException("Not an LZip stream");
|
||||
}
|
||||
byte[] properties = GetProperties(dSize);
|
||||
this.stream = new LzmaStream(properties, stream);
|
||||
}
|
||||
else
|
||||
{
|
||||
//default
|
||||
int dSize = 104 * 1024;
|
||||
WriteHeaderSize(stream);
|
||||
|
||||
rawStream = new CountingWritableSubStream(stream);
|
||||
this.stream = new Crc32Stream(new LzmaStream(new LzmaEncoderProperties(true, dSize), false, rawStream));
|
||||
}
|
||||
}
|
||||
|
||||
public void Finish()
|
||||
public LZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
|
||||
{
|
||||
if (!finished)
|
||||
if (mode != CompressionMode.Decompress)
|
||||
{
|
||||
if (Mode == CompressionMode.Compress)
|
||||
{
|
||||
var crc32Stream = (Crc32Stream)stream;
|
||||
crc32Stream.WrappedStream.Dispose();
|
||||
crc32Stream.Dispose();
|
||||
var compressedCount = rawStream.Count;
|
||||
|
||||
var bytes = DataConverter.LittleEndian.GetBytes(crc32Stream.Crc);
|
||||
rawStream.Write(bytes, 0, bytes.Length);
|
||||
|
||||
bytes = DataConverter.LittleEndian.GetBytes(writeCount);
|
||||
rawStream.Write(bytes, 0, bytes.Length);
|
||||
|
||||
//total with headers
|
||||
bytes = DataConverter.LittleEndian.GetBytes(compressedCount + 6 + 20);
|
||||
rawStream.Write(bytes, 0, bytes.Length);
|
||||
}
|
||||
finished = true;
|
||||
throw new NotImplementedException("Only LZip decompression is currently supported");
|
||||
}
|
||||
Mode = mode;
|
||||
this.leaveOpen = leaveOpen;
|
||||
int dictionarySize = ValidateAndReadSize(stream);
|
||||
if (dictionarySize == 0)
|
||||
{
|
||||
throw new IOException("Not an LZip stream");
|
||||
}
|
||||
byte[] properties = GetProperties(dictionarySize);
|
||||
this.stream = new LzmaStream(properties, stream);
|
||||
}
|
||||
|
||||
#region Stream methods
|
||||
@@ -84,23 +48,19 @@ namespace SharpCompress.Compressors.LZMA
|
||||
return;
|
||||
}
|
||||
disposed = true;
|
||||
if (disposing)
|
||||
if (disposing && !leaveOpen)
|
||||
{
|
||||
Finish();
|
||||
if (!leaveOpen)
|
||||
{
|
||||
rawStream.Dispose();
|
||||
}
|
||||
stream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
public CompressionMode Mode { get; }
|
||||
|
||||
public override bool CanRead => Mode == CompressionMode.Decompress;
|
||||
public override bool CanRead => stream.CanRead;
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => Mode == CompressionMode.Compress;
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
@@ -115,16 +75,20 @@ namespace SharpCompress.Compressors.LZMA
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count) => stream.Read(buffer, offset, count);
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void SetLength(long value) => throw new NotImplementedException();
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
stream.Write(buffer, offset, count);
|
||||
writeCount += count;
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
/// <summary>
|
||||
@@ -141,7 +105,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
/// couldn't be read or it isn't a validate LZIP header, or the dictionary
|
||||
/// size if it *is* a valid LZIP file.
|
||||
/// </summary>
|
||||
public static int ValidateAndReadSize(Stream stream)
|
||||
private static int ValidateAndReadSize(Stream stream)
|
||||
{
|
||||
if (stream == null)
|
||||
{
|
||||
@@ -167,17 +131,6 @@ namespace SharpCompress.Compressors.LZMA
|
||||
return (1 << basePower) - subtractionNumerator * (1 << (basePower - 4));
|
||||
}
|
||||
|
||||
public static void WriteHeaderSize(Stream stream)
|
||||
{
|
||||
if (stream == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
}
|
||||
// hard coding the dictionary size encoding
|
||||
byte[] header = new byte[6] {(byte)'L', (byte)'Z', (byte)'I', (byte)'P', 1, 113};
|
||||
stream.Write(header, 0, 6);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a byte array to communicate the parameters and dictionary size to LzmaStream.
|
||||
/// </summary>
|
||||
|
||||
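The `ValidateAndReadSize` hunk above decodes LZip's single dictionary-size byte, and the removed `WriteHeaderSize` hard-codes 113 (0x71) to describe the 104 KiB default used when compressing. A worked sketch of that decoding, assuming the bit layout from the lzip format description (low 5 bits: base power of two; high 3 bits: sixteenths to subtract) since the diff only shows the final expression:

```csharp
// Decode the LZip dictionary-size byte into a size in bytes.
static int DecodeLZipDictionarySize(byte sizeByte)
{
    int basePower = sizeByte & 0x1F;            // base 2 exponent
    int subtractionNumerator = sizeByte >> 5;   // how many 1/16ths of the base to remove
    return (1 << basePower) - subtractionNumerator * (1 << (basePower - 4));
}

// DecodeLZipDictionarySize(113) == (1 << 17) - 3 * (1 << 13) == 106496 == 104 * 1024,
// which matches the 113 written by the removed WriteHeaderSize helper.
```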
@@ -141,7 +141,10 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
position = encoder.Code(null, true);
|
||||
}
|
||||
inputStream?.Dispose();
|
||||
if (inputStream != null)
|
||||
{
|
||||
inputStream.Dispose();
|
||||
}
|
||||
}
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
@@ -58,22 +58,22 @@ namespace SharpCompress.Compressors.LZMA.Utilites
|
||||
{
|
||||
if (stream == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
throw new ArgumentNullException("stream");
|
||||
}
|
||||
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
throw new ArgumentNullException("buffer");
|
||||
}
|
||||
|
||||
if (offset < 0 || offset > buffer.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
}
|
||||
|
||||
if (length < 0 || length > buffer.Length - offset)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(length));
|
||||
throw new ArgumentOutOfRangeException("length");
|
||||
}
|
||||
|
||||
while (length > 0)
|
||||
|
||||
@@ -146,12 +146,12 @@ namespace SharpCompress.Compressors.PPMd.I1
|
||||
{
|
||||
if (target == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(target));
|
||||
throw new ArgumentNullException("target");
|
||||
}
|
||||
|
||||
if (source == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(source));
|
||||
throw new ArgumentNullException("source");
|
||||
}
|
||||
|
||||
EncodeStart(properties);
|
||||
@@ -235,12 +235,12 @@ namespace SharpCompress.Compressors.PPMd.I1
|
||||
{
|
||||
if (target == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(target));
|
||||
throw new ArgumentNullException("target");
|
||||
}
|
||||
|
||||
if (source == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(source));
|
||||
throw new ArgumentNullException("source");
|
||||
}
|
||||
|
||||
DecodeStart(source, properties);
|
||||
|
||||
@@ -31,7 +31,7 @@ namespace SharpCompress.Compressors.Rar {
|
||||
{
|
||||
currentCrc = RarCRC.CheckCrc(currentCrc, buffer, offset, result);
|
||||
}
|
||||
else if (GetCrc() != readStream.CurrentCrc && count != 0)
|
||||
else if (GetCrc() != readStream.CurrentCrc)
|
||||
{
|
||||
// NOTE: we use the last FileHeader in a multipart volume to check CRC
|
||||
throw new InvalidFormatException("file crc mismatch");
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
public static class BinaryUtils
|
||||
{
|
||||
public static int ReadLittleEndianInt32(this BinaryReader reader)
|
||||
{
|
||||
byte[] bytes = reader.ReadBytes(4);
|
||||
return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
|
||||
}
|
||||
|
||||
internal static uint ReadLittleEndianUInt32(this BinaryReader reader)
|
||||
{
|
||||
return unchecked((uint)ReadLittleEndianInt32(reader));
|
||||
}
|
||||
public static int ReadLittleEndianInt32(this Stream stream)
|
||||
{
|
||||
byte[] bytes = new byte[4];
|
||||
var read = stream.ReadFully(bytes);
|
||||
if (!read)
|
||||
{
|
||||
throw new EndOfStreamException();
|
||||
}
|
||||
return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
|
||||
}
|
||||
|
||||
internal static uint ReadLittleEndianUInt32(this Stream stream)
|
||||
{
|
||||
return unchecked((uint)ReadLittleEndianInt32(stream));
|
||||
}
|
||||
|
||||
internal static byte[] ToBigEndianBytes(this uint uint32)
|
||||
{
|
||||
var result = BitConverter.GetBytes(uint32);
|
||||
|
||||
if (BitConverter.IsLittleEndian)
|
||||
Array.Reverse(result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
internal static byte[] ToLittleEndianBytes(this uint uint32)
|
||||
{
|
||||
var result = BitConverter.GetBytes(uint32);
|
||||
|
||||
if (!BitConverter.IsLittleEndian)
|
||||
Array.Reverse(result);
|
||||
|
||||
return result;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,10 +0,0 @@
namespace SharpCompress.Compressors.Xz
{
    public enum CheckType : byte
    {
        NONE = 0x00,
        CRC32 = 0x01,
        CRC64 = 0x04,
        SHA256 = 0x0A
    }
}
@@ -1,60 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
internal static class Crc32
|
||||
{
|
||||
public const UInt32 DefaultPolynomial = 0xedb88320u;
|
||||
public const UInt32 DefaultSeed = 0xffffffffu;
|
||||
|
||||
static UInt32[] defaultTable;
|
||||
|
||||
public static UInt32 Compute(byte[] buffer)
|
||||
{
|
||||
return Compute(DefaultSeed, buffer);
|
||||
}
|
||||
|
||||
public static UInt32 Compute(UInt32 seed, byte[] buffer)
|
||||
{
|
||||
return Compute(DefaultPolynomial, seed, buffer);
|
||||
}
|
||||
|
||||
public static UInt32 Compute(UInt32 polynomial, UInt32 seed, byte[] buffer)
|
||||
{
|
||||
return ~CalculateHash(InitializeTable(polynomial), seed, buffer, 0, buffer.Length);
|
||||
}
|
||||
|
||||
static UInt32[] InitializeTable(UInt32 polynomial)
|
||||
{
|
||||
if (polynomial == DefaultPolynomial && defaultTable != null)
|
||||
return defaultTable;
|
||||
|
||||
var createTable = new UInt32[256];
|
||||
for (var i = 0; i < 256; i++)
|
||||
{
|
||||
var entry = (UInt32)i;
|
||||
for (var j = 0; j < 8; j++)
|
||||
if ((entry & 1) == 1)
|
||||
entry = (entry >> 1) ^ polynomial;
|
||||
else
|
||||
entry = entry >> 1;
|
||||
createTable[i] = entry;
|
||||
}
|
||||
|
||||
if (polynomial == DefaultPolynomial)
|
||||
defaultTable = createTable;
|
||||
|
||||
return createTable;
|
||||
}
|
||||
|
||||
static UInt32 CalculateHash(UInt32[] table, UInt32 seed, IList<byte> buffer, int start, int size)
|
||||
{
|
||||
var crc = seed;
|
||||
for (var i = start; i < size - start; i++)
|
||||
crc = (crc >> 8) ^ table[buffer[i] ^ crc & 0xff];
|
||||
return crc;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
internal static class Crc64
|
||||
{
|
||||
public const UInt64 DefaultSeed = 0x0;
|
||||
|
||||
internal static UInt64[] Table;
|
||||
|
||||
public const UInt64 Iso3309Polynomial = 0xD800000000000000;
|
||||
|
||||
public static UInt64 Compute(byte[] buffer)
|
||||
{
|
||||
return Compute(DefaultSeed, buffer);
|
||||
}
|
||||
|
||||
public static UInt64 Compute(UInt64 seed, byte[] buffer)
|
||||
{
|
||||
if (Table == null)
|
||||
Table = CreateTable(Iso3309Polynomial);
|
||||
|
||||
return CalculateHash(seed, Table, buffer, 0, buffer.Length);
|
||||
}
|
||||
|
||||
public static UInt64 CalculateHash(UInt64 seed, UInt64[] table, IList<byte> buffer, int start, int size)
|
||||
{
|
||||
var crc = seed;
|
||||
|
||||
for (var i = start; i < size; i++)
|
||||
unchecked
|
||||
{
|
||||
crc = (crc >> 8) ^ table[(buffer[i] ^ crc) & 0xff];
|
||||
}
|
||||
|
||||
return crc;
|
||||
}
|
||||
|
||||
public static ulong[] CreateTable(ulong polynomial)
|
||||
{
|
||||
var createTable = new UInt64[256];
|
||||
for (var i = 0; i < 256; ++i)
|
||||
{
|
||||
var entry = (UInt64)i;
|
||||
for (var j = 0; j < 8; ++j)
|
||||
if ((entry & 1) == 1)
|
||||
entry = (entry >> 1) ^ polynomial;
|
||||
else
|
||||
entry = entry >> 1;
|
||||
createTable[i] = entry;
|
||||
}
|
||||
return createTable;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz.Filters
|
||||
{
|
||||
internal abstract class BlockFilter : ReadOnlyStream
|
||||
{
|
||||
public enum FilterTypes : ulong
|
||||
{
|
||||
DELTA = 0x03,
|
||||
ARCH_x86_FILTER = 0x04,
|
||||
ARCH_PowerPC_FILTER = 0x05,
|
||||
ARCH_IA64_FILTER = 0x06,
|
||||
ARCH_ARM_FILTER = 0x07,
|
||||
ARCH_ARMTHUMB_FILTER = 0x08,
|
||||
ARCH_SPARC_FILTER = 0x09,
|
||||
LZMA2 = 0x21,
|
||||
}
|
||||
|
||||
static Dictionary<FilterTypes, Type> FilterMap = new Dictionary<FilterTypes, Type>()
|
||||
{
|
||||
{FilterTypes.LZMA2, typeof(Lzma2Filter) }
|
||||
};
|
||||
|
||||
public abstract bool AllowAsLast { get; }
|
||||
public abstract bool AllowAsNonLast { get; }
|
||||
public abstract bool ChangesDataSize { get; }
|
||||
|
||||
public BlockFilter() { }
|
||||
|
||||
public abstract void Init(byte[] properties);
|
||||
public abstract void ValidateFilter();
|
||||
|
||||
public FilterTypes FilterType { get; set; }
|
||||
public static BlockFilter Read(BinaryReader reader)
|
||||
{
|
||||
var filterType = (FilterTypes)reader.ReadXZInteger();
|
||||
if (!FilterMap.ContainsKey(filterType))
|
||||
throw new NotImplementedException($"Filter {filterType} has not yet been implemented");
|
||||
var filter = Activator.CreateInstance(FilterMap[filterType]) as BlockFilter;
|
||||
|
||||
var sizeOfProperties = reader.ReadXZInteger();
|
||||
if (sizeOfProperties > int.MaxValue)
|
||||
throw new InvalidDataException("Block filter information too large");
|
||||
byte[] properties = reader.ReadBytes((int)sizeOfProperties);
|
||||
filter.Init(properties);
|
||||
return filter;
|
||||
}
|
||||
|
||||
public abstract void SetBaseStream(Stream stream);
|
||||
}
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz.Filters
|
||||
{
|
||||
internal class Lzma2Filter : BlockFilter
|
||||
{
|
||||
public override bool AllowAsLast => true;
|
||||
public override bool AllowAsNonLast => false;
|
||||
public override bool ChangesDataSize => true;
|
||||
|
||||
byte _dictionarySize;
|
||||
public uint DictionarySize
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_dictionarySize > 40)
|
||||
throw new OverflowException("Dictionary size greater than UInt32.Max");
|
||||
if (_dictionarySize == 40)
|
||||
{
|
||||
return uint.MaxValue;
|
||||
}
|
||||
int mantissa = 2 | (_dictionarySize & 1);
|
||||
int exponent = _dictionarySize / 2 + 11;
|
||||
return (uint)mantissa << exponent;
|
||||
}
|
||||
}
|
||||
|
||||
public override void Init(byte[] properties)
|
||||
{
|
||||
if (properties.Length != 1)
|
||||
throw new InvalidDataException("LZMA properties unexpected length");
|
||||
|
||||
_dictionarySize = (byte)(properties[0] & 0x3F);
|
||||
var reserved = properties[0] & 0xC0;
|
||||
if (reserved != 0)
|
||||
throw new InvalidDataException("Reserved bits used in LZMA properties");
|
||||
}
|
||||
|
||||
public override void ValidateFilter()
|
||||
{
|
||||
}
|
||||
|
||||
public override void SetBaseStream(Stream stream)
|
||||
{
|
||||
BaseStream = new SharpCompress.Compressors.LZMA.LzmaStream(new[] { _dictionarySize }, stream);
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
return BaseStream.Read(buffer, offset, count);
|
||||
}
|
||||
}
|
||||
}
|
||||
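`Lzma2Filter.DictionarySize` above expands the one-byte LZMA2 dictionary-size property: values 0-39 alternate between 2^n and 3*2^n sizes, and 40 means the 4 GiB - 1 maximum. The same mapping as a small sketch mirroring the getter, for illustration only:

```csharp
// Expand the LZMA2 dictionary-size property byte, as the DictionarySize getter does.
static uint Lzma2DictionarySize(byte propertyByte)
{
    if (propertyByte > 40)
    {
        throw new OverflowException("Dictionary size greater than UInt32.Max");
    }
    if (propertyByte == 40)
    {
        return uint.MaxValue;                    // 4 GiB - 1
    }
    int mantissa = 2 | (propertyByte & 1);       // 2 or 3
    int exponent = propertyByte / 2 + 11;
    return (uint)mantissa << exponent;
}

// Lzma2DictionarySize(0)  == 4 KiB
// Lzma2DictionarySize(18) == 2 MiB, Lzma2DictionarySize(19) == 3 MiB
```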
@@ -1,32 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
internal static class MultiByteIntegers
|
||||
{
|
||||
public static ulong ReadXZInteger(this BinaryReader reader, int MaxBytes = 9)
|
||||
{
|
||||
if (MaxBytes <= 0)
|
||||
throw new ArgumentOutOfRangeException();
|
||||
if (MaxBytes > 9)
|
||||
MaxBytes = 9;
|
||||
|
||||
byte LastByte = reader.ReadByte();
|
||||
ulong Output = (ulong)LastByte & 0x7F;
|
||||
|
||||
int i = 0;
|
||||
while ((LastByte & 0x80) != 0)
|
||||
{
|
||||
if (++i >= MaxBytes)
|
||||
throw new InvalidDataException();
|
||||
LastByte = reader.ReadByte();
|
||||
if (LastByte == 0)
|
||||
throw new InvalidDataException();
|
||||
|
||||
Output |= ((ulong)(LastByte & 0x7F)) << (i * 7);
|
||||
}
|
||||
return Output;
|
||||
}
|
||||
}
|
||||
}
|
||||
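`ReadXZInteger` above reads the xz variable-length integer encoding: seven payload bits per byte, least-significant group first, the 0x80 bit marking continuation, and at most nine bytes. A matching encoder sketch, illustrative only and not part of the library:

```csharp
using System.Collections.Generic;

// Encode a ulong as an xz multi-byte integer: 7 bits per byte, low bits first,
// 0x80 set on every byte except the last. This is the inverse of ReadXZInteger.
static byte[] EncodeXZInteger(ulong value)
{
    var bytes = new List<byte>();
    while (value >= 0x80)
    {
        bytes.Add((byte)(value | 0x80));
        value >>= 7;
    }
    bytes.Add((byte)value);
    return bytes.ToArray();
}

// EncodeXZInteger(0x7F) -> { 0x7F }
// EncodeXZInteger(0x80) -> { 0x80, 0x01 }
```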
@@ -1,44 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Xz
|
||||
{
|
||||
public abstract class ReadOnlyStream : Stream
|
||||
{
|
||||
public Stream BaseStream { get; protected set; }
|
||||
|
||||
public override bool CanRead => BaseStream.CanRead;
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => throw new NotSupportedException();
|
||||
set => throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,165 +0,0 @@
using System.Collections.Generic;
using System.IO;
using System.Linq;
using SharpCompress.Compressors.Xz.Filters;

namespace SharpCompress.Compressors.Xz
{
    internal sealed class XZBlock : XZReadOnlyStream
    {
        public int BlockHeaderSize => (_blockHeaderSizeByte + 1) * 4;
        public ulong? CompressedSize { get; private set; }
        public ulong? UncompressedSize { get; private set; }
        public Stack<BlockFilter> Filters { get; private set; } = new Stack<BlockFilter>();
        public bool HeaderIsLoaded { get; private set; }
        private CheckType _checkType;
        private int _checkSize;
        private bool _streamConnected;
        private int _numFilters;
        private byte _blockHeaderSizeByte;
        private Stream _decomStream;
        private bool _endOfStream;
        private bool _paddingSkipped;
        private bool _crcChecked;
        private ulong _bytesRead;

        public XZBlock(Stream stream, CheckType checkType, int checkSize) : base(stream)
        {
            _checkType = checkType;
            _checkSize = checkSize;
        }

        public override int Read(byte[] buffer, int offset, int count)
        {
            int bytesRead = 0;
            if (!HeaderIsLoaded)
                LoadHeader();
            if (!_streamConnected)
                ConnectStream();
            if (!_endOfStream)
                bytesRead = _decomStream.Read(buffer, offset, count);
            if (bytesRead != count)
                _endOfStream = true;
            if (_endOfStream && !_paddingSkipped)
                SkipPadding();
            if (_endOfStream && !_crcChecked)
                CheckCrc();
            _bytesRead += (ulong)bytesRead;
            return bytesRead;
        }

        private void SkipPadding()
        {
            int bytes = (int)(BaseStream.Position % 4);
            if (bytes > 0)
            {
                byte[] paddingBytes = new byte[4 - bytes];
                BaseStream.Read(paddingBytes, 0, paddingBytes.Length);
                if (paddingBytes.Any(b => b != 0))
                    throw new InvalidDataException("Padding bytes were non-null");
            }
            _paddingSkipped = true;
        }

        private void CheckCrc()
        {
            byte[] crc = new byte[_checkSize];
            BaseStream.Read(crc, 0, _checkSize);
            // Actually do a check (and read in the bytes
            // into the function throughout the stream read).
            _crcChecked = true;
        }

        private void ConnectStream()
        {
            _decomStream = BaseStream;
            while (Filters.Any())
            {
                var filter = Filters.Pop();
                filter.SetBaseStream(_decomStream);
                _decomStream = filter;
            }
            _streamConnected = true;
        }

        private void LoadHeader()
        {
            ReadHeaderSize();
            byte[] headerCache = CacheHeader();

            using (var cache = new MemoryStream(headerCache))
            using (var cachedReader = new BinaryReader(cache))
            {
                cachedReader.BaseStream.Position = 1; // skip the header size byte
                ReadBlockFlags(cachedReader);
                ReadFilters(cachedReader);
            }
            HeaderIsLoaded = true;
        }

        private void ReadHeaderSize()
        {
            _blockHeaderSizeByte = (byte)BaseStream.ReadByte();
            if (_blockHeaderSizeByte == 0)
                throw new XZIndexMarkerReachedException();
        }

        private byte[] CacheHeader()
        {
            byte[] blockHeaderWithoutCrc = new byte[BlockHeaderSize - 4];
            blockHeaderWithoutCrc[0] = _blockHeaderSizeByte;
            var read = BaseStream.Read(blockHeaderWithoutCrc, 1, BlockHeaderSize - 5);
            if (read != BlockHeaderSize - 5)
                throw new EndOfStreamException("Reached end of stream unexpectedly");

            uint crc = BaseStream.ReadLittleEndianUInt32();
            uint calcCrc = Crc32.Compute(blockHeaderWithoutCrc);
            if (crc != calcCrc)
                throw new InvalidDataException("Block header corrupt");

            return blockHeaderWithoutCrc;
        }

        private void ReadBlockFlags(BinaryReader reader)
        {
            var blockFlags = reader.ReadByte();
            _numFilters = (blockFlags & 0x03) + 1;
            byte reserved = (byte)(blockFlags & 0x3C);

            if (reserved != 0)
                throw new InvalidDataException("Reserved bytes used, perhaps an unknown XZ implementation");

            bool compressedSizePresent = (blockFlags & 0x40) != 0;
            bool uncompressedSizePresent = (blockFlags & 0x80) != 0;

            if (compressedSizePresent)
                CompressedSize = reader.ReadXZInteger();
            if (uncompressedSizePresent)
                UncompressedSize = reader.ReadXZInteger();
        }

        private void ReadFilters(BinaryReader reader, long baseStreamOffset = 0)
        {
            int nonLastSizeChangers = 0;
            for (int i = 0; i < _numFilters; i++)
            {
                var filter = BlockFilter.Read(reader);
                if ((i + 1 == _numFilters && !filter.AllowAsLast)
                    || (i + 1 < _numFilters && !filter.AllowAsNonLast))
                    throw new InvalidDataException("Block Filters in bad order");
                if (filter.ChangesDataSize && i + 1 < _numFilters)
                    nonLastSizeChangers++;
                filter.ValidateFilter();
                Filters.Push(filter);
            }
            if (nonLastSizeChangers > 2)
                throw new InvalidDataException("More than two non-last block filters cannot change stream size");

            int blockHeaderPaddingSize = BlockHeaderSize -
                                         (4 + (int)(reader.BaseStream.Position - baseStreamOffset));
            byte[] blockHeaderPadding = reader.ReadBytes(blockHeaderPaddingSize);
            if (!blockHeaderPadding.All(b => b == 0))
                throw new InvalidDataException("Block header contains unknown fields");
        }
    }
}
@@ -1,49 +0,0 @@
using System.IO;
using System.Linq;
using System.Text;
using SharpCompress.IO;

namespace SharpCompress.Compressors.Xz
{
    public class XZFooter
    {
        private readonly BinaryReader _reader;
        private readonly byte[] _magicBytes = new byte[] { 0x59, 0x5A };
        public long StreamStartPosition { get; private set; }
        public long BackwardSize { get; private set; }
        public byte[] StreamFlags { get; private set; }

        public XZFooter(BinaryReader reader)
        {
            _reader = reader;
            StreamStartPosition = reader.BaseStream.Position;
        }

        public static XZFooter FromStream(Stream stream)
        {
            var footer = new XZFooter(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8));
            footer.Process();
            return footer;
        }

        public void Process()
        {
            uint crc = _reader.ReadLittleEndianUInt32();
            byte[] footerBytes = _reader.ReadBytes(6);
            uint myCrc = Crc32.Compute(footerBytes);
            if (crc != myCrc)
                throw new InvalidDataException("Footer corrupt");
            using (var stream = new MemoryStream(footerBytes))
            using (var reader = new BinaryReader(stream))
            {
                BackwardSize = (reader.ReadLittleEndianUInt32() + 1) * 4;
                StreamFlags = reader.ReadBytes(2);
            }
            byte[] magBy = _reader.ReadBytes(2);
            if (!Enumerable.SequenceEqual(magBy, _magicBytes))
            {
                throw new InvalidDataException("Magic footer missing");
            }
        }
    }
}
@@ -1,55 +0,0 @@
using System;
using System.IO;
using System.Linq;
using System.Text;
using SharpCompress.IO;

namespace SharpCompress.Compressors.Xz
{
    public class XZHeader
    {
        private readonly BinaryReader _reader;
        private readonly byte[] MagicHeader = { 0xFD, 0x37, 0x7A, 0x58, 0x5a, 0x00 };

        public CheckType BlockCheckType { get; private set; }
        public int BlockCheckSize => ((((int)BlockCheckType) + 2) / 3) * 4;

        public XZHeader(BinaryReader reader)
        {
            _reader = reader;
        }

        public static XZHeader FromStream(Stream stream)
        {
            var header = new XZHeader(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8));
            header.Process();
            return header;
        }

        public void Process()
        {
            CheckMagicBytes(_reader.ReadBytes(6));
            ProcessStreamFlags();
        }

        private void ProcessStreamFlags()
        {
            byte[] streamFlags = _reader.ReadBytes(2);
            UInt32 crc = _reader.ReadLittleEndianUInt32();
            UInt32 calcCrc = Crc32.Compute(streamFlags);
            if (crc != calcCrc)
                throw new InvalidDataException("Stream header corrupt");

            BlockCheckType = (CheckType)(streamFlags[1] & 0x0F);
            byte futureUse = (byte)(streamFlags[1] & 0xF0);
            if (futureUse != 0 || streamFlags[0] != 0)
                throw new InvalidDataException("Unknown XZ Stream Version");
        }

        private void CheckMagicBytes(byte[] header)
        {
            if (!Enumerable.SequenceEqual(header, MagicHeader))
                throw new InvalidDataException("Invalid XZ Stream");
        }
    }
}
@@ -1,73 +0,0 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using SharpCompress.IO;

namespace SharpCompress.Compressors.Xz
{
    [CLSCompliant(false)]
    public class XZIndex
    {
        private readonly BinaryReader _reader;
        public long StreamStartPosition { get; private set; }
        public ulong NumberOfRecords { get; private set; }
        public List<XZIndexRecord> Records { get; } = new List<XZIndexRecord>();

        private bool _indexMarkerAlreadyVerified;

        public XZIndex(BinaryReader reader, bool indexMarkerAlreadyVerified)
        {
            _reader = reader;
            _indexMarkerAlreadyVerified = indexMarkerAlreadyVerified;
            StreamStartPosition = reader.BaseStream.Position;
            if (indexMarkerAlreadyVerified)
                StreamStartPosition--;
        }

        public static XZIndex FromStream(Stream stream, bool indexMarkerAlreadyVerified)
        {
            var index = new XZIndex(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8), indexMarkerAlreadyVerified);
            index.Process();
            return index;
        }

        public void Process()
        {
            if (!_indexMarkerAlreadyVerified)
                VerifyIndexMarker();
            NumberOfRecords = _reader.ReadXZInteger();
            for (ulong i = 0; i < NumberOfRecords; i++)
            {
                Records.Add(XZIndexRecord.FromBinaryReader(_reader));
            }
            SkipPadding();
            VerifyCrc32();
        }

        private void VerifyIndexMarker()
        {
            byte marker = _reader.ReadByte();
            if (marker != 0)
                throw new InvalidDataException("Not an index block");
        }

        private void SkipPadding()
        {
            int bytes = (int)(_reader.BaseStream.Position - StreamStartPosition) % 4;
            if (bytes > 0)
            {
                byte[] paddingBytes = _reader.ReadBytes(4 - bytes);
                if (paddingBytes.Any(b => b != 0))
                    throw new InvalidDataException("Padding bytes were non-null");
            }
        }

        private void VerifyCrc32()
        {
            uint crc = _reader.ReadLittleEndianUInt32();
            // TODO verify this matches
        }
    }
}
@@ -1,8 +0,0 @@
using System;

namespace SharpCompress.Compressors.Xz
{
    public class XZIndexMarkerReachedException : Exception
    {
    }
}
@@ -1,22 +0,0 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
    [CLSCompliant(false)]
    public class XZIndexRecord
    {
        public ulong UnpaddedSize { get; private set; }
        public ulong UncompressedSize { get; private set; }

        protected XZIndexRecord() { }

        public static XZIndexRecord FromBinaryReader(BinaryReader br)
        {
            var record = new XZIndexRecord();
            record.UnpaddedSize = br.ReadXZInteger();
            record.UncompressedSize = br.ReadXZInteger();
            return record;
        }
    }
}
@@ -1,14 +0,0 @@
using System.IO;

namespace SharpCompress.Compressors.Xz
{
    public abstract class XZReadOnlyStream : ReadOnlyStream
    {
        public XZReadOnlyStream(Stream stream)
        {
            BaseStream = stream;
            if (!BaseStream.CanRead)
                throw new InvalidDataException("Must be able to read from stream");
        }
    }
}
@@ -1,116 +0,0 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
    [CLSCompliant(false)]
    public sealed class XZStream : XZReadOnlyStream
    {
        public static bool IsXZStream(Stream stream)
        {
            try
            {
                return null != XZHeader.FromStream(stream);
            }
            catch (Exception)
            {
                return false;
            }
        }

        private void AssertBlockCheckTypeIsSupported()
        {
            switch (Header.BlockCheckType)
            {
                case CheckType.NONE:
                    break;
                case CheckType.CRC32:
                    break;
                case CheckType.CRC64:
                    break;
                case CheckType.SHA256:
                    throw new NotImplementedException();
                default:
                    throw new NotSupportedException("Check Type unknown to this version of decoder.");
            }
        }
        public XZHeader Header { get; private set; }
        public XZIndex Index { get; private set; }
        public XZFooter Footer { get; private set; }
        public bool HeaderIsRead { get; private set; }
        private XZBlock _currentBlock;

        bool _endOfStream;

        public XZStream(Stream stream) : base(stream)
        {
        }

        public override int Read(byte[] buffer, int offset, int count)
        {
            int bytesRead = 0;
            if (_endOfStream)
                return bytesRead;
            if (!HeaderIsRead)
                ReadHeader();
            bytesRead = ReadBlocks(buffer, offset, count);
            if (bytesRead < count)
            {
                _endOfStream = true;
                ReadIndex();
                ReadFooter();
            }
            return bytesRead;
        }

        private void ReadHeader()
        {
            Header = XZHeader.FromStream(BaseStream);
            AssertBlockCheckTypeIsSupported();
            HeaderIsRead = true;
        }

        private void ReadIndex()
        {
            Index = XZIndex.FromStream(BaseStream, true);
            // TODO verify Index
        }

        private void ReadFooter()
        {
            Footer = XZFooter.FromStream(BaseStream);
            // TODO verify footer
        }

        private int ReadBlocks(byte[] buffer, int offset, int count)
        {
            int bytesRead = 0;
            if (_currentBlock == null)
                NextBlock();
            for (;;)
            {
                try
                {
                    if (bytesRead >= count)
                        break;
                    int remaining = count - bytesRead;
                    int newOffset = offset + bytesRead;
                    int justRead = _currentBlock.Read(buffer, newOffset, remaining);
                    if (justRead < remaining)
                        NextBlock();
                    bytesRead += justRead;
                }
                catch (XZIndexMarkerReachedException)
                {
                    break;
                }
            }
            return bytesRead;
        }

        private void NextBlock()
        {
            _currentBlock = new XZBlock(BaseStream, Header.BlockCheckType, Header.BlockCheckSize);
        }
    }
}
@@ -156,7 +156,7 @@ namespace SharpCompress.Converters
        {
            if (dest == null)
            {
                throw new ArgumentNullException(nameof(dest));
                throw new ArgumentNullException("dest");
            }
            if (destIdx < 0 || destIdx > dest.Length - size)
            {
@@ -170,7 +170,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 8)
            {
@@ -195,7 +195,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 8)
            {
@@ -221,7 +221,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 8)
            {
@@ -247,7 +247,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 4)
            {
@@ -273,7 +273,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 4)
            {
@@ -299,7 +299,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 4)
            {
@@ -325,7 +325,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 2)
            {
@@ -351,7 +351,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 2)
            {
@@ -468,7 +468,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 8)
            {
@@ -494,7 +494,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 8)
            {
@@ -520,7 +520,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 8)
            {
@@ -546,7 +546,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 4)
            {
@@ -572,7 +572,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 4)
            {
@@ -598,7 +598,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 4)
            {
@@ -624,7 +624,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 2)
            {
@@ -650,7 +650,7 @@ namespace SharpCompress.Converters
        {
            if (data == null)
            {
                throw new ArgumentNullException(nameof(data));
                throw new ArgumentNullException("data");
            }
            if (data.Length - index < 2)
            {

@@ -1,106 +0,0 @@
using System;
using System.Collections.Generic;
using System.IO;

namespace SharpCompress.Crypto
{
    internal sealed class Crc32Stream : Stream
    {
        public const uint DefaultPolynomial = 0xedb88320u;
        public const uint DefaultSeed = 0xffffffffu;

        private static uint[] defaultTable;

        private readonly uint[] table;
        private uint hash;

        private readonly Stream stream;

        public Crc32Stream(Stream stream)
            : this(stream, DefaultPolynomial, DefaultSeed)
        {
        }

        public Crc32Stream(Stream stream, uint polynomial, uint seed)
        {
            this.stream = stream;
            table = InitializeTable(polynomial);
            hash = seed;
        }

        public Stream WrappedStream => stream;

        public override void Flush()
        {
            stream.Flush();
        }

        public override int Read(byte[] buffer, int offset, int count) => throw new NotSupportedException();

        public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();

        public override void SetLength(long value) => throw new NotSupportedException();

        public override void Write(byte[] buffer, int offset, int count)
        {
            stream.Write(buffer, offset, count);
            hash = CalculateCrc(table, hash, buffer, offset, count);
        }

        public override bool CanRead => stream.CanRead;
        public override bool CanSeek => false;
        public override bool CanWrite => stream.CanWrite;
        public override long Length => throw new NotSupportedException();
        public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }

        public uint Crc => ~hash;

        public static uint Compute(byte[] buffer)
        {
            return Compute(DefaultSeed, buffer);
        }

        public static uint Compute(uint seed, byte[] buffer)
        {
            return Compute(DefaultPolynomial, seed, buffer);
        }

        public static uint Compute(uint polynomial, uint seed, byte[] buffer)
        {
            return ~CalculateCrc(InitializeTable(polynomial), seed, buffer, 0, buffer.Length);
        }

        private static uint[] InitializeTable(uint polynomial)
        {
            if (polynomial == DefaultPolynomial && defaultTable != null)
                return defaultTable;

            var createTable = new uint[256];
            for (var i = 0; i < 256; i++)
            {
                var entry = (uint)i;
                for (var j = 0; j < 8; j++)
                    if ((entry & 1) == 1)
                        entry = (entry >> 1) ^ polynomial;
                    else
                        entry = entry >> 1;
                createTable[i] = entry;
            }

            if (polynomial == DefaultPolynomial)
                defaultTable = createTable;

            return createTable;
        }

        private static uint CalculateCrc(uint[] table, uint crc, byte[] buffer, int offset, int count)
        {
            unchecked
            {
                for (int i = offset, end = offset + count; i < end; i++)
                    crc = (crc >> 8) ^ table[(crc ^ buffer[i]) & 0xFF];
            }
            return crc;
        }
    }
}
@@ -12,7 +12,7 @@ namespace Org.BouncyCastle.Crypto.Parameters
        {
            if (key == null)
            {
                throw new ArgumentNullException(nameof(key));
                throw new ArgumentNullException("key");
            }

            this.key = (byte[])key.Clone();
@@ -25,15 +25,15 @@ namespace Org.BouncyCastle.Crypto.Parameters
        {
            if (key == null)
            {
                throw new ArgumentNullException(nameof(key));
                throw new ArgumentNullException("key");
            }
            if (keyOff < 0 || keyOff > key.Length)
            {
                throw new ArgumentOutOfRangeException(nameof(keyOff));
                throw new ArgumentOutOfRangeException("keyOff");
            }
            if (keyLen < 0 || (keyOff + keyLen) > key.Length)
            {
                throw new ArgumentOutOfRangeException(nameof(keyLen));
                throw new ArgumentOutOfRangeException("keyLen");
            }

            this.key = new byte[keyLen];

@@ -41,7 +41,7 @@ namespace SharpCompress.IO
            throw new NotSupportedException();
        }

        public override long Length => BytesLeftToRead;
        public override long Length => throw new NotSupportedException();

        public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }

@@ -22,7 +22,6 @@ namespace SharpCompress.IO

        public override void Flush()
        {
            writableStream.Flush();
        }

        public override long Length => throw new NotSupportedException();

@@ -1,6 +1,5 @@
using System;
using System.IO;
using SharpCompress.Compressors.Filters;

namespace SharpCompress.IO
{
@@ -47,13 +46,8 @@ namespace SharpCompress.IO
            }
            else
            {

                bufferStream.TransferTo(buffer);
                //create new memorystream to allow proper resizing as memorystream could be a user provided buffer
                //https://github.com/adamhathcock/sharpcompress/issues/306
                bufferStream = new MemoryStream();
                buffer.Position = 0;
                buffer.TransferTo(bufferStream);
                bufferStream = buffer;
                bufferStream.Position = 0;
            }
            isRewound = true;
@@ -111,12 +105,6 @@

        public override int Read(byte[] buffer, int offset, int count)
        {
            //don't actually read if we don't really want to read anything
            //currently a network stream bug on Windows for .NET Core
            if (count == 0)
            {
                return 0;
            }
            int read;
            if (isRewound && bufferStream.Position != bufferStream.Length)
            {

@@ -139,28 +139,30 @@ namespace SharpCompress.Readers
            }
        }

        private readonly byte[] skipBuffer = new byte[4096];

        private void Skip()
        {
            if (ArchiveType != ArchiveType.Rar
                && !Entry.IsSolid
                && Entry.CompressedSize > 0)
            if (!Entry.IsSolid)
            {
                //not solid and has a known compressed size then we can skip raw bytes.
                var part = Entry.Parts.First();
                var rawStream = part.GetRawStream();
                var rawStream = Entry.Parts.First().GetRawStream();

                if (rawStream != null)
                {
                    var bytesToAdvance = Entry.CompressedSize;
                    rawStream.Skip(bytesToAdvance);
                    part.Skipped = true;
                    for (var i = 0; i < bytesToAdvance / skipBuffer.Length; i++)
                    {
                        rawStream.Read(skipBuffer, 0, skipBuffer.Length);
                    }
                    rawStream.Read(skipBuffer, 0, (int)(bytesToAdvance % skipBuffer.Length));
                    return;
                }
            }
            //don't know the size so we have to try to decompress to skip
            using (var s = OpenEntryStream())
            {
                s.Skip();
                while (s.Read(skipBuffer, 0, skipBuffer.Length) > 0)
                {
                }
            }
        }

@@ -29,11 +29,11 @@ namespace SharpCompress.Readers.GZip
            return new GZipReader(stream, options ?? new ReaderOptions());
        }

        #endregion Open
        #endregion

        internal override IEnumerable<GZipEntry> GetEntries(Stream stream)
        {
            return GZipEntry.GetEntries(stream, Options);
            return GZipEntry.GetEntries(stream);
        }
    }
}
@@ -14,7 +14,6 @@ using SharpCompress.Readers.Rar;
using SharpCompress.Readers.Tar;
using SharpCompress.Readers.Zip;
using SharpCompress.Compressors.LZMA;
using SharpCompress.Compressors.Xz;

namespace SharpCompress.Readers
{
@@ -77,6 +76,7 @@ namespace SharpCompress.Readers
                    return new TarReader(rewindableStream, options, CompressionType.LZip);
                }
            }

            rewindableStream.Rewind(false);
            if (RarArchive.IsRarFile(rewindableStream, options))
            {
@@ -90,18 +90,7 @@ namespace SharpCompress.Readers
                rewindableStream.Rewind(true);
                return TarReader.Open(rewindableStream, options);
            }
            rewindableStream.Rewind(false);
            if (XZStream.IsXZStream(rewindableStream))
            {
                rewindableStream.Rewind(true);
                XZStream testStream = new XZStream(rewindableStream);
                if (TarArchive.IsTarFile(testStream))
                {
                    rewindableStream.Rewind(true);
                    return new TarReader(rewindableStream, options, CompressionType.Xz);
                }
            }
            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Reader Formats: Zip, GZip, BZip2, Tar, Rar, LZip, XZ");
            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Reader Formats: Zip, GZip, BZip2, Tar, Rar");
        }
    }
}
@@ -8,7 +8,6 @@ namespace SharpCompress.Readers
        /// Look for RarArchive (Check for self-extracting archives or cases where RarArchive isn't at the start of the file)
        /// </summary>
        public bool LookForHeader { get; set; }

        public string Password { get; set; }
    }
}
@@ -10,7 +10,6 @@ using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
using SharpCompress.IO;
using SharpCompress.Compressors.LZMA;
using SharpCompress.Compressors.Xz;

namespace SharpCompress.Readers.Tar
{
@@ -44,10 +43,6 @@ namespace SharpCompress.Readers.Tar
                {
                    return new LZipStream(stream, CompressionMode.Decompress);
                }
                case CompressionType.Xz:
                {
                    return new XZStream(stream);
                }
                case CompressionType.None:
                {
                    return stream;
@@ -114,11 +109,11 @@ namespace SharpCompress.Readers.Tar
            return new TarReader(rewindableStream, options, CompressionType.None);
        }

        #endregion Open
        #endregion

        internal override IEnumerable<TarEntry> GetEntries(Stream stream)
        {
            return TarEntry.GetEntries(StreamingMode.Streaming, stream, compressionType, Options.ArchiveEncoding);
            return TarEntry.GetEntries(StreamingMode.Streaming, stream, compressionType);
        }
    }
}
@@ -8,13 +8,13 @@ namespace SharpCompress.Readers.Zip
{
    public class ZipReader : AbstractReader<ZipEntry, ZipVolume>
    {
        private readonly StreamingZipHeaderFactory _headerFactory;
        private readonly StreamingZipHeaderFactory headerFactory;

        internal ZipReader(Stream stream, ReaderOptions options)
            : base(options, ArchiveType.Zip)
        {
            Volume = new ZipVolume(stream, options);
            _headerFactory = new StreamingZipHeaderFactory(options.Password, options.ArchiveEncoding);
            headerFactory = new StreamingZipHeaderFactory(options.Password);
        }

        public override ZipVolume Volume { get; }
@@ -33,26 +33,26 @@ namespace SharpCompress.Readers.Zip
            return new ZipReader(stream, options ?? new ReaderOptions());
        }

        #endregion Open
        #endregion

        internal override IEnumerable<ZipEntry> GetEntries(Stream stream)
        {
            foreach (ZipHeader h in _headerFactory.ReadStreamHeader(stream))
            foreach (ZipHeader h in headerFactory.ReadStreamHeader(stream))
            {
                if (h != null)
                {
                    switch (h.ZipHeaderType)
                    {
                        case ZipHeaderType.LocalEntry:
                        {
                            yield return new ZipEntry(new StreamingZipFilePart(h as LocalEntryHeader,
                                                                               stream));
                        }
                        {
                            yield return new ZipEntry(new StreamingZipFilePart(h as LocalEntryHeader,
                                                                               stream));
                        }
                            break;
                        case ZipHeaderType.DirectoryEnd:
                        {
                            yield break;
                        }
                        {
                            yield break;
                        }
                    }
                }
            }

@@ -1,12 +1,14 @@
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <AssemblyTitle>SharpCompress - Pure C# Decompression/Compression</AssemblyTitle>
    <NeutralLanguage>en-US</NeutralLanguage>
    <VersionPrefix>0.18.2</VersionPrefix>
    <AssemblyVersion>0.18.2.0</AssemblyVersion>
    <FileVersion>0.18.2.0</FileVersion>
    <VersionPrefix>0.16.1</VersionPrefix>
    <AssemblyVersion>0.16.1.0</AssemblyVersion>
    <FileVersion>0.16.1.0</FileVersion>
    <Authors>Adam Hathcock</Authors>
    <TargetFrameworks Condition="'$(LibraryFrameworks)'==''">net45;net35;netstandard1.0;netstandard1.3;netstandard2.0</TargetFrameworks>
    <TargetFrameworks Condition="'$(LibraryFrameworks)'==''">net45;net35;netstandard1.0;netstandard1.3</TargetFrameworks>
    <TargetFrameworks Condition="'$(LibraryFrameworks)'!=''">$(LibraryFrameworks)</TargetFrameworks>
    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
    <AssemblyName>SharpCompress</AssemblyName>
@@ -14,14 +16,16 @@
    <SignAssembly>true</SignAssembly>
    <PublicSign Condition=" '$(OS)' != 'Windows_NT' ">true</PublicSign>
    <PackageId>SharpCompress</PackageId>
    <PackageTags>rar;unrar;zip;unzip;bzip2;gzip;tar;7zip;lzip;xz</PackageTags>
    <PackageTags>rar;unrar;zip;unzip;bzip2;gzip;tar;7zip</PackageTags>
    <PackageProjectUrl>https://github.com/adamhathcock/sharpcompress</PackageProjectUrl>
    <PackageLicenseUrl>https://github.com/adamhathcock/sharpcompress/blob/master/LICENSE.txt</PackageLicenseUrl>
    <GenerateAssemblyTitleAttribute>false</GenerateAssemblyTitleAttribute>
    <GenerateAssemblyProductAttribute>false</GenerateAssemblyProductAttribute>
    <Description>SharpCompress is a compression library for NET Standard 1.0 that can unrar, decompress 7zip, decompress xz, zip/unzip, tar/untar lzip/unlzip, bzip2/unbzip2 and gzip/ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.</Description>
    <Description>SharpCompress is a compression library for NET Standard 1.0 that can unrar, decompress 7zip, zip/unzip, tar/untar bzip2/unbzip2 and gzip/ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.</Description>
  </PropertyGroup>

  <PropertyGroup Condition=" '$(TargetFramework)' == 'netstandard1.0' ">
    <DefineConstants>$(DefineConstants);NO_FILE;NO_CRYPTO;SILVERLIGHT</DefineConstants>
  </PropertyGroup>
</Project>

</Project>

@@ -7,7 +7,7 @@ using SharpCompress.Readers;
namespace SharpCompress
{
    internal static class Utility
    {
    {
        public static ReadOnlyCollection<T> ToReadOnly<T>(this IEnumerable<T> items)
        {
            return new ReadOnlyCollection<T>(items.ToList());
@@ -138,7 +138,7 @@ namespace SharpCompress

        public static void Skip(this Stream source, long advanceAmount)
        {
            byte[] buffer = GetTransferByteArray();
            byte[] buffer = new byte[32 * 1024];
            int read = 0;
            int readCount = 0;
            do
@@ -162,9 +162,9 @@ namespace SharpCompress
            while (true);
        }

        public static void Skip(this Stream source)
        public static void SkipAll(this Stream source)
        {
            byte[] buffer = GetTransferByteArray();
            byte[] buffer = new byte[32 * 1024];
            do
            {
        }

@@ -6,30 +6,29 @@ namespace SharpCompress.Writers
{
    public abstract class AbstractWriter : IWriter
    {
        private bool closeStream;
        private bool isDisposed;

        protected AbstractWriter(ArchiveType type, WriterOptions writerOptions)
        protected AbstractWriter(ArchiveType type)
        {
            WriterType = type;
            WriterOptions = writerOptions;
        }

        protected void InitalizeStream(Stream stream)
        protected void InitalizeStream(Stream stream, bool closeStream)
        {
            OutputStream = stream;
            this.closeStream = closeStream;
        }

        protected Stream OutputStream { get; private set; }

        public ArchiveType WriterType { get; }

        protected WriterOptions WriterOptions { get; }

        public abstract void Write(string filename, Stream source, DateTime? modificationTime);

        protected virtual void Dispose(bool isDisposing)
        {
            if (isDisposing && !WriterOptions.LeaveStreamOpen)
            if (isDisposing && closeStream)
            {
                OutputStream.Dispose();
            }

@@ -8,15 +8,12 @@ namespace SharpCompress.Writers.GZip
{
    public class GZipWriter : AbstractWriter
    {
        private bool _wroteToStream;
        private bool wroteToStream;

        public GZipWriter(Stream destination, GZipWriterOptions options = null)
            : base(ArchiveType.GZip, options ?? new GZipWriterOptions())
        public GZipWriter(Stream destination, bool leaveOpen = false)
            : base(ArchiveType.GZip)
        {
            InitalizeStream(new GZipStream(destination, CompressionMode.Compress,
                                           options?.CompressionLevel ?? CompressionLevel.Default,
                                           WriterOptions.LeaveStreamOpen,
                                           WriterOptions.ArchiveEncoding.GetEncoding()));
            InitalizeStream(new GZipStream(destination, CompressionMode.Compress, leaveOpen), !leaveOpen);
        }

        protected override void Dispose(bool isDisposing)
@@ -31,7 +28,7 @@ namespace SharpCompress.Writers.GZip

        public override void Write(string filename, Stream source, DateTime? modificationTime)
        {
            if (_wroteToStream)
            if (wroteToStream)
            {
                throw new ArgumentException("Can only write a single stream to a GZip file.");
            }
@@ -39,7 +36,7 @@ namespace SharpCompress.Writers.GZip
            stream.FileName = filename;
            stream.LastModified = modificationTime;
            source.TransferTo(stream);
            _wroteToStream = true;
            wroteToStream = true;
        }
    }
}
@@ -1,28 +0,0 @@
using SharpCompress.Common;
using SharpCompress.Compressors.Deflate;

namespace SharpCompress.Writers.GZip
{
    public class GZipWriterOptions : WriterOptions
    {
        public GZipWriterOptions()
            : base(CompressionType.GZip)
        {
        }

        internal GZipWriterOptions(WriterOptions options)
            : base(options.CompressionType)
        {
            LeaveStreamOpen = options.LeaveStreamOpen;
            ArchiveEncoding = options.ArchiveEncoding;

            var writerOptions = options as GZipWriterOptions;
            if (writerOptions != null)
            {
                CompressionLevel = writerOptions.CompressionLevel;
            }
        }

        public CompressionLevel CompressionLevel { get; set; } = CompressionLevel.Default;
    }
}
@@ -1,18 +1,17 @@
using System;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Common.Tar;
using SharpCompress.Compressors;
using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Compressors.LZMA;

namespace SharpCompress.Writers.Tar
{
    public class TarWriter : AbstractWriter
    {
        public TarWriter(Stream destination, WriterOptions options)
            : base(ArchiveType.Tar, options)
            : base(ArchiveType.Tar)
        {
            if (!destination.CanWrite)
            {
@@ -24,17 +23,12 @@ namespace SharpCompress.Writers.Tar
                    break;
                case CompressionType.BZip2:
                {
                    destination = new BZip2Stream(destination, CompressionMode.Compress, true);
                    destination = new BZip2Stream(destination, CompressionMode.Compress, options.LeaveStreamOpen);
                }
                    break;
                case CompressionType.GZip:
                {
                    destination = new GZipStream(destination, CompressionMode.Compress, true);
                }
                    break;
                case CompressionType.LZip:
                {
                    destination = new LZipStream(destination, CompressionMode.Compress, true);
                    destination = new GZipStream(destination, CompressionMode.Compress, options.LeaveStreamOpen);
                }
                    break;
                default:
@@ -42,7 +36,7 @@ namespace SharpCompress.Writers.Tar
                    throw new InvalidFormatException("Tar does not support compression: " + options.CompressionType);
                }
            }
            InitalizeStream(destination);
            InitalizeStream(destination, !options.LeaveStreamOpen);
        }

        public override void Write(string filename, Stream source, DateTime? modificationTime)
@@ -72,12 +66,11 @@ namespace SharpCompress.Writers.Tar

            long realSize = size ?? source.Length;

            TarHeader header = new TarHeader(WriterOptions.ArchiveEncoding);

            header.LastModifiedTime = modificationTime ?? TarHeader.Epoch;
            TarHeader header = new TarHeader();
            header.ModTime = modificationTime ?? TarHeader.Epoch;
            header.Name = NormalizeFilename(filename);
            header.Size = realSize;
            header.Write(OutputStream);
            header.WriteHeader(OutputStream);
            size = source.TransferTo(OutputStream);
            PadTo512(size.Value, false);
        }
@@ -99,19 +92,7 @@ namespace SharpCompress.Writers.Tar
            {
                PadTo512(0, true);
                PadTo512(0, true);
                switch (OutputStream)
                {
                    case BZip2Stream b:
                    {
                        b.Finish();
                        break;
                    }
                    case LZipStream l:
                    {
                        l.Finish();
                        break;
                    }
                }
                (OutputStream as BZip2Stream)?.Finish(); // required when bzip2 compression is used
            }
            base.Dispose(isDisposing);
        }

@@ -19,7 +19,7 @@ namespace SharpCompress.Writers
                {
                    throw new InvalidFormatException("GZip archives only support GZip compression type.");
                }
                return new GZipWriter(stream, new GZipWriterOptions(writerOptions));
                return new GZipWriter(stream, writerOptions.LeaveStreamOpen);
            }
            case ArchiveType.Zip:
            {

Some files were not shown because too many files have changed in this diff