Mirror of https://github.com/adamhathcock/sharpcompress.git (synced 2026-02-04 13:34:59 +00:00)

Compare commits: 0.20.0 ... native_zli (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | c63d7a07fc | |
@@ -1,15 +0,0 @@
version: 2
jobs:
  build:
    docker:
      - image: microsoft/dotnet:2.0.5-sdk-2.1.4
    steps:
      - checkout
      - run:
          name: Install unzip
          command: |
            apt-get update
            apt-get install -y unzip
      - run:
          name: Build
          command: ./build.sh
2 .gitattributes (vendored)
@@ -2,4 +2,4 @@
* text=auto

# need original files to be windows
*.txt text eol=crlf
test/TestArchives/Original/*.txt eol=crlf
1 .gitignore (vendored)
@@ -14,4 +14,3 @@ tests/TestArchives/Scratch
.vs
tools
.vscode
.idea/
76 FORMATS.md
@@ -1,60 +1,36 @@
# Formats
# Archive Formats

## Accessing Archives

* Archive classes allow random access to a seekable stream.
* Reader classes allow forward-only reading on a stream.
* Writer classes allow forward-only writing on a stream.
Archive classes allow random access to a seekable stream.
Reader classes allow forward-only reading.
Writer classes allow forward-only writing.
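For illustration, a minimal sketch of the Archive and Reader access styles, assuming the 0.20-era API surface (`ZipArchive`, `ReaderFactory`, `ExtractionOptions` from the `SharpCompress.Archives`, `SharpCompress.Readers` and `SharpCompress.Common` namespaces); the file paths are placeholders:

```C#
// Archive API: random access over a seekable stream (or file)
using (var archive = ZipArchive.Open("archive.zip"))
{
    foreach (var entry in archive.Entries.Where(e => !e.IsDirectory))
    {
        entry.WriteToDirectory("output", new ExtractionOptions { ExtractFullPath = true, Overwrite = true });
    }
}

// Reader API: forward-only, so it also works on non-seekable streams
using (Stream stream = File.OpenRead("archive.tar.gz"))
using (var reader = ReaderFactory.Open(stream))
{
    while (reader.MoveToNextEntry())
    {
        if (!reader.Entry.IsDirectory)
        {
            reader.WriteEntryToDirectory("output", new ExtractionOptions { ExtractFullPath = true });
        }
    }
}
```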
## Supported Format Table

| Archive Format | Compression Format(s) | Compress/Decompress | Archive API | Reader API | Writer API |
| --- | --- | --- | --- | --- | --- |
| Rar | Rar | Decompress (1) | RarArchive | RarReader | N/A |
| Zip (2) | None, DEFLATE, Deflate64, BZip2, LZMA/LZMA2, PPMd | Both | ZipArchive | ZipReader | ZipWriter |
| Tar | None | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.GZip | DEFLATE | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.BZip2 | BZip2 | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.LZip | LZMA | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.XZ | LZMA2 | Decompress | TarArchive | TarReader | TarWriter (3) |
| GZip (single file) | DEFLATE | Both | GZipArchive | GZipReader | GZipWriter |
| 7Zip (4) | LZMA, LZMA2, BZip2, PPMd, BCJ, BCJ2, Deflate | Decompress | SevenZipArchive | N/A | N/A |
| LZip (single file) (5) | LZip (LZMA) | Both | LZipArchive | LZipReader | LZipWriter |

| Archive Format | Compression Format(s) | Compress/Decompress | Archive API | Reader API | Writer API |
| --- | --- | --- | --- | --- | --- |
| Rar | Rar | Decompress (1) | RarArchive | RarReader | N/A |
| Zip (2) | None, DEFLATE, BZip2, LZMA/LZMA2, PPMd | Both | ZipArchive | ZipReader | ZipWriter |
| Tar | None, BZip2, GZip, LZip | Both | TarArchive | TarReader | TarWriter (3) |
| GZip (single file) | GZip | Both | GZipArchive | GZipReader | GZipWriter |
| 7Zip (4) | LZMA, LZMA2, BZip2, PPMd, BCJ, BCJ2, Deflate | Decompress | SevenZipArchive | N/A | N/A |

1. SOLID Rars are only supported in the RarReader API.
2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported, but only with seekable streams, as the Zip spec doesn't support Zip64 data in post-data descriptors. Deflate64 is only supported for reading.
3. The Tar format requires a file size in the header. If no size is specified to the TarWriter and the stream is not seekable, then an exception will be thrown.
4. The 7Zip format doesn't allow for reading as a forward-only stream, so 7Zip is only supported through the Archive API.
5. LZip has no support for extra data like the file name or timestamp. There is a default filename used when looking at the entry Key on the archive.
1. SOLID Rars are only supported in the RarReader API.
2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading is supported.
3. The Tar format requires a file size in the header. If no size is specified to the TarWriter and the stream is not seekable, then an exception will be thrown.
4. The 7Zip format doesn't allow for reading as a forward-only stream, so 7Zip is only supported through the Archive API.

## Compression Streams
## Compressors

For those who want to directly compress/decompress bits. The single-file formats are represented here as well. However, BZip2, LZip and XZ have no metadata (GZip has a little), so using them without something like a Tar file makes little sense.
For those who want to directly compress/decompress bits.

| Compressor | Compress/Decompress |
| --- | --- |
| BZip2Stream | Both |
| GZipStream | Both |
| DeflateStream | Both |
| Deflate64Stream | Decompress |
| LZMAStream | Both |
| PPMdStream | Both |
| ADCStream | Decompress |
| LZipStream | Both |
| XZStream | Decompress |

## Archive Formats vs Compression

Sometimes the terminology gets mixed.

### Compression

DEFLATE and LZMA are pure compression algorithms.

### Formats

Formats like Zip, 7Zip and Rar are archive formats only. They use other compression methods (e.g. DEFLATE, LZMA) or proprietary ones (e.g. RAR).

### Overlap

GZip, BZip2 and LZip are single-file archival formats. The overlap in the API happens because Tar uses the single-file formats as "compression" methods and the API tries to hide this a bit.

| Compressor | Compress/Decompress |
| --- | --- |
| BZip2Stream | Both |
| GZipStream | Both |
| DeflateStream | Both |
| LZMAStream | Both |
| PPMdStream | Both |
| ADCStream | Decompress |
| LZipStream | Decompress |
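For illustration, a minimal sketch of using one of the compressor streams directly. It assumes the `SharpCompress.Compressors.Deflate.GZipStream` type and its `(Stream, CompressionMode)` constructor; the file names are placeholders:

```C#
using System.IO;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;

// Compress: wrap the output stream and copy the raw data into it.
using (Stream output = File.OpenWrite("data.bin.gz"))
using (var gzip = new GZipStream(output, CompressionMode.Compress))
using (Stream input = File.OpenRead("data.bin"))
{
    input.CopyTo(gzip);
}

// Decompress: wrap the input stream and copy the decoded data out.
using (Stream input = File.OpenRead("data.bin.gz"))
using (var gzip = new GZipStream(input, CompressionMode.Decompress))
using (Stream output = File.OpenWrite("data.bin"))
{
    gzip.CopyTo(output);
}
```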
62 README.md
@@ -1,36 +1,21 @@
# SharpCompress

SharpCompress is a compression library in pure C# for .NET 3.5, 4.5 and .NET Standard 1.0/1.3 that can unrar, un7zip, unzip, untar, unbzip2 and ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.
SharpCompress is a compression library for .NET/Mono/Silverlight/WP7 that can unrar, un7zip, unzip, untar, unbzip2 and ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.

The major feature is support for non-seekable streams so large files can be processed on the fly (i.e. a download stream).
The major feature is support for non-seekable streams so large files can be processed on the fly (i.e. a download stream).

AppVeyor Build - https://ci.appveyor.com/project/adamhathcock/sharpcompress/branch/master

Circle CI Build - https://circleci.com/gh/adamhathcock/sharpcompress

## Need Help?

Post issues on GitHub!

Check the [Supported Formats](FORMATS.md) and [Basic Usage](USAGE.md).

## Recommended Formats

In general, I recommend GZip (Deflate)/BZip2 (BZip)/LZip (LZMA), as the simplicity of those formats lends itself to better long-term archival as well as streamability. Tar is often used in conjunction for multiple files in a single archive (e.g. `.tar.gz`).

Zip is okay, but it's a very haphazard format and the variation in headers and implementations makes it hard to get correct. It uses Deflate by default but supports a lot of compression methods.

RAR is not recommended as it's a proprietary format and the compression is closed source. Use Tar/LZip for LZMA.

7Zip and XZ are both overly complicated. 7Zip does not support streaming. XZ has known holes, explained here: http://www.nongnu.org/lzip/xz_inadequate.html. Use Tar/LZip for LZMA compression instead.
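As an illustration of the Tar/LZip recommendation, a minimal sketch assuming the 0.20-era writer API (`WriterFactory`, `WriterOptions`, `CompressionType.LZip`, and the `Write(entryPath, FileInfo)` extension); the paths are placeholders:

```C#
using System.IO;
using SharpCompress.Common;
using SharpCompress.Writers;

// Write a .tar.lz archive: Tar is the container, LZip (LZMA) is the compression.
using (Stream stream = File.OpenWrite("backup.tar.lz"))
using (var writer = WriterFactory.Open(stream, ArchiveType.Tar, new WriterOptions(CompressionType.LZip)))
{
    writer.Write("docs/readme.txt", new FileInfo("docs/readme.txt"));
}
```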
## A Simple Request

Hi everyone. I hope you're using SharpCompress and finding it useful. Please give me feedback on what you'd like to see changed, especially as far as usability goes. New feature suggestions are always welcome as well. I would also like to know what projects SharpCompress is being used in. I like seeing how it is used to give me ideas for future versions. Thanks!

Please do not email me directly to ask for help. If you think there is a real issue, please report it here.
Please do not email me directly to ask for help. If you think there is a real issue, please report it here.

## Want to contribute?

@@ -42,43 +27,10 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
* 7Zip writing
* Zip64 (Need writing and extend Reading)
* Multi-volume Zip support.
* RAR5 support

## Version Log

### Version 0.18

* [Now on Github releases](https://github.com/adamhathcock/sharpcompress/releases/tag/0.18)

### Version 0.17.1

* Fix - [Bug Fix for .NET Core on Windows](https://github.com/adamhathcock/sharpcompress/pull/257)

### Version 0.17.0

* New - Full LZip support! Can read and write LZip files and Tars inside LZip files. [Make LZip a first class citizen. #241](https://github.com/adamhathcock/sharpcompress/issues/241)
* New - XZ read support! Can read XZ files and Tars inside XZ files. [XZ in SharpCompress #91](https://github.com/adamhathcock/sharpcompress/issues/94)
* Fix - [Regression - zip file writing on seekable streams always assumed stream start was 0. Introduced with Zip64 writing.](https://github.com/adamhathcock/sharpcompress/issues/244)
* Fix - [Zip files with post-data descriptors can be properly skipped via decompression](https://github.com/adamhathcock/sharpcompress/issues/162)

### Version 0.16.2

* Fix [.NET 3.5 should support files and cryptography (was a regression from 0.16.0)](https://github.com/adamhathcock/sharpcompress/pull/251)
* Fix [Zip per entry compression customization wrote the wrong method into the zip archive](https://github.com/adamhathcock/sharpcompress/pull/249)

### Version 0.16.1

* Fix [Preserve compression method when getting a compressed stream](https://github.com/adamhathcock/sharpcompress/pull/235)
* Fix [RAR entry key normalization fix](https://github.com/adamhathcock/sharpcompress/issues/201)

### Version 0.16.0

* Breaking - [Progress Event Tracking rethink](https://github.com/adamhathcock/sharpcompress/pull/226)
* Update to VS2017 - [VS2017](https://github.com/adamhathcock/sharpcompress/pull/231) - Framework targets have been changed.
* New - [Add Zip64 writing](https://github.com/adamhathcock/sharpcompress/pull/211)
* [Fix invalid/mismatching Zip version flags.](https://github.com/adamhathcock/sharpcompress/issues/164) - This allows nuget/System.IO.Packaging to read zip files generated by SharpCompress
* [Fix 7Zip directory hiding](https://github.com/adamhathcock/sharpcompress/pull/215/files)
* [Verify RAR CRC headers](https://github.com/adamhathcock/sharpcompress/pull/220)

### Version 0.15.2

* [Fix invalid headers](https://github.com/adamhathcock/sharpcompress/pull/210) - fixes an issue creating large-ish zip archives that was introduced with zip64 reading.

@@ -137,7 +89,7 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
### Version 0.11.6

* Bug fix for global header in Tar
* Writers now have a leaveOpen `bool` overload. They won't close streams if not requested to.
* Writers now have a leaveOpen `bool` overload. They won't close streams if not requested to.

### Version 0.11.5

@@ -156,7 +108,7 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un

### Version 0.11

* Been over a year, contains mainly fixes from contributors!
* Been over a year, contains mainly fixes from contributors!
* Possible breaking change: ArchiveEncoding is UTF8 by default now.
* TAR supports writing long names using longlink
* RAR Protect Header added

@@ -183,8 +135,6 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
* Embedded some BouncyCastle crypto classes to allow RAR Decryption and Winzip AES Decryption in Portable and Windows Store DLLs
* Built in Release (I think)

XZ implementation based on: https://github.com/sambott/XZ.NET by @sambott

7Zip implementation based on: https://code.google.com/p/managed-lzma/
LICENSE

@@ -45,14 +45,10 @@
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/INDENT_ANONYMOUS_METHOD_BLOCK/@EntryValue">True</s:Boolean>
<s:Int64 x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/KEEP_BLANK_LINES_IN_CODE/@EntryValue">1</s:Int64>
<s:Int64 x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/KEEP_BLANK_LINES_IN_DECLARATIONS/@EntryValue">1</s:Int64>
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_ACCESSOR_ATTRIBUTE_ON_SAME_LINE_EX/@EntryValue">NEVER</s:String>
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_ACCESSORHOLDER_ATTRIBUTE_ON_SAME_LINE_EX/@EntryValue">NEVER</s:String>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_CONSTRUCTOR_INITIALIZER_ON_SAME_LINE/@EntryValue">False</s:Boolean>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_FIELD_ATTRIBUTE_ON_SAME_LINE/@EntryValue">False</s:Boolean>
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_FIELD_ATTRIBUTE_ON_SAME_LINE_EX/@EntryValue">NEVER</s:String>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_ACCESSORHOLDER_ON_SINGLE_LINE/@EntryValue">True</s:Boolean>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_ACCESSOR_ATTRIBUTE_ON_SAME_LINE/@EntryValue">False</s:Boolean>
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_EMBEDDED_STATEMENT_ON_SAME_LINE/@EntryValue">NEVER</s:String>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_INITIALIZER_ON_SINGLE_LINE/@EntryValue">True</s:Boolean>

<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_WHILE_ON_NEW_LINE/@EntryValue">True</s:Boolean>
@@ -118,11 +114,6 @@
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=NAMESPACE_005FALIAS/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></s:String>
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=XAML_005FFIELD/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></s:String>
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=XAML_005FRESOURCE/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></s:String>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpAttributeForSingleLineMethodUpgrade/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpKeepExistingMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpPlaceEmbeddedOnSameLineMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpRenamePlacementToArrangementMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EAddAccessorOwnerDeclarationBracesMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002ECSharpPlaceAttributeOnSameLineMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EMigrateBlankLinesAroundFieldToBlankLinesAroundProperty/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EMigrateThisQualifierSettings/@EntryIndexedValue">True</s:Boolean></wpf:ResourceDictionary>
9 USAGE.md
@@ -1,8 +1,7 @@
# SharpCompress Usage

## Stream Rules

When dealing with Streams, the rule should be that you don't close a stream you didn't create. In effect, this means you should always put a Stream in a using block to dispose of it.
When dealing with Streams, the rule should be that you don't close a stream you didn't create. In effect, this means you should always put a Stream in a using block to dispose of it.

However, the .NET Framework often has classes that will dispose streams by default to make things "easy", like the following:

@@ -13,7 +12,7 @@ using (var reader = new StreamReader(File.Open("foo")))
}
```

In this example, reader should get disposed. However, the stream rules say that the `FileStream` created by `File.Open` should remain open; the .NET Framework closes it for you by default unless you use a constructor overload that says otherwise. In general, you should be writing Stream code like this:
In this example, reader should get disposed. However, the stream rules say that the `FileStream` created by `File.Open` should remain open; the .NET Framework closes it for you by default unless you use a constructor overload that says otherwise. In general, you should be writing Stream code like this:

```C#
using (var fileStream = File.Open("foo"))
@@ -26,7 +25,7 @@ using (var reader = new StreamReader(fileStream))
To deal with the "correct" rules as well as the expectations of users, I've decided on this:

* When writing, leave streams open.
* When reading, close streams.
* When reading, close streams.

To be explicit though, consider always using the overloads that take `ReaderOptions` or `WriterOptions` and explicitly set `LeaveStreamOpen` the way you want.
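For example, a minimal sketch (assuming the 0.20-era `ReaderFactory`/`ReaderOptions` API) of being explicit about stream ownership when reading; the file name is a placeholder:

```C#
using (var fileStream = File.OpenRead("archive.zip"))
using (var reader = ReaderFactory.Open(fileStream, new ReaderOptions { LeaveStreamOpen = true }))
{
    while (reader.MoveToNextEntry())
    {
        // process reader.Entry here; fileStream is left open after the reader is disposed
    }
}
```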
@@ -44,9 +43,11 @@ using (var archive = ZipArchive.Create())
}
```


### Create Zip Archive from all files in a directory and save in memory

```C#

var memoryStream = new MemoryStream();
using (var archive = ZipArchive.Create())
{
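The hunk above is cut off by the compare view; for reference, here is a sketch of the complete pattern it is drawn from, assuming the 0.20-era `WriterOptions` API (the directory path is a placeholder):

```C#
var memoryStream = new MemoryStream();
using (var archive = ZipArchive.Create())
{
    archive.AddAllFromDirectory(@"D:\temp");
    archive.SaveTo(memoryStream, new WriterOptions(CompressionType.Deflate)
    {
        LeaveStreamOpen = true
    });
}
// Reset the position so the archive can be read back out of the MemoryStream.
memoryStream.Position = 0;
```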
14 appveyor.yml
@@ -11,10 +11,18 @@ branches:
nuget:
  disable_publish_on_pr: true

build_script:
  - ps: .\build.ps1
before_build:
  - cmd: dotnet restore

test: off
build:
  parallel: true
  verbosity: minimal

after_build:
  - dotnet pack "src\SharpCompress\SharpCompress.csproj" -c Release

test_script:
  - dotnet test --no-build .\tests\SharpCompress.Test\SharpCompress.Test.csproj

artifacts:
  - path: src\SharpCompress\bin\Release\*.nupkg
89 build.cake
@@ -1,89 +0,0 @@
var target = Argument("target", "Default");
var tag = Argument("tag", "cake");

Task("Restore")
    .Does(() =>
{
    DotNetCoreRestore(".");
});

Task("Build")
    .IsDependentOn("Restore")
    .Does(() =>
{
    if (IsRunningOnWindows())
    {
        MSBuild("./sharpcompress.sln", c =>
        {
            c.SetConfiguration("Release")
             .SetVerbosity(Verbosity.Minimal)
             .UseToolVersion(MSBuildToolVersion.VS2017);
        });
    }
    else
    {
        var settings = new DotNetCoreBuildSettings
        {
            Framework = "netstandard1.0",
            Configuration = "Release",
            NoRestore = true
        };

        DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);

        settings.Framework = "netstandard1.3";
        DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);

        settings.Framework = "netstandard2.0";
        DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);
    }
});

Task("Test")
    .IsDependentOn("Build")
    .Does(() =>
{
    var files = GetFiles("tests/**/*.csproj");
    foreach(var file in files)
    {
        var settings = new DotNetCoreTestSettings
        {
            Configuration = "Release",
            Framework = "netcoreapp2.0"
        };
        DotNetCoreTest(file.ToString(), settings);
    }
});

Task("Pack")
    .IsDependentOn("Build")
    .Does(() =>
{
    if (IsRunningOnWindows())
    {
        MSBuild("src/SharpCompress/SharpCompress.csproj", c => c
            .SetConfiguration("Release")
            .SetVerbosity(Verbosity.Minimal)
            .UseToolVersion(MSBuildToolVersion.VS2017)
            .WithProperty("NoBuild", "true")
            .WithTarget("Pack"));
    }
    else
    {
        Information("Skipping Pack as this is not Windows");
    }
});

Task("Default")
    .IsDependentOn("Restore")
    .IsDependentOn("Build")
    .IsDependentOn("Test")
    .IsDependentOn("Pack");

Task("RunTests")
    .IsDependentOn("Restore")
    .IsDependentOn("Build")
    .IsDependentOn("Test");

RunTarget(target);
228
build.ps1
228
build.ps1
@@ -1,228 +0,0 @@
|
||||
##########################################################################
|
||||
# This is the Cake bootstrapper script for PowerShell.
|
||||
# This file was downloaded from https://github.com/cake-build/resources
|
||||
# Feel free to change this file to fit your needs.
|
||||
##########################################################################
|
||||
|
||||
<#
|
||||
|
||||
.SYNOPSIS
|
||||
This is a Powershell script to bootstrap a Cake build.
|
||||
|
||||
.DESCRIPTION
|
||||
This Powershell script will download NuGet if missing, restore NuGet tools (including Cake)
|
||||
and execute your Cake build script with the parameters you provide.
|
||||
|
||||
.PARAMETER Script
|
||||
The build script to execute.
|
||||
.PARAMETER Target
|
||||
The build script target to run.
|
||||
.PARAMETER Configuration
|
||||
The build configuration to use.
|
||||
.PARAMETER Verbosity
|
||||
Specifies the amount of information to be displayed.
|
||||
.PARAMETER Experimental
|
||||
Tells Cake to use the latest Roslyn release.
|
||||
.PARAMETER WhatIf
|
||||
Performs a dry run of the build script.
|
||||
No tasks will be executed.
|
||||
.PARAMETER Mono
|
||||
Tells Cake to use the Mono scripting engine.
|
||||
.PARAMETER SkipToolPackageRestore
|
||||
Skips restoring of packages.
|
||||
.PARAMETER ScriptArgs
|
||||
Remaining arguments are added here.
|
||||
|
||||
.LINK
|
||||
http://cakebuild.net
|
||||
|
||||
#>
|
||||
|
||||
[CmdletBinding()]
|
||||
Param(
|
||||
[string]$Script = "build.cake",
|
||||
[string]$Target = "Default",
|
||||
[ValidateSet("Release", "Debug")]
|
||||
[string]$Configuration = "Release",
|
||||
[ValidateSet("Quiet", "Minimal", "Normal", "Verbose", "Diagnostic")]
|
||||
[string]$Verbosity = "Verbose",
|
||||
[switch]$Experimental,
|
||||
[Alias("DryRun","Noop")]
|
||||
[switch]$WhatIf,
|
||||
[switch]$Mono,
|
||||
[switch]$SkipToolPackageRestore,
|
||||
[Parameter(Position=0,Mandatory=$false,ValueFromRemainingArguments=$true)]
|
||||
[string[]]$ScriptArgs
|
||||
)
|
||||
|
||||
[Reflection.Assembly]::LoadWithPartialName("System.Security") | Out-Null
|
||||
function MD5HashFile([string] $filePath)
|
||||
{
|
||||
if ([string]::IsNullOrEmpty($filePath) -or !(Test-Path $filePath -PathType Leaf))
|
||||
{
|
||||
return $null
|
||||
}
|
||||
|
||||
[System.IO.Stream] $file = $null;
|
||||
[System.Security.Cryptography.MD5] $md5 = $null;
|
||||
try
|
||||
{
|
||||
$md5 = [System.Security.Cryptography.MD5]::Create()
|
||||
$file = [System.IO.File]::OpenRead($filePath)
|
||||
return [System.BitConverter]::ToString($md5.ComputeHash($file))
|
||||
}
|
||||
finally
|
||||
{
|
||||
if ($file -ne $null)
|
||||
{
|
||||
$file.Dispose()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Write-Host "Preparing to run build script..."
|
||||
|
||||
if(!$PSScriptRoot){
|
||||
$PSScriptRoot = Split-Path $MyInvocation.MyCommand.Path -Parent
|
||||
}
|
||||
|
||||
$TOOLS_DIR = Join-Path $PSScriptRoot "tools"
|
||||
$ADDINS_DIR = Join-Path $TOOLS_DIR "addins"
|
||||
$MODULES_DIR = Join-Path $TOOLS_DIR "modules"
|
||||
$NUGET_EXE = Join-Path $TOOLS_DIR "nuget.exe"
|
||||
$CAKE_EXE = Join-Path $TOOLS_DIR "Cake/Cake.exe"
|
||||
$NUGET_URL = "https://dist.nuget.org/win-x86-commandline/latest/nuget.exe"
|
||||
$PACKAGES_CONFIG = Join-Path $TOOLS_DIR "packages.config"
|
||||
$PACKAGES_CONFIG_MD5 = Join-Path $TOOLS_DIR "packages.config.md5sum"
|
||||
$ADDINS_PACKAGES_CONFIG = Join-Path $ADDINS_DIR "packages.config"
|
||||
$MODULES_PACKAGES_CONFIG = Join-Path $MODULES_DIR "packages.config"
|
||||
|
||||
# Should we use mono?
|
||||
$UseMono = "";
|
||||
if($Mono.IsPresent) {
|
||||
Write-Verbose -Message "Using the Mono based scripting engine."
|
||||
$UseMono = "-mono"
|
||||
}
|
||||
|
||||
# Should we use the new Roslyn?
|
||||
$UseExperimental = "";
|
||||
if($Experimental.IsPresent -and !($Mono.IsPresent)) {
|
||||
Write-Verbose -Message "Using experimental version of Roslyn."
|
||||
$UseExperimental = "-experimental"
|
||||
}
|
||||
|
||||
# Is this a dry run?
|
||||
$UseDryRun = "";
|
||||
if($WhatIf.IsPresent) {
|
||||
$UseDryRun = "-dryrun"
|
||||
}
|
||||
|
||||
# Make sure tools folder exists
|
||||
if ((Test-Path $PSScriptRoot) -and !(Test-Path $TOOLS_DIR)) {
|
||||
Write-Verbose -Message "Creating tools directory..."
|
||||
New-Item -Path $TOOLS_DIR -Type directory | out-null
|
||||
}
|
||||
|
||||
# Make sure that packages.config exist.
|
||||
if (!(Test-Path $PACKAGES_CONFIG)) {
|
||||
Write-Verbose -Message "Downloading packages.config..."
|
||||
try { (New-Object System.Net.WebClient).DownloadFile("http://cakebuild.net/download/bootstrapper/packages", $PACKAGES_CONFIG) } catch {
|
||||
Throw "Could not download packages.config."
|
||||
}
|
||||
}
|
||||
|
||||
# Try find NuGet.exe in path if not exists
|
||||
if (!(Test-Path $NUGET_EXE)) {
|
||||
Write-Verbose -Message "Trying to find nuget.exe in PATH..."
|
||||
$existingPaths = $Env:Path -Split ';' | Where-Object { (![string]::IsNullOrEmpty($_)) -and (Test-Path $_ -PathType Container) }
|
||||
$NUGET_EXE_IN_PATH = Get-ChildItem -Path $existingPaths -Filter "nuget.exe" | Select -First 1
|
||||
if ($NUGET_EXE_IN_PATH -ne $null -and (Test-Path $NUGET_EXE_IN_PATH.FullName)) {
|
||||
Write-Verbose -Message "Found in PATH at $($NUGET_EXE_IN_PATH.FullName)."
|
||||
$NUGET_EXE = $NUGET_EXE_IN_PATH.FullName
|
||||
}
|
||||
}
|
||||
|
||||
# Try download NuGet.exe if not exists
|
||||
if (!(Test-Path $NUGET_EXE)) {
|
||||
Write-Verbose -Message "Downloading NuGet.exe..."
|
||||
try {
|
||||
(New-Object System.Net.WebClient).DownloadFile($NUGET_URL, $NUGET_EXE)
|
||||
} catch {
|
||||
Throw "Could not download NuGet.exe."
|
||||
}
|
||||
}
|
||||
|
||||
# Save nuget.exe path to environment to be available to child processed
|
||||
$ENV:NUGET_EXE = $NUGET_EXE
|
||||
|
||||
# Restore tools from NuGet?
|
||||
if(-Not $SkipToolPackageRestore.IsPresent) {
|
||||
Push-Location
|
||||
Set-Location $TOOLS_DIR
|
||||
|
||||
# Check for changes in packages.config and remove installed tools if true.
|
||||
[string] $md5Hash = MD5HashFile($PACKAGES_CONFIG)
|
||||
if((!(Test-Path $PACKAGES_CONFIG_MD5)) -Or
|
||||
($md5Hash -ne (Get-Content $PACKAGES_CONFIG_MD5 ))) {
|
||||
Write-Verbose -Message "Missing or changed package.config hash..."
|
||||
Remove-Item * -Recurse -Exclude packages.config,nuget.exe
|
||||
}
|
||||
|
||||
Write-Verbose -Message "Restoring tools from NuGet..."
|
||||
$NuGetOutput = Invoke-Expression "&`"$NUGET_EXE`" install -ExcludeVersion -OutputDirectory `"$TOOLS_DIR`""
|
||||
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Throw "An error occured while restoring NuGet tools."
|
||||
}
|
||||
else
|
||||
{
|
||||
$md5Hash | Out-File $PACKAGES_CONFIG_MD5 -Encoding "ASCII"
|
||||
}
|
||||
Write-Verbose -Message ($NuGetOutput | out-string)
|
||||
|
||||
Pop-Location
|
||||
}
|
||||
|
||||
# Restore addins from NuGet
|
||||
if (Test-Path $ADDINS_PACKAGES_CONFIG) {
|
||||
Push-Location
|
||||
Set-Location $ADDINS_DIR
|
||||
|
||||
Write-Verbose -Message "Restoring addins from NuGet..."
|
||||
$NuGetOutput = Invoke-Expression "&`"$NUGET_EXE`" install -ExcludeVersion -OutputDirectory `"$ADDINS_DIR`""
|
||||
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Throw "An error occured while restoring NuGet addins."
|
||||
}
|
||||
|
||||
Write-Verbose -Message ($NuGetOutput | out-string)
|
||||
|
||||
Pop-Location
|
||||
}
|
||||
|
||||
# Restore modules from NuGet
|
||||
if (Test-Path $MODULES_PACKAGES_CONFIG) {
|
||||
Push-Location
|
||||
Set-Location $MODULES_DIR
|
||||
|
||||
Write-Verbose -Message "Restoring modules from NuGet..."
|
||||
$NuGetOutput = Invoke-Expression "&`"$NUGET_EXE`" install -ExcludeVersion -OutputDirectory `"$MODULES_DIR`""
|
||||
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Throw "An error occured while restoring NuGet modules."
|
||||
}
|
||||
|
||||
Write-Verbose -Message ($NuGetOutput | out-string)
|
||||
|
||||
Pop-Location
|
||||
}
|
||||
|
||||
# Make sure that Cake has been installed.
|
||||
if (!(Test-Path $CAKE_EXE)) {
|
||||
Throw "Could not find Cake.exe at $CAKE_EXE"
|
||||
}
|
||||
|
||||
# Start Cake
|
||||
Write-Host "Running build script..."
|
||||
Invoke-Expression "& `"$CAKE_EXE`" `"$Script`" -target=`"$Target`" -configuration=`"$Configuration`" -verbosity=`"$Verbosity`" $UseMono $UseDryRun $UseExperimental $ScriptArgs"
|
||||
exit $LASTEXITCODE
|
||||
42
build.sh
42
build.sh
@@ -1,42 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
##########################################################################
|
||||
# This is the Cake bootstrapper script for Linux and OS X.
|
||||
# This file was downloaded from https://github.com/cake-build/resources
|
||||
# Feel free to change this file to fit your needs.
|
||||
##########################################################################
|
||||
|
||||
# Define directories.
|
||||
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
TOOLS_DIR=$SCRIPT_DIR/tools
|
||||
CAKE_VERSION=0.26.0
|
||||
CAKE_DLL=$TOOLS_DIR/Cake.CoreCLR.$CAKE_VERSION/Cake.dll
|
||||
|
||||
# Make sure the tools folder exist.
|
||||
if [ ! -d "$TOOLS_DIR" ]; then
|
||||
mkdir "$TOOLS_DIR"
|
||||
fi
|
||||
|
||||
###########################################################################
|
||||
# INSTALL CAKE
|
||||
###########################################################################
|
||||
|
||||
if [ ! -f "$CAKE_DLL" ]; then
|
||||
curl -Lsfo Cake.CoreCLR.zip "https://www.nuget.org/api/v2/package/Cake.CoreCLR/$CAKE_VERSION" && unzip -q Cake.CoreCLR.zip -d "$TOOLS_DIR/Cake.CoreCLR.$CAKE_VERSION" && rm -f Cake.CoreCLR.zip
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "An error occured while installing Cake."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Make sure that Cake has been installed.
|
||||
if [ ! -f "$CAKE_DLL" ]; then
|
||||
echo "Could not find Cake.exe at '$CAKE_DLL'."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
###########################################################################
|
||||
# RUN BUILD SCRIPT
|
||||
###########################################################################
|
||||
|
||||
# Start Cake
|
||||
exec dotnet "$CAKE_DLL" "$@"
|
||||
@@ -6,7 +6,6 @@ using SharpCompress.Archives.SevenZip;
|
||||
using SharpCompress.Archives.Tar;
|
||||
using SharpCompress.Archives.Zip;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Compressors.LZMA;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
namespace SharpCompress.Archives
|
||||
@@ -56,7 +55,7 @@ namespace SharpCompress.Archives
|
||||
stream.Seek(0, SeekOrigin.Begin);
|
||||
return TarArchive.Open(stream, readerOptions);
|
||||
}
|
||||
throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip");
|
||||
throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip");
|
||||
}
|
||||
|
||||
public static IWritableArchive Create(ArchiveType type)
|
||||
|
||||
@@ -14,7 +14,6 @@ namespace SharpCompress.Archives.GZip
|
||||
public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
{
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -37,7 +36,6 @@ namespace SharpCompress.Archives.GZip
|
||||
return new GZipArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -56,11 +54,11 @@ namespace SharpCompress.Archives.GZip
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="options"></param>
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="options"></param>
|
||||
internal GZipArchive(FileInfo fileInfo, ReaderOptions options)
|
||||
: base(ArchiveType.GZip, fileInfo, options)
|
||||
{
|
||||
@@ -106,9 +104,15 @@ namespace SharpCompress.Archives.GZip
|
||||
{
|
||||
// read the header on the first read
|
||||
byte[] header = new byte[10];
|
||||
int n = stream.Read(header, 0, header.Length);
|
||||
|
||||
// workitem 8501: handle edge case (decompress empty stream)
|
||||
if (!stream.ReadFully(header))
|
||||
if (n == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (n != 10)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
@@ -154,7 +158,7 @@ namespace SharpCompress.Archives.GZip
|
||||
{
|
||||
throw new InvalidOperationException("Only one entry is allowed in a GZip Archive");
|
||||
}
|
||||
using (var writer = new GZipWriter(stream, new GZipWriterOptions(options)))
|
||||
using (var writer = new GZipWriter(stream))
|
||||
{
|
||||
foreach (var entry in oldEntries.Concat(newEntries)
|
||||
.Where(x => !x.IsDirectory))
|
||||
@@ -175,7 +179,7 @@ namespace SharpCompress.Archives.GZip
|
||||
protected override IEnumerable<GZipArchiveEntry> LoadEntries(IEnumerable<GZipVolume> volumes)
|
||||
{
|
||||
Stream stream = volumes.Single().Stream;
|
||||
yield return new GZipArchiveEntry(this, new GZipFilePart(stream, ReaderOptions.ArchiveEncoding));
|
||||
yield return new GZipArchiveEntry(this, new GZipFilePart(stream));
|
||||
}
|
||||
|
||||
protected override IReader CreateReaderForSolidExtraction()
|
||||
|
||||
@@ -4,7 +4,6 @@ using System.IO;
|
||||
using System.Linq;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.SevenZip;
|
||||
using SharpCompress.Compressors.LZMA.Utilites;
|
||||
using SharpCompress.IO;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
@@ -107,7 +106,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
for (int i = 0; i < database.Files.Count; i++)
|
||||
{
|
||||
var file = database.Files[i];
|
||||
yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file, ReaderOptions.ArchiveEncoding));
|
||||
yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,7 +117,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
stream.Position = 0;
|
||||
var reader = new ArchiveReader();
|
||||
reader.Open(stream);
|
||||
database = reader.ReadDatabase(new PasswordProvider(ReaderOptions.Password));
|
||||
database = reader.ReadDatabase(null);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -145,7 +144,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
|
||||
protected override IReader CreateReaderForSolidExtraction()
|
||||
{
|
||||
return new SevenZipReader(ReaderOptions, this);
|
||||
return new SevenZipReader(this);
|
||||
}
|
||||
|
||||
public override bool IsSolid { get { return Entries.Where(x => !x.IsDirectory).GroupBy(x => x.FilePart.Folder).Count() > 1; } }
|
||||
@@ -166,8 +165,8 @@ namespace SharpCompress.Archives.SevenZip
|
||||
private Stream currentStream;
|
||||
private CFileItem currentItem;
|
||||
|
||||
internal SevenZipReader(ReaderOptions readerOptions, SevenZipArchive archive)
|
||||
: base(readerOptions, ArchiveType.SevenZip)
|
||||
internal SevenZipReader(SevenZipArchive archive)
|
||||
: base(new ReaderOptions(), ArchiveType.SevenZip)
|
||||
{
|
||||
this.archive = archive;
|
||||
}
|
||||
@@ -191,7 +190,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
}
|
||||
else
|
||||
{
|
||||
currentStream = archive.database.GetFolderStream(stream, currentFolder, new PasswordProvider(Options.Password));
|
||||
currentStream = archive.database.GetFolderStream(stream, currentFolder, null);
|
||||
}
|
||||
foreach (var entry in group)
|
||||
{
|
||||
@@ -206,21 +205,5 @@ namespace SharpCompress.Archives.SevenZip
|
||||
return CreateEntryStream(new ReadOnlySubStream(currentStream, currentItem.Size));
|
||||
}
|
||||
}
|
||||
|
||||
private class PasswordProvider : IPasswordProvider
|
||||
{
|
||||
private readonly string _password;
|
||||
|
||||
public PasswordProvider(string password)
|
||||
{
|
||||
_password = password;
|
||||
|
||||
}
|
||||
|
||||
public string CryptoGetTextPassword()
|
||||
{
|
||||
return _password;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ namespace SharpCompress.Archives.Tar
|
||||
public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
{
|
||||
#if !NO_FILE
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -39,7 +39,7 @@ namespace SharpCompress.Archives.Tar
|
||||
return new TarArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -52,7 +52,6 @@ namespace SharpCompress.Archives.Tar
|
||||
}
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
public static bool IsTarFile(string filePath)
|
||||
{
|
||||
return IsTarFile(new FileInfo(filePath));
|
||||
@@ -75,7 +74,7 @@ namespace SharpCompress.Archives.Tar
|
||||
{
|
||||
try
|
||||
{
|
||||
TarHeader tar = new TarHeader(new ArchiveEncoding());
|
||||
TarHeader tar = new TarHeader();
|
||||
tar.Read(new BinaryReader(stream));
|
||||
return tar.Name.Length > 0 && Enum.IsDefined(typeof(EntryType), tar.EntryType);
|
||||
}
|
||||
@@ -99,6 +98,7 @@ namespace SharpCompress.Archives.Tar
|
||||
|
||||
protected override IEnumerable<TarVolume> LoadVolumes(FileInfo file)
|
||||
{
|
||||
|
||||
return new TarVolume(file.OpenRead(), ReaderOptions).AsEnumerable();
|
||||
}
|
||||
#endif
|
||||
@@ -127,7 +127,7 @@ namespace SharpCompress.Archives.Tar
|
||||
{
|
||||
Stream stream = volumes.Single().Stream;
|
||||
TarHeader previousHeader = null;
|
||||
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream, ReaderOptions.ArchiveEncoding))
|
||||
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream))
|
||||
{
|
||||
if (header != null)
|
||||
{
|
||||
@@ -152,7 +152,7 @@ namespace SharpCompress.Archives.Tar
|
||||
memoryStream.Position = 0;
|
||||
var bytes = memoryStream.ToArray();
|
||||
|
||||
header.Name = ReaderOptions.ArchiveEncoding.Decode(bytes).TrimNulls();
|
||||
header.Name = ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length).TrimNulls();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -182,7 +182,7 @@ namespace SharpCompress.Archives.Tar
|
||||
IEnumerable<TarArchiveEntry> oldEntries,
|
||||
IEnumerable<TarArchiveEntry> newEntries)
|
||||
{
|
||||
using (var writer = new TarWriter(stream, new TarWriterOptions(options)))
|
||||
using (var writer = new TarWriter(stream, options))
|
||||
{
|
||||
foreach (var entry in oldEntries.Concat(newEntries)
|
||||
.Where(x => !x.IsDirectory))
|
||||
|
||||
@@ -24,7 +24,6 @@ namespace SharpCompress.Archives.Zip
|
||||
public CompressionLevel DeflateCompressionLevel { get; set; }
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -47,7 +46,6 @@ namespace SharpCompress.Archives.Zip
|
||||
return new ZipArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -60,7 +58,6 @@ namespace SharpCompress.Archives.Zip
|
||||
}
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
public static bool IsZipFile(string filePath, string password = null)
|
||||
{
|
||||
return IsZipFile(new FileInfo(filePath), password);
|
||||
@@ -81,7 +78,7 @@ namespace SharpCompress.Archives.Zip
|
||||
|
||||
public static bool IsZipFile(Stream stream, string password = null)
|
||||
{
|
||||
StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password, new ArchiveEncoding());
|
||||
StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password);
|
||||
try
|
||||
{
|
||||
ZipHeader header =
|
||||
@@ -112,7 +109,7 @@ namespace SharpCompress.Archives.Zip
|
||||
internal ZipArchive(FileInfo fileInfo, ReaderOptions readerOptions)
|
||||
: base(ArchiveType.Zip, fileInfo, readerOptions)
|
||||
{
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding);
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password);
|
||||
}
|
||||
|
||||
protected override IEnumerable<ZipVolume> LoadVolumes(FileInfo file)
|
||||
@@ -134,7 +131,7 @@ namespace SharpCompress.Archives.Zip
|
||||
internal ZipArchive(Stream stream, ReaderOptions readerOptions)
|
||||
: base(ArchiveType.Zip, stream, readerOptions)
|
||||
{
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding);
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password);
|
||||
}
|
||||
|
||||
protected override IEnumerable<ZipVolume> LoadVolumes(IEnumerable<Stream> streams)
|
||||
@@ -153,19 +150,19 @@ namespace SharpCompress.Archives.Zip
|
||||
switch (h.ZipHeaderType)
|
||||
{
|
||||
case ZipHeaderType.DirectoryEntry:
|
||||
{
|
||||
yield return new ZipArchiveEntry(this,
|
||||
new SeekableZipFilePart(headerFactory,
|
||||
h as DirectoryEntryHeader,
|
||||
stream));
|
||||
}
|
||||
{
|
||||
yield return new ZipArchiveEntry(this,
|
||||
new SeekableZipFilePart(headerFactory,
|
||||
h as DirectoryEntryHeader,
|
||||
stream));
|
||||
}
|
||||
break;
|
||||
case ZipHeaderType.DirectoryEnd:
|
||||
{
|
||||
byte[] bytes = (h as DirectoryEndHeader).Comment;
|
||||
volume.Comment = ReaderOptions.ArchiveEncoding.Decode(bytes);
|
||||
yield break;
|
||||
}
|
||||
{
|
||||
byte[] bytes = (h as DirectoryEndHeader).Comment;
|
||||
volume.Comment = ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length);
|
||||
yield break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -208,7 +205,7 @@ namespace SharpCompress.Archives.Zip
|
||||
{
|
||||
var stream = Volumes.Single().Stream;
|
||||
stream.Position = 0;
|
||||
return ZipReader.Open(stream, ReaderOptions);
|
||||
return ZipReader.Open(stream);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,119 +0,0 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
#if NETCORE
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Threading;
|
||||
|
||||
namespace SharpCompress.Buffers
|
||||
{
|
||||
/// <summary>
|
||||
/// Provides a resource pool that enables reusing instances of type <see cref="T:T[]"/>.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// <para>
|
||||
/// Renting and returning buffers with an <see cref="ArrayPool{T}"/> can increase performance
|
||||
/// in situations where arrays are created and destroyed frequently, resulting in significant
|
||||
/// memory pressure on the garbage collector.
|
||||
/// </para>
|
||||
/// <para>
|
||||
/// This class is thread-safe. All members may be used by multiple threads concurrently.
|
||||
/// </para>
|
||||
/// </remarks>
|
||||
internal abstract class ArrayPool<T>
|
||||
{
|
||||
/// <summary>The lazily-initialized shared pool instance.</summary>
|
||||
private static ArrayPool<T> s_sharedInstance = null;
|
||||
|
||||
/// <summary>
|
||||
/// Retrieves a shared <see cref="ArrayPool{T}"/> instance.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// The shared pool provides a default implementation of <see cref="ArrayPool{T}"/>
|
||||
/// that's intended for general applicability. It maintains arrays of multiple sizes, and
|
||||
/// may hand back a larger array than was actually requested, but will never hand back a smaller
|
||||
/// array than was requested. Renting a buffer from it with <see cref="Rent"/> will result in an
|
||||
/// existing buffer being taken from the pool if an appropriate buffer is available or in a new
|
||||
/// buffer being allocated if one is not available.
|
||||
/// </remarks>
|
||||
public static ArrayPool<T> Shared
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
get { return Volatile.Read(ref s_sharedInstance) ?? EnsureSharedCreated(); }
|
||||
}
|
||||
|
||||
/// <summary>Ensures that <see cref="s_sharedInstance"/> has been initialized to a pool and returns it.</summary>
|
||||
[MethodImpl(MethodImplOptions.NoInlining)]
|
||||
private static ArrayPool<T> EnsureSharedCreated()
|
||||
{
|
||||
Interlocked.CompareExchange(ref s_sharedInstance, Create(), null);
|
||||
return s_sharedInstance;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new <see cref="ArrayPool{T}"/> instance using default configuration options.
|
||||
/// </summary>
|
||||
/// <returns>A new <see cref="ArrayPool{T}"/> instance.</returns>
|
||||
public static ArrayPool<T> Create()
|
||||
{
|
||||
return new DefaultArrayPool<T>();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new <see cref="ArrayPool{T}"/> instance using custom configuration options.
|
||||
/// </summary>
|
||||
/// <param name="maxArrayLength">The maximum length of array instances that may be stored in the pool.</param>
|
||||
/// <param name="maxArraysPerBucket">
|
||||
/// The maximum number of array instances that may be stored in each bucket in the pool. The pool
|
||||
/// groups arrays of similar lengths into buckets for faster access.
|
||||
/// </param>
|
||||
/// <returns>A new <see cref="ArrayPool{T}"/> instance with the specified configuration options.</returns>
|
||||
/// <remarks>
|
||||
/// The created pool will group arrays into buckets, with no more than <paramref name="maxArraysPerBucket"/>
|
||||
/// in each bucket and with those arrays not exceeding <paramref name="maxArrayLength"/> in length.
|
||||
/// </remarks>
|
||||
public static ArrayPool<T> Create(int maxArrayLength, int maxArraysPerBucket)
|
||||
{
|
||||
return new DefaultArrayPool<T>(maxArrayLength, maxArraysPerBucket);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Retrieves a buffer that is at least the requested length.
|
||||
/// </summary>
|
||||
/// <param name="minimumLength">The minimum length of the array needed.</param>
|
||||
/// <returns>
|
||||
/// An <see cref="T:T[]"/> that is at least <paramref name="minimumLength"/> in length.
|
||||
/// </returns>
|
||||
/// <remarks>
|
||||
/// This buffer is loaned to the caller and should be returned to the same pool via
|
||||
/// <see cref="Return"/> so that it may be reused in subsequent usage of <see cref="Rent"/>.
|
||||
/// It is not a fatal error to not return a rented buffer, but failure to do so may lead to
|
||||
/// decreased application performance, as the pool may need to create a new buffer to replace
|
||||
/// the one lost.
|
||||
/// </remarks>
|
||||
public abstract T[] Rent(int minimumLength);
|
||||
|
||||
/// <summary>
|
||||
/// Returns to the pool an array that was previously obtained via <see cref="Rent"/> on the same
|
||||
/// <see cref="ArrayPool{T}"/> instance.
|
||||
/// </summary>
|
||||
/// <param name="array">
|
||||
/// The buffer previously obtained from <see cref="Rent"/> to return to the pool.
|
||||
/// </param>
|
||||
/// <param name="clearArray">
|
||||
/// If <c>true</c> and if the pool will store the buffer to enable subsequent reuse, <see cref="Return"/>
|
||||
/// will clear <paramref name="array"/> of its contents so that a subsequent consumer via <see cref="Rent"/>
|
||||
/// will not see the previous consumer's content. If <c>false</c> or if the pool will release the buffer,
|
||||
/// the array's contents are left unchanged.
|
||||
/// </param>
|
||||
/// <remarks>
|
||||
/// Once a buffer has been returned to the pool, the caller gives up all ownership of the buffer
|
||||
/// and must not use it. The reference returned from a given call to <see cref="Rent"/> must only be
|
||||
/// returned via <see cref="Return"/> once. The default <see cref="ArrayPool{T}"/>
|
||||
/// may hold onto the returned buffer in order to rent it again, or it may release the returned buffer
|
||||
/// if it's determined that the pool already has enough buffers stored.
|
||||
/// </remarks>
|
||||
public abstract void Return(T[] array, bool clearArray = false);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@@ -1,144 +0,0 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
#if NETCORE
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Buffers
|
||||
{
|
||||
internal sealed partial class DefaultArrayPool<T> : ArrayPool<T>
|
||||
{
|
||||
/// <summary>The default maximum length of each array in the pool (2^20).</summary>
|
||||
private const int DefaultMaxArrayLength = 1024 * 1024;
|
||||
/// <summary>The default maximum number of arrays per bucket that are available for rent.</summary>
|
||||
private const int DefaultMaxNumberOfArraysPerBucket = 50;
|
||||
/// <summary>Lazily-allocated empty array used when arrays of length 0 are requested.</summary>
|
||||
private static T[] s_emptyArray; // we support contracts earlier than those with Array.Empty<T>()
|
||||
|
||||
private readonly Bucket[] _buckets;
|
||||
|
||||
internal DefaultArrayPool() : this(DefaultMaxArrayLength, DefaultMaxNumberOfArraysPerBucket)
|
||||
{
|
||||
}
|
||||
|
||||
internal DefaultArrayPool(int maxArrayLength, int maxArraysPerBucket)
|
||||
{
|
||||
if (maxArrayLength <= 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(maxArrayLength));
|
||||
}
|
||||
if (maxArraysPerBucket <= 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(maxArraysPerBucket));
|
||||
}
|
||||
|
||||
// Our bucketing algorithm has a min length of 2^4 and a max length of 2^30.
|
||||
// Constrain the actual max used to those values.
|
||||
const int MinimumArrayLength = 0x10, MaximumArrayLength = 0x40000000;
|
||||
if (maxArrayLength > MaximumArrayLength)
|
||||
{
|
||||
maxArrayLength = MaximumArrayLength;
|
||||
}
|
||||
else if (maxArrayLength < MinimumArrayLength)
|
||||
{
|
||||
maxArrayLength = MinimumArrayLength;
|
||||
}
|
||||
|
||||
// Create the buckets.
|
||||
int poolId = Id;
|
||||
int maxBuckets = Utilities.SelectBucketIndex(maxArrayLength);
|
||||
var buckets = new Bucket[maxBuckets + 1];
|
||||
for (int i = 0; i < buckets.Length; i++)
|
||||
{
|
||||
buckets[i] = new Bucket(Utilities.GetMaxSizeForBucket(i), maxArraysPerBucket, poolId);
|
||||
}
|
||||
_buckets = buckets;
|
||||
}
|
||||
|
||||
/// <summary>Gets an ID for the pool to use with events.</summary>
|
||||
private int Id => GetHashCode();
|
||||
|
||||
public override T[] Rent(int minimumLength)
|
||||
{
|
||||
// Arrays can't be smaller than zero. We allow requesting zero-length arrays (even though
|
||||
// pooling such an array isn't valuable) as it's a valid length array, and we want the pool
|
||||
// to be usable in general instead of using `new`, even for computed lengths.
|
||||
if (minimumLength < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(minimumLength));
|
||||
}
|
||||
else if (minimumLength == 0)
|
||||
{
|
||||
// No need for events with the empty array. Our pool is effectively infinite
|
||||
// and we'll never allocate for rents and never store for returns.
|
||||
return s_emptyArray ?? (s_emptyArray = new T[0]);
|
||||
}
|
||||
|
||||
T[] buffer = null;
|
||||
|
||||
int index = Utilities.SelectBucketIndex(minimumLength);
|
||||
if (index < _buckets.Length)
|
||||
{
|
||||
// Search for an array starting at the 'index' bucket. If the bucket is empty, bump up to the
|
||||
// next higher bucket and try that one, but only try at most a few buckets.
|
||||
const int MaxBucketsToTry = 2;
|
||||
int i = index;
|
||||
do
|
||||
{
|
||||
// Attempt to rent from the bucket. If we get a buffer from it, return it.
|
||||
buffer = _buckets[i].Rent();
|
||||
if (buffer != null)
|
||||
{
|
||||
return buffer;
|
||||
}
|
||||
}
|
||||
while (++i < _buckets.Length && i != index + MaxBucketsToTry);
|
||||
|
||||
// The pool was exhausted for this buffer size. Allocate a new buffer with a size corresponding
|
||||
// to the appropriate bucket.
|
||||
buffer = new T[_buckets[index]._bufferLength];
|
||||
}
|
||||
else
|
||||
{
|
||||
// The request was for a size too large for the pool. Allocate an array of exactly the requested length.
|
||||
// When it's returned to the pool, we'll simply throw it away.
|
||||
buffer = new T[minimumLength];
|
||||
}
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
public override void Return(T[] array, bool clearArray = false)
|
||||
{
|
||||
if (array == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(array));
|
||||
}
|
||||
else if (array.Length == 0)
|
||||
{
|
||||
// Ignore empty arrays. When a zero-length array is rented, we return a singleton
|
||||
// rather than actually taking a buffer out of the lowest bucket.
|
||||
return;
|
||||
}
|
||||
|
||||
// Determine with what bucket this array length is associated
|
||||
int bucket = Utilities.SelectBucketIndex(array.Length);
|
||||
|
||||
// If we can tell that the buffer was allocated, drop it. Otherwise, check if we have space in the pool
|
||||
if (bucket < _buckets.Length)
|
||||
{
|
||||
// Clear the array if the user requests
|
||||
if (clearArray)
|
||||
{
|
||||
Array.Clear(array, 0, array.Length);
|
||||
}
|
||||
|
||||
// Return the buffer to its bucket. In the future, we might consider having Return return false
|
||||
// instead of dropping a bucket, in which case we could try to return to a lower-sized bucket,
|
||||
// just as how in Rent we allow renting from a higher-sized bucket.
|
||||
_buckets[bucket].Return(array);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@@ -1,111 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#if NETCORE
using System;
using System.Diagnostics;
using System.Threading;

namespace SharpCompress.Buffers
{
internal sealed partial class DefaultArrayPool<T> : ArrayPool<T>
{
/// <summary>Provides a thread-safe bucket containing buffers that can be Rent'd and Return'd.</summary>
private sealed class Bucket
{
internal readonly int _bufferLength;
private readonly T[][] _buffers;
private readonly int _poolId;

private SpinLock _lock; // do not make this readonly; it's a mutable struct
private int _index;

/// <summary>
/// Creates the pool with numberOfBuffers arrays where each buffer is of bufferLength length.
/// </summary>
internal Bucket(int bufferLength, int numberOfBuffers, int poolId)
{
_lock = new SpinLock(Debugger.IsAttached); // only enable thread tracking if debugger is attached; it adds non-trivial overheads to Enter/Exit
_buffers = new T[numberOfBuffers][];
_bufferLength = bufferLength;
_poolId = poolId;
}

/// <summary>Gets an ID for the bucket to use with events.</summary>
internal int Id => GetHashCode();

/// <summary>Takes an array from the bucket. If the bucket is empty, returns null.</summary>
internal T[] Rent()
{
T[][] buffers = _buffers;
T[] buffer = null;

// While holding the lock, grab whatever is at the next available index and
// update the index. We do as little work as possible while holding the spin
// lock to minimize contention with other threads. The try/finally is
// necessary to properly handle thread aborts on platforms which have them.
bool lockTaken = false, allocateBuffer = false;
try
{
_lock.Enter(ref lockTaken);

if (_index < buffers.Length)
{
buffer = buffers[_index];
buffers[_index++] = null;
allocateBuffer = buffer == null;
}
}
finally
{
if (lockTaken) _lock.Exit(false);
}

// While we were holding the lock, we grabbed whatever was at the next available index, if
// there was one. If we tried and if we got back null, that means we hadn't yet allocated
// for that slot, in which case we should do so now.
if (allocateBuffer)
{
buffer = new T[_bufferLength];
}

return buffer;
}

/// <summary>
/// Attempts to return the buffer to the bucket. If successful, the buffer will be stored
/// in the bucket and true will be returned; otherwise, the buffer won't be stored, and false
/// will be returned.
/// </summary>
internal void Return(T[] array)
{
// Check to see if the buffer is the correct size for this bucket
if (array.Length != _bufferLength)
{
throw new ArgumentException("Buffer not from pool", nameof(array));
}

// While holding the spin lock, if there's room available in the bucket,
// put the buffer into the next available slot. Otherwise, we just drop it.
// The try/finally is necessary to properly handle thread aborts on platforms
// which have them.
bool lockTaken = false;
try
{
_lock.Enter(ref lockTaken);

if (_index != 0)
{
_buffers[--_index] = array;
}
}
finally
{
if (lockTaken) _lock.Exit(false);
}
}
}
}
}
#endif
@@ -1,38 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#if NETCORE
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace SharpCompress.Buffers
{
internal static class Utilities
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static int SelectBucketIndex(int bufferSize)
{
Debug.Assert(bufferSize > 0);

uint bitsRemaining = ((uint)bufferSize - 1) >> 4;

int poolIndex = 0;
if (bitsRemaining > 0xFFFF) { bitsRemaining >>= 16; poolIndex = 16; }
if (bitsRemaining > 0xFF) { bitsRemaining >>= 8; poolIndex += 8; }
if (bitsRemaining > 0xF) { bitsRemaining >>= 4; poolIndex += 4; }
if (bitsRemaining > 0x3) { bitsRemaining >>= 2; poolIndex += 2; }
if (bitsRemaining > 0x1) { bitsRemaining >>= 1; poolIndex += 1; }

return poolIndex + (int)bitsRemaining;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static int GetMaxSizeForBucket(int binIndex)
{
int maxSize = 16 << binIndex;
Debug.Assert(maxSize >= 0);
return maxSize;
}
}
}
#endif
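The two helpers above map a requested length to a power-of-two bucket that starts at 16 elements. A few illustrative values, worked out from the bit math rather than taken from the library's tests:

```csharp
// SelectBucketIndex(1)..SelectBucketIndex(16)  -> 0   GetMaxSizeForBucket(0) == 16
// SelectBucketIndex(17)..SelectBucketIndex(32) -> 1   GetMaxSizeForBucket(1) == 32
// SelectBucketIndex(1024)                      -> 6   GetMaxSizeForBucket(6) == 1024
// SelectBucketIndex(1025)                      -> 7   GetMaxSizeForBucket(7) == 2048
```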
@@ -1,60 +1,23 @@
using System;
using System.Text;
using System.Text;

namespace SharpCompress.Common
{
public class ArchiveEncoding
public static class ArchiveEncoding
{
/// <summary>
/// Default encoding to use when archive format doesn't specify one.
/// </summary>
public Encoding Default { get; set; }
public static Encoding Default { get; set; }

/// <summary>
/// ArchiveEncoding used by encryption schemes which don't comply with RFC 2898.
/// Encoding used by encryption schemes which don't comply with RFC 2898.
/// </summary>
public Encoding Password { get; set; }
public static Encoding Password { get; set; }

/// <summary>
/// Set this encoding when you want to force it for all encoding operations.
/// </summary>
public Encoding Forced { get; set; }

/// <summary>
/// Set this when you want to use a custom method for all decoding operations.
/// </summary>
/// <returns>string Func(bytes, index, length)</returns>
public Func<byte[], int, int, string> CustomDecoder { get; set; }

public ArchiveEncoding()
static ArchiveEncoding()
{
Default = Encoding.UTF8;
Password = Encoding.UTF8;
}

public string Decode(byte[] bytes)
{
return Decode(bytes, 0, bytes.Length);
}

public string Decode(byte[] bytes, int start, int length)
{
return GetDecoder().Invoke(bytes, start, length);
}

public byte[] Encode(string str)
{
return GetEncoding().GetBytes(str);
}

public Encoding GetEncoding()
{
return Forced ?? Default ?? Encoding.UTF8;
}

public Func<byte[], int, int, string> GetDecoder()
{
return CustomDecoder ?? ((bytes, index, count) => (Default ?? Encoding.UTF8).GetString(bytes, index, count));
}
}
}
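The change above turns `ArchiveEncoding` from an options-scoped instance into a static holder, so on this branch the defaults become process-wide rather than travelling on `OptionsBase`. A minimal sketch, assuming only the `Default` and `Password` properties shown in the hunk:

```csharp
using System.Text;
using SharpCompress.Common;

internal static class EncodingSetupSketch
{
    internal static void UseUtf8Everywhere()
    {
        // Process-wide on this branch; the instance version would instead be set
        // on OptionsBase.ArchiveEncoding per reader/archive.
        ArchiveEncoding.Default = Encoding.UTF8;
        ArchiveEncoding.Password = Encoding.UTF8;
    }
}
```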
@@ -9,6 +9,6 @@ namespace SharpCompress.Common
Item = entry;
}

public T Item { get; }
public T Item { get; private set; }
}
}
@@ -12,8 +12,6 @@
BCJ,
BCJ2,
LZip,
Xz,
Unknown,
Deflate64
Unknown
}
}
@@ -65,12 +65,6 @@ namespace SharpCompress.Common
/// </summary>
public abstract bool IsSplit { get; }

/// <inheritdoc/>
public override string ToString()
{
return this.Key;
}

internal abstract IEnumerable<FilePart> Parts { get; }
internal bool IsSolid { get; set; }

@@ -4,17 +4,9 @@ namespace SharpCompress.Common
{
public abstract class FilePart
{
protected FilePart(ArchiveEncoding archiveEncoding)
{
ArchiveEncoding = archiveEncoding;
}

internal ArchiveEncoding ArchiveEncoding { get; }

internal abstract string FilePartName { get; }

internal abstract Stream GetCompressedStream();
internal abstract Stream GetRawStream();
internal bool Skipped { get; set; }
}
}
@@ -1,7 +1,6 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;

namespace SharpCompress.Common.GZip
{
@@ -40,9 +39,9 @@ namespace SharpCompress.Common.GZip

internal override IEnumerable<FilePart> Parts => filePart.AsEnumerable<FilePart>();

internal static IEnumerable<GZipEntry> GetEntries(Stream stream, OptionsBase options)
internal static IEnumerable<GZipEntry> GetEntries(Stream stream)
{
yield return new GZipEntry(new GZipFilePart(stream, options.ArchiveEncoding));
yield return new GZipEntry(new GZipFilePart(stream));
}
}
}
@@ -5,37 +5,35 @@ using SharpCompress.Common.Tar.Headers;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Converters;
using System.Text;

namespace SharpCompress.Common.GZip
{
internal class GZipFilePart : FilePart
{
private string _name;
private readonly Stream _stream;
private string name;
private readonly Stream stream;

internal GZipFilePart(Stream stream, ArchiveEncoding archiveEncoding)
: base(archiveEncoding)
internal GZipFilePart(Stream stream)
{
ReadAndValidateGzipHeader(stream);
EntryStartPosition = stream.Position;
this._stream = stream;
this.stream = stream;
}

internal long EntryStartPosition { get; }

internal DateTime? DateModified { get; private set; }

internal override string FilePartName => _name;
internal override string FilePartName => name;

internal override Stream GetCompressedStream()
{
return new DeflateStream(_stream, CompressionMode.Decompress, CompressionLevel.Default, false);
return new DeflateStream(stream, CompressionMode.Decompress, CompressionLevel.Default, false);
}

internal override Stream GetRawStream()
{
return _stream;
return stream;
}

private void ReadAndValidateGzipHeader(Stream stream)
@@ -69,16 +67,15 @@ namespace SharpCompress.Common.GZip

Int16 extraLength = (Int16)(header[0] + header[1] * 256);
byte[] extra = new byte[extraLength];

if (!stream.ReadFully(extra))
n = stream.Read(extra, 0, extra.Length);
if (n != extraLength)
{
throw new ZlibException("Unexpected end-of-file reading GZIP header.");
}
n = extraLength;
}
if ((header[3] & 0x08) == 0x08)
{
_name = ReadZeroTerminatedString(stream);
name = ReadZeroTerminatedString(stream);
}
if ((header[3] & 0x10) == 0x010)
{
@@ -90,7 +87,7 @@ namespace SharpCompress.Common.GZip
}
}

private string ReadZeroTerminatedString(Stream stream)
private static string ReadZeroTerminatedString(Stream stream)
{
byte[] buf1 = new byte[1];
var list = new List<byte>();
@@ -113,8 +110,8 @@ namespace SharpCompress.Common.GZip
}
}
while (!done);
byte[] buffer = list.ToArray();
return ArchiveEncoding.Decode(buffer);
byte[] a = list.ToArray();
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
}
}
}
@@ -1,5 +1,4 @@

namespace SharpCompress.Common
namespace SharpCompress.Common
{
public class OptionsBase
{
@@ -7,7 +6,5 @@ namespace SharpCompress.Common
/// SharpCompress will keep the supplied streams open. Default is true.
/// </summary>
public bool LeaveStreamOpen { get; set; } = true;

public ArchiveEncoding ArchiveEncoding { get; set; } = new ArchiveEncoding();
}
}
@@ -1,6 +1,6 @@
using SharpCompress.IO;
using System;
using System.IO;
using SharpCompress.IO;

namespace SharpCompress.Common.Rar.Headers
{
@@ -52,50 +52,50 @@ namespace SharpCompress.Common.Rar.Headers
switch (HeaderType)
{
case HeaderType.FileHeader:
{
if (FileFlags.HasFlag(FileFlags.UNICODE))
{
if (FileFlags.HasFlag(FileFlags.UNICODE))
int length = 0;
while (length < fileNameBytes.Length
&& fileNameBytes[length] != 0)
{
int length = 0;
while (length < fileNameBytes.Length
&& fileNameBytes[length] != 0)
{
length++;
}
if (length != nameSize)
{
length++;
FileName = FileNameDecoder.Decode(fileNameBytes, length);
}
else
{
FileName = ArchiveEncoding.Decode(fileNameBytes);
}
length++;
}
if (length != nameSize)
{
length++;
FileName = FileNameDecoder.Decode(fileNameBytes, length);
}
else
{
FileName = ArchiveEncoding.Decode(fileNameBytes);
FileName = DecodeDefault(fileNameBytes);
}
FileName = ConvertPath(FileName, HostOS);
}
else
{
FileName = DecodeDefault(fileNameBytes);
}
FileName = ConvertPath(FileName, HostOS);
}
break;
case HeaderType.NewSubHeader:
{
int datasize = HeaderSize - NEWLHD_SIZE - nameSize;
if (FileFlags.HasFlag(FileFlags.SALT))
{
int datasize = HeaderSize - NEWLHD_SIZE - nameSize;
if (FileFlags.HasFlag(FileFlags.SALT))
{
datasize -= SALT_SIZE;
}
if (datasize > 0)
{
SubData = reader.ReadBytes(datasize);
}

if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes))
{
RecoverySectors = SubData[8] + (SubData[9] << 8)
+ (SubData[10] << 16) + (SubData[11] << 24);
}
datasize -= SALT_SIZE;
}
if (datasize > 0)
{
SubData = reader.ReadBytes(datasize);
}

if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes))
{
RecoverySectors = SubData[8] + (SubData[9] << 8)
+ (SubData[10] << 16) + (SubData[11] << 24);
}
}
break;
}
@@ -118,6 +118,12 @@ namespace SharpCompress.Common.Rar.Headers
}
}

//only the full .net framework will do other code pages than unicode/utf8
private string DecodeDefault(byte[] bytes)
{
return ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length);
}

private long UInt32To64(uint x, uint y)
{
long l = x;
@@ -159,20 +165,31 @@ namespace SharpCompress.Common.Rar.Headers
#if NO_FILE
return path.Replace('\\', '/');
#else
if (Path.DirectorySeparatorChar == '/')
switch (os)
{
return path.Replace('\\', '/');
}
else if (Path.DirectorySeparatorChar == '\\')
{
return path.Replace('/', '\\');
case HostOS.MacOS:
case HostOS.Unix:
{
if (Path.DirectorySeparatorChar == '\\')
{
return path.Replace('/', '\\');
}
}
break;
default:
{
if (Path.DirectorySeparatorChar == '/')
{
return path.Replace('\\', '/');
}
}
break;
}
return path;
#endif
}

internal long DataStartPosition { get; set; }

internal HostOS HostOS { get; private set; }

internal uint FileCRC { get; private set; }
@@ -194,7 +211,6 @@ namespace SharpCompress.Common.Rar.Headers
internal FileFlags FileFlags => (FileFlags)Flags;

internal long CompressedSize { get; private set; }

internal long UncompressedSize { get; private set; }

internal string FileName { get; private set; }

@@ -18,9 +18,9 @@ namespace SharpCompress.Common.Rar.Headers
Flags == 0x1A21 &&
HeaderSize == 0x07;

// Rar5 signature: 52 61 72 21 1A 07 01 00 (not supported yet)
// Rar5 signature: 52 61 72 21 1A 07 10 00 (not supported yet)
}

internal bool OldFormat { get; private set; }
}
}
}
@@ -1,7 +1,6 @@
using System;
using System.IO;
using SharpCompress.IO;
using System.Text;

namespace SharpCompress.Common.Rar.Headers
{
@@ -18,16 +17,14 @@ namespace SharpCompress.Common.Rar.Headers
HeaderSize = baseHeader.HeaderSize;
AdditionalSize = baseHeader.AdditionalSize;
ReadBytes = baseHeader.ReadBytes;
ArchiveEncoding = baseHeader.ArchiveEncoding;
}

internal static RarHeader Create(RarCrcBinaryReader reader, ArchiveEncoding archiveEncoding)
internal static RarHeader Create(RarCrcBinaryReader reader)
{
try
{
RarHeader header = new RarHeader();

header.ArchiveEncoding = archiveEncoding;
reader.Mark();
header.ReadStartFromReader(reader);
header.ReadBytes += reader.CurrentReadByteCount;
@@ -53,8 +50,7 @@ namespace SharpCompress.Common.Rar.Headers
}
}

protected virtual void ReadFromReader(MarkingBinaryReader reader)
{
protected virtual void ReadFromReader(MarkingBinaryReader reader) {
throw new NotImplementedException();
}

@@ -80,11 +76,10 @@ namespace SharpCompress.Common.Rar.Headers
return header;
}

private void VerifyHeaderCrc(ushort crc)
{
if (HeaderType != HeaderType.MarkHeader)
private void VerifyHeaderCrc(ushort crc) {
if (HeaderType != HeaderType.MarkHeader)
{
if (crc != HeadCRC)
if (crc != HeadCRC)
{
throw new InvalidFormatException("rar header crc mismatch");
}
@@ -111,8 +106,6 @@ namespace SharpCompress.Common.Rar.Headers

protected short HeaderSize { get; private set; }

internal ArchiveEncoding ArchiveEncoding { get; private set; }

/// <summary>
/// This additional size of the header could be file data
/// </summary>
@@ -117,7 +117,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
#if !NO_CRYPTO
|
||||
var reader = new RarCryptoBinaryReader(stream, Options.Password);
|
||||
|
||||
|
||||
if (IsEncrypted)
|
||||
{
|
||||
if (Options.Password == null)
|
||||
@@ -133,7 +133,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
|
||||
#endif
|
||||
|
||||
RarHeader header = RarHeader.Create(reader, Options.ArchiveEncoding);
|
||||
RarHeader header = RarHeader.Create(reader);
|
||||
if (header == null)
|
||||
{
|
||||
return null;
|
||||
@@ -141,110 +141,110 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
switch (header.HeaderType)
|
||||
{
|
||||
case HeaderType.ArchiveHeader:
|
||||
{
|
||||
var ah = header.PromoteHeader<ArchiveHeader>(reader);
|
||||
IsEncrypted = ah.HasPassword;
|
||||
return ah;
|
||||
}
|
||||
{
|
||||
var ah = header.PromoteHeader<ArchiveHeader>(reader);
|
||||
IsEncrypted = ah.HasPassword;
|
||||
return ah;
|
||||
}
|
||||
case HeaderType.MarkHeader:
|
||||
{
|
||||
return header.PromoteHeader<MarkHeader>(reader);
|
||||
}
|
||||
{
|
||||
return header.PromoteHeader<MarkHeader>(reader);
|
||||
}
|
||||
|
||||
case HeaderType.ProtectHeader:
|
||||
{
|
||||
ProtectHeader ph = header.PromoteHeader<ProtectHeader>(reader);
|
||||
|
||||
// skip the recovery record data, we do not use it.
|
||||
switch (StreamingMode)
|
||||
{
|
||||
ProtectHeader ph = header.PromoteHeader<ProtectHeader>(reader);
|
||||
|
||||
// skip the recovery record data, we do not use it.
|
||||
switch (StreamingMode)
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
reader.BaseStream.Position += ph.DataSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
reader.BaseStream.Skip(ph.DataSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
reader.BaseStream.Position += ph.DataSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
reader.BaseStream.Skip(ph.DataSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
|
||||
return ph;
|
||||
}
|
||||
|
||||
return ph;
|
||||
}
|
||||
|
||||
case HeaderType.NewSubHeader:
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
//skip the data because it's useless?
|
||||
reader.BaseStream.Skip(fh.CompressedSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
case HeaderType.FileHeader:
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize);
|
||||
if (fh.Salt == null)
|
||||
{
|
||||
fh.PackedStream = ms;
|
||||
}
|
||||
else
|
||||
{
|
||||
//skip the data because it's useless?
|
||||
reader.BaseStream.Skip(fh.CompressedSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
case HeaderType.FileHeader:
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize);
|
||||
if (fh.Salt == null)
|
||||
{
|
||||
fh.PackedStream = ms;
|
||||
}
|
||||
else
|
||||
{
|
||||
#if !NO_CRYPTO
|
||||
fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.Salt);
|
||||
fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.Salt);
|
||||
#else
|
||||
throw new NotSupportedException("RarCrypto not supported");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
case HeaderType.EndArchiveHeader:
|
||||
{
|
||||
return header.PromoteHeader<EndArchiveHeader>(reader);
|
||||
}
|
||||
{
|
||||
return header.PromoteHeader<EndArchiveHeader>(reader);
|
||||
}
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid Rar Header: " + header.HeaderType);
|
||||
}
|
||||
{
|
||||
throw new InvalidFormatException("Invalid Rar Header: " + header.HeaderType);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,15 +9,14 @@ namespace SharpCompress.Common.Rar
|
||||
internal abstract class RarFilePart : FilePart
|
||||
{
|
||||
internal RarFilePart(MarkHeader mh, FileHeader fh)
|
||||
: base(fh.ArchiveEncoding)
|
||||
{
|
||||
MarkHeader = mh;
|
||||
FileHeader = fh;
|
||||
}
|
||||
|
||||
internal MarkHeader MarkHeader { get; }
|
||||
internal MarkHeader MarkHeader { get; private set; }
|
||||
|
||||
internal FileHeader FileHeader { get; }
|
||||
internal FileHeader FileHeader { get; private set; }
|
||||
|
||||
internal override Stream GetRawStream()
|
||||
{
|
||||
|
||||
@@ -11,7 +11,7 @@ namespace SharpCompress.Common
|
||||
ReaderProgress = readerProgress;
|
||||
}
|
||||
|
||||
public T Item { get; }
|
||||
public ReaderProgress ReaderProgress { get; }
|
||||
public T Item { get; private set; }
|
||||
public ReaderProgress ReaderProgress { get; private set; }
|
||||
}
|
||||
}
|
||||
@@ -22,13 +22,6 @@ namespace SharpCompress.Common.SevenZip
|
||||
internal List<long> PackStreamStartPositions = new List<long>();
|
||||
internal List<int> FolderStartFileIndex = new List<int>();
|
||||
internal List<int> FileIndexToFolderIndexMap = new List<int>();
|
||||
|
||||
internal IPasswordProvider PasswordProvider { get; }
|
||||
|
||||
public ArchiveDatabase(IPasswordProvider passwordProvider)
|
||||
{
|
||||
PasswordProvider = passwordProvider;
|
||||
}
|
||||
|
||||
internal void Clear()
|
||||
{
|
||||
|
||||
@@ -182,7 +182,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
|
||||
private DateTime? TranslateTime(long? time)
|
||||
{
|
||||
if (time.HasValue && time.Value >= 0 && time.Value <= 2650467743999999999) //maximum Windows file time 31.12.9999
|
||||
if (time.HasValue)
|
||||
{
|
||||
return TranslateTime(time.Value);
|
||||
}
|
||||
@@ -1211,7 +1211,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
|
||||
public ArchiveDatabase ReadDatabase(IPasswordProvider pass)
|
||||
{
|
||||
var db = new ArchiveDatabase(pass);
|
||||
var db = new ArchiveDatabase();
|
||||
db.Clear();
|
||||
|
||||
db.MajorVersion = _header[6];
|
||||
@@ -1279,7 +1279,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
throw new InvalidOperationException();
|
||||
}
|
||||
|
||||
var dataVector = ReadAndDecodePackedStreams(db.StartPositionAfterHeader, db.PasswordProvider);
|
||||
var dataVector = ReadAndDecodePackedStreams(db.StartPositionAfterHeader, pass);
|
||||
|
||||
// compressed header without content is odd but ok
|
||||
if (dataVector.Count == 0)
|
||||
@@ -1301,7 +1301,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
}
|
||||
}
|
||||
|
||||
ReadHeader(db, db.PasswordProvider);
|
||||
ReadHeader(db, pass);
|
||||
}
|
||||
db.Fill();
|
||||
return db;
|
||||
@@ -1441,7 +1441,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
#endregion
|
||||
}
|
||||
|
||||
private Stream GetCachedDecoderStream(ArchiveDatabase _db, int folderIndex)
|
||||
private Stream GetCachedDecoderStream(ArchiveDatabase _db, int folderIndex, IPasswordProvider pw)
|
||||
{
|
||||
Stream s;
|
||||
if (!_cachedStreams.TryGetValue(folderIndex, out s))
|
||||
@@ -1456,13 +1456,13 @@ namespace SharpCompress.Common.SevenZip
|
||||
}
|
||||
|
||||
s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes.ToArray(), folderInfo,
|
||||
_db.PasswordProvider);
|
||||
pw);
|
||||
_cachedStreams.Add(folderIndex, s);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
public Stream OpenStream(ArchiveDatabase _db, int fileIndex)
|
||||
public Stream OpenStream(ArchiveDatabase _db, int fileIndex, IPasswordProvider pw)
|
||||
{
|
||||
int folderIndex = _db.FileIndexToFolderIndexMap[fileIndex];
|
||||
int numFilesInFolder = _db.NumUnpackStreamsVector[folderIndex];
|
||||
@@ -1479,12 +1479,12 @@ namespace SharpCompress.Common.SevenZip
|
||||
skipSize += _db.Files[firstFileIndex + i].Size;
|
||||
}
|
||||
|
||||
Stream s = GetCachedDecoderStream(_db, folderIndex);
|
||||
Stream s = GetCachedDecoderStream(_db, folderIndex, pw);
|
||||
s.Position = skipSize;
|
||||
return new ReadOnlySubStream(s, _db.Files[fileIndex].Size);
|
||||
}
|
||||
|
||||
public void Extract(ArchiveDatabase _db, int[] indices)
|
||||
public void Extract(ArchiveDatabase _db, int[] indices, IPasswordProvider pw)
|
||||
{
|
||||
int numItems;
|
||||
bool allFilesMode = (indices == null);
|
||||
@@ -1562,7 +1562,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
// TODO: If the decoding fails the last file may be extracted incompletely. Delete it?
|
||||
|
||||
Stream s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes.ToArray(),
|
||||
folderInfo, _db.PasswordProvider);
|
||||
folderInfo, pw);
|
||||
byte[] buffer = new byte[4 << 10];
|
||||
for (;;)
|
||||
{
|
||||
@@ -1588,4 +1588,4 @@ namespace SharpCompress.Common.SevenZip
|
||||
|
||||
#endregion
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,15 +7,14 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
internal class SevenZipFilePart : FilePart
|
||||
{
|
||||
private CompressionType? _type;
|
||||
private readonly Stream _stream;
|
||||
private readonly ArchiveDatabase _database;
|
||||
private CompressionType? type;
|
||||
private readonly Stream stream;
|
||||
private readonly ArchiveDatabase database;
|
||||
|
||||
internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry, ArchiveEncoding archiveEncoding)
|
||||
: base(archiveEncoding)
|
||||
internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry)
|
||||
{
|
||||
this._stream = stream;
|
||||
this._database = database;
|
||||
this.stream = stream;
|
||||
this.database = database;
|
||||
Index = index;
|
||||
Header = fileEntry;
|
||||
if (Header.HasStream)
|
||||
@@ -42,14 +41,14 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
return null;
|
||||
}
|
||||
var folderStream = _database.GetFolderStream(_stream, Folder, _database.PasswordProvider);
|
||||
var folderStream = database.GetFolderStream(stream, Folder, null);
|
||||
|
||||
int firstFileIndex = _database.FolderStartFileIndex[_database.Folders.IndexOf(Folder)];
|
||||
int firstFileIndex = database.FolderStartFileIndex[database.Folders.IndexOf(Folder)];
|
||||
int skipCount = Index - firstFileIndex;
|
||||
long skipSize = 0;
|
||||
for (int i = 0; i < skipCount; i++)
|
||||
{
|
||||
skipSize += _database.Files[firstFileIndex + i].Size;
|
||||
skipSize += database.Files[firstFileIndex + i].Size;
|
||||
}
|
||||
if (skipSize > 0)
|
||||
{
|
||||
@@ -62,11 +61,11 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_type == null)
|
||||
if (type == null)
|
||||
{
|
||||
_type = GetCompression();
|
||||
type = GetCompression();
|
||||
}
|
||||
return _type.Value;
|
||||
return type.Value;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,7 +84,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
var coder = Folder.Coders.First();
|
||||
switch (coder.MethodId.Id)
|
||||
{
|
||||
{
|
||||
case k_LZMA:
|
||||
case k_LZMA2:
|
||||
{
|
||||
|
||||
@@ -9,11 +9,6 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
{
|
||||
internal static readonly DateTime Epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
|
||||
public TarHeader(ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
internal string Name { get; set; }
|
||||
|
||||
//internal int Mode { get; set; }
|
||||
@@ -25,7 +20,6 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
internal DateTime LastModifiedTime { get; set; }
|
||||
internal EntryType EntryType { get; set; }
|
||||
internal Stream PackedStream { get; set; }
|
||||
internal ArchiveEncoding ArchiveEncoding { get; }
|
||||
|
||||
internal const int BlockSize = 512;
|
||||
|
||||
@@ -37,7 +31,7 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
WriteOctalBytes(0, buffer, 108, 8); // owner ID
|
||||
WriteOctalBytes(0, buffer, 116, 8); // group ID
|
||||
|
||||
//ArchiveEncoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
|
||||
//Encoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
|
||||
if (Name.Length > 100)
|
||||
{
|
||||
// Set mock filename and filetype to indicate the next block is the actual name of the file
|
||||
@@ -78,7 +72,7 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
|
||||
private void WriteLongFilenameHeader(Stream output)
|
||||
{
|
||||
byte[] nameBytes = ArchiveEncoding.Encode(Name);
|
||||
byte[] nameBytes = ArchiveEncoding.Default.GetBytes(Name);
|
||||
output.Write(nameBytes, 0, nameBytes.Length);
|
||||
|
||||
// pad to multiple of BlockSize bytes, and make sure a terminating null is added
|
||||
@@ -105,7 +99,7 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
}
|
||||
else
|
||||
{
|
||||
Name = ArchiveEncoding.Decode(buffer, 0, 100).TrimNulls();
|
||||
Name = ArchiveEncoding.Default.GetString(buffer, 0, 100).TrimNulls();
|
||||
}
|
||||
|
||||
EntryType = ReadEntryType(buffer);
|
||||
@@ -117,12 +111,12 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
long unixTimeStamp = ReadASCIIInt64Base8(buffer, 136, 11);
|
||||
LastModifiedTime = Epoch.AddSeconds(unixTimeStamp).ToLocalTime();
|
||||
|
||||
Magic = ArchiveEncoding.Decode(buffer, 257, 6).TrimNulls();
|
||||
Magic = ArchiveEncoding.Default.GetString(buffer, 257, 6).TrimNulls();
|
||||
|
||||
if (!string.IsNullOrEmpty(Magic)
|
||||
&& "ustar".Equals(Magic))
|
||||
{
|
||||
string namePrefix = ArchiveEncoding.Decode(buffer, 345, 157);
|
||||
string namePrefix = ArchiveEncoding.Default.GetString(buffer, 345, 157);
|
||||
namePrefix = namePrefix.TrimNulls();
|
||||
if (!string.IsNullOrEmpty(namePrefix))
|
||||
{
|
||||
@@ -149,7 +143,7 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
{
|
||||
reader.ReadBytes(remainingBytesToRead);
|
||||
}
|
||||
return ArchiveEncoding.Decode(nameBytes, 0, nameBytes.Length).TrimNulls();
|
||||
return ArchiveEncoding.Default.GetString(nameBytes, 0, nameBytes.Length).TrimNulls();
|
||||
}
|
||||
|
||||
private static EntryType ReadEntryType(byte[] buffer)
|
||||
|
||||
@@ -3,7 +3,6 @@ using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
@@ -44,9 +43,9 @@ namespace SharpCompress.Common.Tar
|
||||
internal override IEnumerable<FilePart> Parts => filePart.AsEnumerable<FilePart>();
|
||||
|
||||
internal static IEnumerable<TarEntry> GetEntries(StreamingMode mode, Stream stream,
|
||||
CompressionType compressionType, ArchiveEncoding archiveEncoding)
|
||||
CompressionType compressionType)
|
||||
{
|
||||
foreach (TarHeader h in TarHeaderFactory.ReadHeader(mode, stream, archiveEncoding))
|
||||
foreach (TarHeader h in TarHeaderFactory.ReadHeader(mode, stream))
|
||||
{
|
||||
if (h != null)
|
||||
{
|
||||
|
||||
@@ -6,12 +6,11 @@ namespace SharpCompress.Common.Tar
|
||||
{
|
||||
internal class TarFilePart : FilePart
|
||||
{
|
||||
private readonly Stream _seekableStream;
|
||||
private readonly Stream seekableStream;
|
||||
|
||||
internal TarFilePart(TarHeader header, Stream seekableStream)
|
||||
: base(header.ArchiveEncoding)
|
||||
{
|
||||
this._seekableStream = seekableStream;
|
||||
this.seekableStream = seekableStream;
|
||||
Header = header;
|
||||
}
|
||||
|
||||
@@ -21,10 +20,10 @@ namespace SharpCompress.Common.Tar
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (_seekableStream != null)
|
||||
if (seekableStream != null)
|
||||
{
|
||||
_seekableStream.Position = Header.DataStartPosition.Value;
|
||||
return new ReadOnlySubStream(_seekableStream, Header.Size);
|
||||
seekableStream.Position = Header.DataStartPosition.Value;
|
||||
return new ReadOnlySubStream(seekableStream, Header.Size);
|
||||
}
|
||||
return Header.PackedStream;
|
||||
}
|
||||
|
||||
@@ -2,13 +2,12 @@
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
internal static class TarHeaderFactory
|
||||
{
|
||||
internal static IEnumerable<TarHeader> ReadHeader(StreamingMode mode, Stream stream, ArchiveEncoding archiveEncoding)
|
||||
internal static IEnumerable<TarHeader> ReadHeader(StreamingMode mode, Stream stream)
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
@@ -16,8 +15,7 @@ namespace SharpCompress.Common.Tar
|
||||
try
|
||||
{
|
||||
BinaryReader reader = new BinaryReader(stream);
|
||||
header = new TarHeader(archiveEncoding);
|
||||
|
||||
header = new TarHeader();
|
||||
if (!header.Read(reader))
|
||||
{
|
||||
yield break;
|
||||
@@ -25,22 +23,22 @@ namespace SharpCompress.Common.Tar
|
||||
switch (mode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
header.DataStartPosition = reader.BaseStream.Position;
|
||||
{
|
||||
header.DataStartPosition = reader.BaseStream.Position;
|
||||
|
||||
//skip to nearest 512
|
||||
reader.BaseStream.Position += PadTo512(header.Size);
|
||||
}
|
||||
//skip to nearest 512
|
||||
reader.BaseStream.Position += PadTo512(header.Size);
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
|
||||
}
|
||||
{
|
||||
header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
}
|
||||
catch
|
||||
|
||||
@@ -21,6 +21,18 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
Comment = reader.ReadBytes(CommentLength);
|
||||
}
|
||||
|
||||
internal override void Write(BinaryWriter writer)
|
||||
{
|
||||
writer.Write(VolumeNumber);
|
||||
writer.Write(FirstVolumeWithDirectory);
|
||||
writer.Write(TotalNumberOfEntriesInDisk);
|
||||
writer.Write(TotalNumberOfEntries);
|
||||
writer.Write(DirectorySize);
|
||||
writer.Write(DirectoryStartOffsetRelativeToDisk);
|
||||
writer.Write(CommentLength);
|
||||
writer.Write(Comment);
|
||||
}
|
||||
|
||||
public ushort VolumeNumber { get; private set; }
|
||||
|
||||
public ushort FirstVolumeWithDirectory { get; private set; }
|
||||
|
||||
@@ -6,8 +6,8 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal class DirectoryEntryHeader : ZipFileEntry
|
||||
{
|
||||
public DirectoryEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
: base(ZipHeaderType.DirectoryEntry, archiveEncoding)
|
||||
public DirectoryEntryHeader()
|
||||
: base(ZipHeaderType.DirectoryEntry)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -31,10 +31,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
RelativeOffsetOfEntryHeader = reader.ReadUInt32();
|
||||
|
||||
byte[] name = reader.ReadBytes(nameLength);
|
||||
Name = ArchiveEncoding.Decode(name);
|
||||
Name = DecodeString(name);
|
||||
byte[] extra = reader.ReadBytes(extraLength);
|
||||
byte[] comment = reader.ReadBytes(commentLength);
|
||||
Comment = ArchiveEncoding.Decode(comment);
|
||||
Comment = DecodeString(comment);
|
||||
LoadExtra(extra);
|
||||
|
||||
var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);
|
||||
@@ -61,6 +61,56 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
}
|
||||
}
|
||||
|
||||
internal override void Write(BinaryWriter writer)
|
||||
{
|
||||
var zip64 = CompressedSize >= uint.MaxValue || UncompressedSize >= uint.MaxValue || RelativeOffsetOfEntryHeader >= uint.MaxValue;
|
||||
if (zip64)
|
||||
Version = (ushort)(Version > 45 ? Version : 45);
|
||||
|
||||
writer.Write(Version);
|
||||
writer.Write(VersionNeededToExtract);
|
||||
writer.Write((ushort)Flags);
|
||||
writer.Write((ushort)CompressionMethod);
|
||||
writer.Write(LastModifiedTime);
|
||||
writer.Write(LastModifiedDate);
|
||||
writer.Write(Crc);
|
||||
writer.Write(zip64 ? uint.MaxValue : CompressedSize);
|
||||
writer.Write(zip64 ? uint.MaxValue : UncompressedSize);
|
||||
|
||||
byte[] nameBytes = EncodeString(Name);
|
||||
writer.Write((ushort)nameBytes.Length);
|
||||
|
||||
if (zip64)
|
||||
{
|
||||
writer.Write((ushort)(2 + 2 + 8 + 8 + 8 + 4));
|
||||
}
|
||||
else
|
||||
{
|
||||
//writer.Write((ushort)Extra.Length);
|
||||
writer.Write((ushort)0);
|
||||
}
|
||||
writer.Write((ushort)Comment.Length);
|
||||
|
||||
writer.Write(DiskNumberStart);
|
||||
writer.Write(InternalFileAttributes);
|
||||
writer.Write(ExternalFileAttributes);
|
||||
writer.Write(zip64 ? uint.MaxValue : RelativeOffsetOfEntryHeader);
|
||||
|
||||
writer.Write(nameBytes);
|
||||
|
||||
if (zip64)
|
||||
{
|
||||
writer.Write((ushort)0x0001);
|
||||
writer.Write((ushort)((8 + 8 + 8 + 4)));
|
||||
|
||||
writer.Write((ulong)UncompressedSize);
|
||||
writer.Write((ulong)CompressedSize);
|
||||
writer.Write((ulong)RelativeOffsetOfEntryHeader);
|
||||
writer.Write((uint)0); // VolumeNumber = 0
|
||||
}
|
||||
writer.Write(Comment);
|
||||
}
|
||||
|
||||
internal ushort Version { get; private set; }
|
||||
|
||||
public ushort VersionNeededToExtract { get; set; }
|
||||
|
||||
@@ -5,7 +5,6 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
[Flags]
|
||||
internal enum HeaderFlags : ushort
|
||||
{
|
||||
None = 0,
|
||||
Encrypted = 1, // http://www.pkware.com/documents/casestudies/APPNOTE.TXT
|
||||
Bit1 = 2,
|
||||
Bit2 = 4,
|
||||
|
||||
@@ -13,5 +13,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
internal override void Read(BinaryReader reader)
|
||||
{
|
||||
}
|
||||
|
||||
internal override void Write(BinaryWriter writer)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,13 +1,12 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal class LocalEntryHeader : ZipFileEntry
|
||||
{
|
||||
public LocalEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
: base(ZipHeaderType.LocalEntry, archiveEncoding)
|
||||
public LocalEntryHeader()
|
||||
: base(ZipHeaderType.LocalEntry)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -25,7 +24,7 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
ushort extraLength = reader.ReadUInt16();
|
||||
byte[] name = reader.ReadBytes(nameLength);
|
||||
byte[] extra = reader.ReadBytes(extraLength);
|
||||
Name = ArchiveEncoding.Decode(name);
|
||||
Name = DecodeString(name);
|
||||
LoadExtra(extra);
|
||||
|
||||
var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);
|
||||
@@ -48,6 +47,56 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
}
|
||||
}
|
||||
|
||||
internal override void Write(BinaryWriter writer)
|
||||
{
|
||||
if (IsZip64)
|
||||
Version = (ushort)(Version > 45 ? Version : 45);
|
||||
|
||||
writer.Write(Version);
|
||||
|
||||
writer.Write((ushort)Flags);
|
||||
writer.Write((ushort)CompressionMethod);
|
||||
writer.Write(LastModifiedTime);
|
||||
writer.Write(LastModifiedDate);
|
||||
writer.Write(Crc);
|
||||
|
||||
if (IsZip64)
|
||||
{
|
||||
writer.Write(uint.MaxValue);
|
||||
writer.Write(uint.MaxValue);
|
||||
}
|
||||
else
|
||||
{
|
||||
writer.Write(CompressedSize);
|
||||
writer.Write(UncompressedSize);
|
||||
}
|
||||
|
||||
byte[] nameBytes = EncodeString(Name);
|
||||
|
||||
writer.Write((ushort)nameBytes.Length);
|
||||
if (IsZip64)
|
||||
{
|
||||
writer.Write((ushort)(2 + 2 + (2 * 8)));
|
||||
}
|
||||
else
|
||||
{
|
||||
writer.Write((ushort)0);
|
||||
}
|
||||
|
||||
//if (Extra != null)
|
||||
//{
|
||||
// writer.Write(Extra);
|
||||
//}
|
||||
writer.Write(nameBytes);
|
||||
if (IsZip64)
|
||||
{
|
||||
writer.Write((ushort)0x0001);
|
||||
writer.Write((ushort)(2 * 8));
|
||||
writer.Write((ulong)CompressedSize);
|
||||
writer.Write((ulong)UncompressedSize);
|
||||
}
|
||||
}
|
||||
|
||||
internal ushort Version { get; private set; }
|
||||
}
|
||||
}
|
||||
@@ -14,5 +14,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
internal override void Write(BinaryWriter writer)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -26,6 +26,11 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
|
||||
const int SizeOfFixedHeaderDataExceptSignatureAndSizeFields = 44;
|
||||
|
||||
internal override void Write(BinaryWriter writer)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public long SizeOfDirectoryEndRecord { get; private set; }
|
||||
|
||||
public ushort VersionMadeBy { get; private set; }
|
||||
|
||||
@@ -16,6 +16,11 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
TotalNumberOfVolumes = reader.ReadUInt32();
|
||||
}
|
||||
|
||||
internal override void Write(BinaryWriter writer)
|
||||
{
|
||||
throw new System.NotImplementedException();
|
||||
}
|
||||
|
||||
public uint FirstVolumeWithDirectory { get; private set; }
|
||||
|
||||
public long RelativeOffsetOfTheEndOfDirectoryRecord { get; private set; }
|
||||
|
||||
@@ -8,11 +8,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal abstract class ZipFileEntry : ZipHeader
|
||||
{
|
||||
protected ZipFileEntry(ZipHeaderType type, ArchiveEncoding archiveEncoding)
|
||||
protected ZipFileEntry(ZipHeaderType type)
|
||||
: base(type)
|
||||
{
|
||||
Extra = new List<ExtraData>();
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
internal bool IsDirectory
|
||||
@@ -30,10 +29,27 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
&& Name.EndsWith("\\");
|
||||
}
|
||||
}
|
||||
|
||||
internal Stream PackedStream { get; set; }
|
||||
|
||||
internal ArchiveEncoding ArchiveEncoding { get; }
|
||||
protected string DecodeString(byte[] str)
|
||||
{
|
||||
if (FlagUtility.HasFlag(Flags, HeaderFlags.UTF8))
|
||||
{
|
||||
return Encoding.UTF8.GetString(str, 0, str.Length);
|
||||
}
|
||||
|
||||
return ArchiveEncoding.Default.GetString(str, 0, str.Length);
|
||||
}
|
||||
|
||||
protected byte[] EncodeString(string str)
|
||||
{
|
||||
if (FlagUtility.HasFlag(Flags, HeaderFlags.UTF8))
|
||||
{
|
||||
return Encoding.UTF8.GetBytes(str);
|
||||
}
|
||||
return ArchiveEncoding.Default.GetBytes(str);
|
||||
}
|
||||
|
||||
internal Stream PackedStream { get; set; }
|
||||
|
||||
internal string Name { get; set; }
|
||||
|
||||
@@ -48,7 +64,7 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
internal long UncompressedSize { get; set; }
|
||||
|
||||
internal List<ExtraData> Extra { get; set; }
|
||||
|
||||
|
||||
public string Password { get; set; }
|
||||
|
||||
internal PkwareTraditionalEncryptionData ComposeEncryptionData(Stream archiveStream)
|
||||
@@ -59,10 +75,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
}
|
||||
|
||||
var buffer = new byte[12];
|
||||
archiveStream.ReadFully(buffer);
|
||||
archiveStream.Read(buffer, 0, 12);
|
||||
|
||||
PkwareTraditionalEncryptionData encryptionData = PkwareTraditionalEncryptionData.ForRead(Password, this, buffer);
|
||||
|
||||
|
||||
return encryptionData;
|
||||
}
|
||||
|
||||
|
||||
@@ -10,10 +10,12 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
HasData = true;
|
||||
}
|
||||
|
||||
internal ZipHeaderType ZipHeaderType { get; }
|
||||
internal ZipHeaderType ZipHeaderType { get; private set; }
|
||||
|
||||
internal abstract void Read(BinaryReader reader);
|
||||
|
||||
internal abstract void Write(BinaryWriter writer);
|
||||
|
||||
internal bool HasData { get; set; }
|
||||
}
|
||||
}
|
||||
@@ -42,7 +42,7 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
throw new ArgumentNullException("buffer");
|
||||
}
|
||||
|
||||
byte[] temp = new byte[count];
|
||||
|
||||
@@ -9,11 +9,9 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
private static readonly CRC32 crc32 = new CRC32();
|
||||
private readonly UInt32[] _Keys = {0x12345678, 0x23456789, 0x34567890};
|
||||
private readonly ArchiveEncoding _archiveEncoding;
|
||||
|
||||
private PkwareTraditionalEncryptionData(string password, ArchiveEncoding archiveEncoding)
|
||||
private PkwareTraditionalEncryptionData(string password)
|
||||
{
|
||||
_archiveEncoding = archiveEncoding;
|
||||
Initialize(password);
|
||||
}
|
||||
|
||||
@@ -29,7 +27,7 @@ namespace SharpCompress.Common.Zip
|
||||
public static PkwareTraditionalEncryptionData ForRead(string password, ZipFileEntry header,
|
||||
byte[] encryptionHeader)
|
||||
{
|
||||
var encryptor = new PkwareTraditionalEncryptionData(password, header.ArchiveEncoding);
|
||||
var encryptor = new PkwareTraditionalEncryptionData(password);
|
||||
byte[] plainTextHeader = encryptor.Decrypt(encryptionHeader, encryptionHeader.Length);
|
||||
if (plainTextHeader[11] != (byte)((header.Crc >> 24) & 0xff))
|
||||
{
|
||||
@@ -49,7 +47,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
if (length > cipherText.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(length),
|
||||
throw new ArgumentOutOfRangeException("length",
|
||||
"Bad length during Decryption: the length parameter must be smaller than or equal to the size of the destination array.");
|
||||
}
|
||||
|
||||
@@ -72,7 +70,7 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (length > plainText.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(length),
|
||||
throw new ArgumentOutOfRangeException("length",
|
||||
"Bad length during Encryption: The length parameter must be smaller than or equal to the size of the destination array.");
|
||||
}
|
||||
|
||||
@@ -95,12 +93,17 @@ namespace SharpCompress.Common.Zip
|
||||
}
|
||||
}
|
||||
|
||||
internal byte[] StringToByteArray(string value)
|
||||
internal static byte[] StringToByteArray(string value, Encoding encoding)
|
||||
{
|
||||
byte[] a = _archiveEncoding.Password.GetBytes(value);
|
||||
byte[] a = encoding.GetBytes(value);
|
||||
return a;
|
||||
}
|
||||
|
||||
internal static byte[] StringToByteArray(string value)
|
||||
{
|
||||
return StringToByteArray(value, ArchiveEncoding.Password);
|
||||
}
|
||||
|
||||
private void UpdateKeys(byte byteValue)
|
||||
{
|
||||
_Keys[0] = (UInt32)crc32.ComputeCrc32((int)_Keys[0], byteValue);
|
||||
|
||||
@@ -5,21 +5,21 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class SeekableZipFilePart : ZipFilePart
|
||||
{
|
||||
private bool _isLocalHeaderLoaded;
|
||||
private readonly SeekableZipHeaderFactory _headerFactory;
|
||||
private bool isLocalHeaderLoaded;
|
||||
private readonly SeekableZipHeaderFactory headerFactory;
|
||||
|
||||
internal SeekableZipFilePart(SeekableZipHeaderFactory headerFactory, DirectoryEntryHeader header, Stream stream)
|
||||
: base(header, stream)
|
||||
{
|
||||
this._headerFactory = headerFactory;
|
||||
this.headerFactory = headerFactory;
|
||||
}
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (!_isLocalHeaderLoaded)
|
||||
if (!isLocalHeaderLoaded)
|
||||
{
|
||||
LoadLocalHeader();
|
||||
_isLocalHeaderLoaded = true;
|
||||
isLocalHeaderLoaded = true;
|
||||
}
|
||||
return base.GetCompressedStream();
|
||||
}
|
||||
@@ -29,7 +29,7 @@ namespace SharpCompress.Common.Zip
|
||||
private void LoadLocalHeader()
|
||||
{
|
||||
bool hasData = Header.HasData;
|
||||
Header = _headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader);
|
||||
Header = headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader);
|
||||
Header.HasData = hasData;
|
||||
}
|
||||
|
||||
|
||||
@@ -3,17 +3,16 @@ using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class SeekableZipHeaderFactory : ZipHeaderFactory
|
||||
{
|
||||
private const int MAX_ITERATIONS_FOR_DIRECTORY_HEADER = 4096;
|
||||
private bool _zip64;
|
||||
private bool zip64;
|
||||
|
||||
internal SeekableZipHeaderFactory(string password, ArchiveEncoding archiveEncoding)
|
||||
: base(StreamingMode.Seekable, password, archiveEncoding)
|
||||
internal SeekableZipHeaderFactory(string password)
|
||||
: base(StreamingMode.Seekable, password)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -27,14 +26,14 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (entry.IsZip64)
|
||||
{
|
||||
_zip64 = true;
|
||||
zip64 = true;
|
||||
SeekBackToHeader(stream, reader, ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR);
|
||||
var zip64Locator = new Zip64DirectoryEndLocatorHeader();
|
||||
zip64Locator.Read(reader);
|
||||
|
||||
stream.Seek(zip64Locator.RelativeOffsetOfTheEndOfDirectoryRecord, SeekOrigin.Begin);
|
||||
uint zip64Signature = reader.ReadUInt32();
|
||||
if (zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
|
||||
if(zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
|
||||
throw new ArchiveException("Failed to locate the Zip64 Header");
|
||||
|
||||
var zip64Entry = new Zip64DirectoryEndHeader();
|
||||
@@ -51,7 +50,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
stream.Position = position;
|
||||
uint signature = reader.ReadUInt32();
|
||||
var directoryEntryHeader = ReadHeader(signature, reader, _zip64) as DirectoryEntryHeader;
|
||||
var directoryEntryHeader = ReadHeader(signature, reader, zip64) as DirectoryEntryHeader;
|
||||
position = stream.Position;
|
||||
if (directoryEntryHeader == null)
|
||||
{
|
||||
@@ -92,7 +91,7 @@ namespace SharpCompress.Common.Zip
|
||||
stream.Seek(directoryEntryHeader.RelativeOffsetOfEntryHeader, SeekOrigin.Begin);
|
||||
BinaryReader reader = new BinaryReader(stream);
|
||||
uint signature = reader.ReadUInt32();
|
||||
var localEntryHeader = ReadHeader(signature, reader, _zip64) as LocalEntryHeader;
|
||||
var localEntryHeader = ReadHeader(signature, reader, zip64) as LocalEntryHeader;
|
||||
if (localEntryHeader == null)
|
||||
{
|
||||
throw new InvalidOperationException();
|
||||
|
||||
@@ -25,7 +25,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
return Stream.Null;
|
||||
}
|
||||
decompressionStream = CreateDecompressionStream(GetCryptoStream(CreateBaseStream()), Header.CompressionMethod);
|
||||
decompressionStream = CreateDecompressionStream(GetCryptoStream(CreateBaseStream()));
|
||||
if (LeaveStreamOpen)
|
||||
{
|
||||
return new NonDisposingStream(decompressionStream);
|
||||
@@ -39,20 +39,19 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
return new BinaryReader(rewindableStream);
|
||||
}
|
||||
if (Header.HasData && !Skipped)
|
||||
if (Header.HasData)
|
||||
{
|
||||
if (decompressionStream == null)
|
||||
{
|
||||
decompressionStream = GetCompressedStream();
|
||||
}
|
||||
decompressionStream.Skip();
|
||||
decompressionStream.SkipAll();
|
||||
|
||||
DeflateStream deflateStream = decompressionStream as DeflateStream;
|
||||
if (deflateStream != null)
|
||||
{
|
||||
rewindableStream.Rewind(deflateStream.InputBuffer);
|
||||
}
|
||||
Skipped = true;
|
||||
}
|
||||
var reader = new BinaryReader(rewindableStream);
|
||||
decompressionStream = null;
|
||||
|
||||
@@ -2,14 +2,13 @@
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class StreamingZipHeaderFactory : ZipHeaderFactory
|
||||
{
|
||||
internal StreamingZipHeaderFactory(string password, ArchiveEncoding archiveEncoding)
|
||||
: base(StreamingMode.Streaming, password, archiveEncoding)
|
||||
internal StreamingZipHeaderFactory(string password)
|
||||
: base(StreamingMode.Streaming, password)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -78,7 +78,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
//read out last 10 auth bytes
|
||||
var ten = new byte[10];
|
||||
stream.ReadFully(ten);
|
||||
stream.Read(ten, 0, 10);
|
||||
stream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,10 +32,6 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
return CompressionType.Deflate;
|
||||
}
|
||||
case ZipCompressionMethod.Deflate64:
|
||||
{
|
||||
return CompressionType.Deflate64;
|
||||
}
|
||||
case ZipCompressionMethod.LZMA:
|
||||
{
|
||||
return CompressionType.LZMA;
|
||||
|
||||
@@ -5,7 +5,6 @@ using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.Compressors;
|
||||
using SharpCompress.Compressors.BZip2;
|
||||
using SharpCompress.Compressors.Deflate;
|
||||
using SharpCompress.Compressors.Deflate64;
|
||||
using SharpCompress.Compressors.LZMA;
|
||||
using SharpCompress.Compressors.PPMd;
|
||||
using SharpCompress.Converters;
|
||||
@@ -16,14 +15,13 @@ namespace SharpCompress.Common.Zip
|
||||
internal abstract class ZipFilePart : FilePart
|
||||
{
|
||||
internal ZipFilePart(ZipFileEntry header, Stream stream)
|
||||
: base(header.ArchiveEncoding)
|
||||
{
|
||||
Header = header;
|
||||
header.Part = this;
|
||||
BaseStream = stream;
|
||||
}
|
||||
|
||||
internal Stream BaseStream { get; }
|
||||
internal Stream BaseStream { get; private set; }
|
||||
internal ZipFileEntry Header { get; set; }
|
||||
|
||||
internal override string FilePartName => Header.Name;
|
||||
@@ -34,7 +32,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
return Stream.Null;
|
||||
}
|
||||
Stream decompressionStream = CreateDecompressionStream(GetCryptoStream(CreateBaseStream()), Header.CompressionMethod);
|
||||
Stream decompressionStream = CreateDecompressionStream(GetCryptoStream(CreateBaseStream()));
|
||||
if (LeaveStreamOpen)
|
||||
{
|
||||
return new NonDisposingStream(decompressionStream);
|
||||
@@ -55,9 +53,9 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
protected bool LeaveStreamOpen => FlagUtility.HasFlag(Header.Flags, HeaderFlags.UsePostDataDescriptor) || Header.IsZip64;
|
||||
|
||||
protected Stream CreateDecompressionStream(Stream stream, ZipCompressionMethod method)
|
||||
protected Stream CreateDecompressionStream(Stream stream)
|
||||
{
|
||||
switch (method)
|
||||
switch (Header.CompressionMethod)
|
||||
{
|
||||
case ZipCompressionMethod.None:
|
||||
{
|
||||
@@ -67,10 +65,6 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
return new DeflateStream(stream, CompressionMode.Decompress);
|
||||
}
|
||||
case ZipCompressionMethod.Deflate64:
|
||||
{
|
||||
return new Deflate64Stream(stream, CompressionMode.Decompress);
|
||||
}
|
||||
case ZipCompressionMethod.BZip2:
|
||||
{
|
||||
return new BZip2Stream(stream, CompressionMode.Decompress);
|
||||
@@ -94,7 +88,7 @@ namespace SharpCompress.Common.Zip
|
||||
case ZipCompressionMethod.PPMd:
|
||||
{
|
||||
var props = new byte[2];
|
||||
stream.ReadFully(props);
|
||||
stream.Read(props, 0, props.Length);
|
||||
return new PpmdStream(new PpmdProperties(props), stream, false);
|
||||
}
|
||||
case ZipCompressionMethod.WinzipAes:
|
||||
@@ -108,9 +102,9 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
throw new InvalidFormatException("Winzip data length is not 7.");
|
||||
}
|
||||
ushort compressedMethod = DataConverter.LittleEndian.GetUInt16(data.DataBytes, 0);
|
||||
ushort method = DataConverter.LittleEndian.GetUInt16(data.DataBytes, 0);
|
||||
|
||||
if (compressedMethod != 0x01 && compressedMethod != 0x02)
|
||||
if (method != 0x01 && method != 0x02)
|
||||
{
|
||||
throw new InvalidFormatException("Unexpected vendor version number for WinZip AES metadata");
|
||||
}
|
||||
@@ -120,7 +114,8 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
throw new InvalidFormatException("Unexpected vendor ID for WinZip AES metadata");
|
||||
}
|
||||
return CreateDecompressionStream(stream, (ZipCompressionMethod)DataConverter.LittleEndian.GetUInt16(data.DataBytes, 5));
|
||||
Header.CompressionMethod = (ZipCompressionMethod)DataConverter.LittleEndian.GetUInt16(data.DataBytes, 5);
|
||||
return CreateDecompressionStream(stream);
|
||||
}
|
||||
default:
|
||||
{
|
||||
@@ -181,6 +176,7 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return plainStream;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ using System.Linq;
|
||||
#endif
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
@@ -24,13 +23,11 @@ namespace SharpCompress.Common.Zip
|
||||
protected LocalEntryHeader lastEntryHeader;
|
||||
private readonly string password;
|
||||
private readonly StreamingMode mode;
|
||||
private readonly ArchiveEncoding archiveEncoding;
|
||||
|
||||
protected ZipHeaderFactory(StreamingMode mode, string password, ArchiveEncoding archiveEncoding)
|
||||
protected ZipHeaderFactory(StreamingMode mode, string password)
|
||||
{
|
||||
this.mode = mode;
|
||||
this.password = password;
|
||||
this.archiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
protected ZipHeader ReadHeader(uint headerBytes, BinaryReader reader, bool zip64 = false)
|
||||
@@ -39,7 +36,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
case ENTRY_HEADER_BYTES:
|
||||
{
|
||||
var entryHeader = new LocalEntryHeader(archiveEncoding);
|
||||
var entryHeader = new LocalEntryHeader();
|
||||
entryHeader.Read(reader);
|
||||
LoadHeader(entryHeader, reader.BaseStream);
|
||||
|
||||
@@ -48,48 +45,48 @@ namespace SharpCompress.Common.Zip
|
||||
}
|
||||
case DIRECTORY_START_HEADER_BYTES:
|
||||
{
|
||||
var entry = new DirectoryEntryHeader(archiveEncoding);
|
||||
var entry = new DirectoryEntryHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case POST_DATA_DESCRIPTOR:
|
||||
{
|
||||
if (FlagUtility.HasFlag(lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor))
|
||||
{
|
||||
if (FlagUtility.HasFlag(lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor))
|
||||
{
|
||||
lastEntryHeader.Crc = reader.ReadUInt32();
|
||||
lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
}
|
||||
else
|
||||
{
|
||||
reader.ReadBytes(zip64 ? 20 : 12);
|
||||
}
|
||||
return null;
|
||||
lastEntryHeader.Crc = reader.ReadUInt32();
|
||||
lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
}
|
||||
else
|
||||
{
|
||||
reader.ReadBytes(zip64 ? 20 : 12);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
case DIGITAL_SIGNATURE:
|
||||
return null;
|
||||
case DIRECTORY_END_HEADER_BYTES:
|
||||
{
|
||||
var entry = new DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case SPLIT_ARCHIVE_HEADER_BYTES:
|
||||
{
|
||||
return new SplitHeader();
|
||||
}
|
||||
{
|
||||
return new SplitHeader();
|
||||
}
|
||||
case ZIP64_END_OF_CENTRAL_DIRECTORY:
|
||||
{
|
||||
var entry = new Zip64DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new Zip64DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR:
|
||||
{
|
||||
var entry = new Zip64DirectoryEndLocatorHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new Zip64DirectoryEndLocatorHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
default:
|
||||
throw new NotSupportedException("Unknown header: " + headerBytes);
|
||||
}
|
||||
@@ -168,22 +165,22 @@ namespace SharpCompress.Common.Zip
|
||||
switch (mode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
entryHeader.DataStartPosition = stream.Position;
|
||||
stream.Position += entryHeader.CompressedSize;
|
||||
break;
|
||||
}
|
||||
{
|
||||
entryHeader.DataStartPosition = stream.Position;
|
||||
stream.Position += entryHeader.CompressedSize;
|
||||
break;
|
||||
}
|
||||
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
entryHeader.PackedStream = stream;
|
||||
break;
|
||||
}
|
||||
{
|
||||
entryHeader.PackedStream = stream;
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
|
||||
//}
|
||||
|
||||
@@ -105,19 +105,19 @@ namespace SharpCompress.Compressors.ADC
|
||||
}
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
throw new ArgumentNullException("buffer");
|
||||
}
|
||||
if (count < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
}
|
||||
if (offset < buffer.GetLowerBound(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
}
|
||||
if ((offset + count) > buffer.GetLength(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
}
|
||||
|
||||
int size = -1;
|
||||
|
||||
@@ -99,7 +99,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// </summary>
|
||||
/// <param name="input">The stream over which to calculate the CRC32</param>
|
||||
/// <returns>the CRC32 calculation</returns>
|
||||
public UInt32 GetCrc32(Stream input)
|
||||
public Int32 GetCrc32(Stream input)
|
||||
{
|
||||
return GetCrc32AndCopy(input, null);
|
||||
}
|
||||
@@ -111,7 +111,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <param name="input">The stream over which to calculate the CRC32</param>
|
||||
/// <param name="output">The stream into which to deflate the input</param>
|
||||
/// <returns>the CRC32 calculation</returns>
|
||||
public UInt32 GetCrc32AndCopy(Stream input, Stream output)
|
||||
public Int32 GetCrc32AndCopy(Stream input, Stream output)
|
||||
{
|
||||
if (input == null)
|
||||
{
|
||||
@@ -143,7 +143,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
TotalBytesRead += count;
|
||||
}
|
||||
|
||||
return ~runningCrc32Result;
|
||||
return (Int32)(~runningCrc32Result);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
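The two hunks above change GetCrc32 and GetCrc32AndCopy to return Int32 instead of UInt32; the 32-bit pattern of the checksum is unchanged, only its interpretation. A minimal sketch (a hypothetical standalone demo, not library code) of converting between the signed and unsigned views of the same CRC value:

```csharp
using System;

class CrcSignDemo
{
    static void Main()
    {
        uint unsignedCrc = 0xDEADBEEF;                        // example CRC bit pattern
        int signedCrc = unchecked((int)unsignedCrc);          // what the Int32-returning API yields
        uint roundTrip = unchecked((uint)signedCrc);          // recover the unsigned form losslessly
        Console.WriteLine($"0x{roundTrip:X8} ({signedCrc})"); // 0xDEADBEEF (-559038737)
    }
}
```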
@@ -26,7 +26,6 @@
|
||||
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -37,10 +36,9 @@ namespace SharpCompress.Compressors.Deflate
|
||||
|
||||
public DeflateStream(Stream stream, CompressionMode mode,
|
||||
CompressionLevel level = CompressionLevel.Default,
|
||||
bool leaveOpen = false,
|
||||
Encoding forceEncoding = null)
|
||||
bool leaveOpen = false)
|
||||
{
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen, forceEncoding);
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen);
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
|
||||
@@ -0,0 +1,525 @@
|
||||
#if NETSTANDARD1_3
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Converters;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
public class GZipStream : Stream
|
||||
{
|
||||
private enum Mode
|
||||
{
|
||||
Unknown,
|
||||
Reader,
|
||||
Writer
|
||||
}
|
||||
|
||||
internal static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
|
||||
public DateTime? LastModified { get; set; }
|
||||
private byte[] _buf1 = new byte[1];
|
||||
|
||||
private System.IO.Compression.DeflateStream BaseStream;
|
||||
private bool disposed;
|
||||
private Mode mode;
|
||||
|
||||
|
||||
private string _GzipFileName;
|
||||
private string _GzipComment;
|
||||
private DateTime _GzipMtime;
|
||||
private int _gzipHeaderByteCount;
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode)
|
||||
: this(stream, mode, CompressionLevel.Default, false)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level)
|
||||
: this(stream, mode, level, false)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
|
||||
{
|
||||
if (mode == CompressionMode.Decompress)
|
||||
{
|
||||
BaseStream = new System.IO.Compression.DeflateStream(stream, System.IO.Compression.CompressionMode.Decompress, leaveOpen);
|
||||
}
|
||||
else
|
||||
{
|
||||
System.IO.Compression.CompressionLevel l;
|
||||
switch (level)
|
||||
{
|
||||
case CompressionLevel.BestSpeed:
|
||||
{
|
||||
l = System.IO.Compression.CompressionLevel.Fastest;
|
||||
break;
|
||||
}
|
||||
case CompressionLevel.None:
|
||||
{
|
||||
l = System.IO.Compression.CompressionLevel.NoCompression;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
l = System.IO.Compression.CompressionLevel.Optimal;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
BaseStream = new System.IO.Compression.DeflateStream(stream, l, leaveOpen);
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#region Stream methods
|
||||
|
||||
/// <summary>
|
||||
/// Indicates whether the stream can be read.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// The return value depends on whether the captive stream supports reading.
|
||||
/// </remarks>
|
||||
public override bool CanRead
|
||||
{
|
||||
get
|
||||
{
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
return BaseStream.CanRead;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Indicates whether the stream supports Seek operations.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Always returns false.
|
||||
/// </remarks>
|
||||
public override bool CanSeek => false;
|
||||
|
||||
/// <summary>
|
||||
/// Indicates whether the stream can be written.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// The return value depends on whether the captive stream supports writing.
|
||||
/// </remarks>
|
||||
public override bool CanWrite
|
||||
{
|
||||
get
|
||||
{
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
return BaseStream.CanWrite;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Reading this property always throws a <see cref="NotSupportedException"/>.
|
||||
/// </summary>
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
/// <summary>
|
||||
/// The position of the stream pointer.
|
||||
/// </summary>
|
||||
///
|
||||
/// <remarks>
|
||||
/// Setting this property always throws a <see
|
||||
/// cref="NotImplementedException"/>. Reading will return the total bytes
|
||||
/// written out, if used in writing, or the total bytes read in, if used in
|
||||
/// reading. The count may refer to compressed bytes or uncompressed bytes,
|
||||
/// depending on how you've used the stream.
|
||||
/// </remarks>
|
||||
public override long Position
|
||||
{
|
||||
get
|
||||
{
|
||||
switch (mode)
|
||||
{
|
||||
case Mode.Writer:
|
||||
return BaseStream.Position + _gzipHeaderByteCount;
|
||||
case Mode.Reader:
|
||||
return BaseStream.Position + _gzipHeaderByteCount;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
set => throw new NotSupportedException();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Dispose the stream.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// This may or may not result in a <c>Close()</c> call on the captive stream.
|
||||
/// See the doc on constructors that take a <c>leaveOpen</c> parameter for more information.
|
||||
/// </remarks>
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
try
|
||||
{
|
||||
if (!disposed)
|
||||
{
|
||||
if (disposing && (BaseStream != null))
|
||||
{
|
||||
BaseStream.Dispose();
|
||||
}
|
||||
disposed = true;
|
||||
}
|
||||
}
|
||||
finally
|
||||
{
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Flush the stream.
|
||||
/// </summary>
|
||||
public override void Flush()
|
||||
{
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
BaseStream.Flush();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Read and decompress data from the source stream.
|
||||
/// </summary>
|
||||
///
|
||||
/// <remarks>
|
||||
/// With a <c>GZipStream</c>, decompression is done through reading.
|
||||
/// </remarks>
|
||||
///
|
||||
/// <example>
|
||||
/// <code>
|
||||
/// byte[] working = new byte[WORKING_BUFFER_SIZE];
|
||||
/// using (System.IO.Stream input = System.IO.File.OpenRead(_CompressedFile))
|
||||
/// {
|
||||
/// using (Stream decompressor= new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, true))
|
||||
/// {
|
||||
/// using (var output = System.IO.File.Create(_DecompressedFile))
|
||||
/// {
|
||||
/// int n;
|
||||
/// while ((n= decompressor.Read(working, 0, working.Length)) !=0)
|
||||
/// {
|
||||
/// output.Write(working, 0, n);
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
/// </code>
|
||||
/// </example>
|
||||
/// <param name="buffer">The buffer into which the decompressed data should be placed.</param>
|
||||
/// <param name="offset">the offset within that data array to put the first byte read.</param>
|
||||
/// <param name="count">the number of bytes to read.</param>
|
||||
/// <returns>the number of bytes actually read</returns>
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
int n = BaseStream.Read(buffer, offset, count);
|
||||
|
||||
// Console.WriteLine("GZipStream::Read(buffer, off({0}), c({1}) = {2}", offset, count, n);
|
||||
// Console.WriteLine( Util.FormatByteArray(buffer, offset, n) );
|
||||
|
||||
if (mode == Mode.Unknown)
|
||||
{
|
||||
_gzipHeaderByteCount = _ReadAndValidateGzipHeader();
|
||||
mode = Mode.Reader;
|
||||
FileName = _GzipFileName;
|
||||
Comment = _GzipComment;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
|
||||
private int _ReadAndValidateGzipHeader()
|
||||
{
|
||||
int totalBytesRead = 0;
|
||||
|
||||
// read the header on the first read
|
||||
byte[] header = new byte[10];
|
||||
int n = BaseStream.Read(header, 0, header.Length);
|
||||
|
||||
// workitem 8501: handle edge case (decompress empty stream)
|
||||
if (n == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (n != 10)
|
||||
{
|
||||
throw new ZlibException("Not a valid GZIP stream.");
|
||||
}
|
||||
|
||||
if (header[0] != 0x1F || header[1] != 0x8B || header[2] != 8)
|
||||
{
|
||||
throw new ZlibException("Bad GZIP header.");
|
||||
}
|
||||
|
||||
Int32 timet = DataConverter.LittleEndian.GetInt32(header, 4);
|
||||
_GzipMtime = UnixEpoch.AddSeconds(timet);
|
||||
totalBytesRead += n;
|
||||
if ((header[3] & 0x04) == 0x04)
|
||||
{
|
||||
// read and discard extra field
|
||||
n = BaseStream.Read(header, 0, 2); // 2-byte length field
|
||||
totalBytesRead += n;
|
||||
|
||||
Int16 extraLength = (Int16)(header[0] + header[1] * 256);
|
||||
byte[] extra = new byte[extraLength];
|
||||
n = BaseStream.Read(extra, 0, extra.Length);
|
||||
if (n != extraLength)
|
||||
{
|
||||
throw new ZlibException("Unexpected end-of-file reading GZIP header.");
|
||||
}
|
||||
totalBytesRead += n;
|
||||
}
|
||||
if ((header[3] & 0x08) == 0x08)
|
||||
{
|
||||
_GzipFileName = ReadZeroTerminatedString();
|
||||
}
|
||||
if ((header[3] & 0x10) == 0x010)
|
||||
{
|
||||
_GzipComment = ReadZeroTerminatedString();
|
||||
}
|
||||
if ((header[3] & 0x02) == 0x02)
|
||||
{
|
||||
Read(_buf1, 0, 1); // CRC16, ignore
|
||||
}
|
||||
|
||||
return totalBytesRead;
|
||||
}
|
||||
|
||||
private string ReadZeroTerminatedString()
|
||||
{
|
||||
var list = new List<byte>();
|
||||
bool done = false;
|
||||
do
|
||||
{
|
||||
// workitem 7740
|
||||
int n = BaseStream.Read(_buf1, 0, 1);
|
||||
if (n != 1)
|
||||
{
|
||||
throw new ZlibException("Unexpected EOF reading GZIP header.");
|
||||
}
|
||||
if (_buf1[0] == 0)
|
||||
{
|
||||
done = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
list.Add(_buf1[0]);
|
||||
}
|
||||
}
|
||||
while (!done);
|
||||
byte[] a = list.ToArray();
|
||||
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Calling this method always throws a <see cref="NotSupportedException"/>.
|
||||
/// </summary>
|
||||
/// <param name="offset">irrelevant; it will always throw!</param>
|
||||
/// <param name="origin">irrelevant; it will always throw!</param>
|
||||
/// <returns>irrelevant!</returns>
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Calling this method always throws a <see cref="NotSupportedException"/>.
|
||||
/// </summary>
|
||||
/// <param name="value">irrelevant; this method will always throw!</param>
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Write data to the stream.
|
||||
/// </summary>
|
||||
///
|
||||
/// <remarks>
|
||||
/// <para>
|
||||
/// If you wish to use the <c>GZipStream</c> to compress data while writing,
|
||||
/// you can create a <c>GZipStream</c> with <c>CompressionMode.Compress</c>, and a
|
||||
/// writable output stream. Then call <c>Write()</c> on that <c>GZipStream</c>,
|
||||
/// providing uncompressed data as input. The data sent to the output stream
|
||||
/// will be the compressed form of the data written.
|
||||
/// </para>
|
||||
///
|
||||
/// <para>
|
||||
/// A <c>GZipStream</c> can be used for <c>Read()</c> or <c>Write()</c>, but not
|
||||
/// both. Writing implies compression. Reading implies decompression.
|
||||
/// </para>
|
||||
///
|
||||
/// </remarks>
|
||||
/// <param name="buffer">The buffer holding data to write to the stream.</param>
|
||||
/// <param name="offset">the offset within that data array to find the first byte to write.</param>
|
||||
/// <param name="count">the number of bytes to write.</param>
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
if (mode == Mode.Unknown)
|
||||
{
|
||||
// first write in compression, therefore, emit the GZIP header
|
||||
_gzipHeaderByteCount = EmitHeader();
|
||||
mode = Mode.Writer;
|
||||
}
|
||||
|
||||
BaseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
public string Comment
|
||||
{
|
||||
get => _GzipComment;
|
||||
set
|
||||
{
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
_GzipComment = value;
|
||||
}
|
||||
}
|
||||
|
||||
public string FileName
|
||||
{
|
||||
get => _GzipFileName;
|
||||
set
|
||||
{
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
_GzipFileName = value;
|
||||
if (_GzipFileName == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (_GzipFileName.IndexOf("/") != -1)
|
||||
{
|
||||
_GzipFileName = _GzipFileName.Replace("/", "\\");
|
||||
}
|
||||
if (_GzipFileName.EndsWith("\\"))
|
||||
{
|
||||
throw new InvalidOperationException("Illegal filename");
|
||||
}
|
||||
|
||||
var index = _GzipFileName.IndexOf("\\");
|
||||
if (index != -1)
|
||||
{
|
||||
// trim any leading path
|
||||
int length = _GzipFileName.Length;
|
||||
int num = length;
|
||||
while (--num >= 0)
|
||||
{
|
||||
char c = _GzipFileName[num];
|
||||
if (c == '\\')
|
||||
{
|
||||
_GzipFileName = _GzipFileName.Substring(num + 1, length - num - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private int EmitHeader()
|
||||
{
|
||||
byte[] commentBytes = (Comment == null) ? null : ArchiveEncoding.Default.GetBytes(Comment);
|
||||
byte[] filenameBytes = (FileName == null) ? null : ArchiveEncoding.Default.GetBytes(FileName);
|
||||
|
||||
int cbLength = (Comment == null) ? 0 : commentBytes.Length + 1;
|
||||
int fnLength = (FileName == null) ? 0 : filenameBytes.Length + 1;
|
||||
|
||||
int bufferLength = 10 + cbLength + fnLength;
|
||||
var header = new byte[bufferLength];
|
||||
int i = 0;
|
||||
|
||||
// ID
|
||||
header[i++] = 0x1F;
|
||||
header[i++] = 0x8B;
|
||||
|
||||
// compression method
|
||||
header[i++] = 8;
|
||||
byte flag = 0;
|
||||
if (Comment != null)
|
||||
{
|
||||
flag ^= 0x10;
|
||||
}
|
||||
if (FileName != null)
|
||||
{
|
||||
flag ^= 0x8;
|
||||
}
|
||||
|
||||
// flag
|
||||
header[i++] = flag;
|
||||
|
||||
// mtime
|
||||
if (!LastModified.HasValue)
|
||||
{
|
||||
LastModified = DateTime.Now;
|
||||
}
|
||||
TimeSpan delta = LastModified.Value - UnixEpoch;
|
||||
var timet = (Int32)delta.TotalSeconds;
|
||||
DataConverter.LittleEndian.PutBytes(header, i, timet);
|
||||
i += 4;
|
||||
|
||||
// xflg
|
||||
header[i++] = 0; // this field is totally useless
|
||||
|
||||
// OS
|
||||
header[i++] = 0xFF; // 0xFF == unspecified
|
||||
|
||||
// extra field length - only if FEXTRA is set, which it is not.
|
||||
//header[i++]= 0;
|
||||
//header[i++]= 0;
|
||||
|
||||
// filename
|
||||
if (fnLength != 0)
|
||||
{
|
||||
Array.Copy(filenameBytes, 0, header, i, fnLength - 1);
|
||||
i += fnLength - 1;
|
||||
header[i++] = 0; // terminate
|
||||
}
|
||||
|
||||
// comment
|
||||
if (cbLength != 0)
|
||||
{
|
||||
Array.Copy(commentBytes, 0, header, i, cbLength - 1);
|
||||
i += cbLength - 1;
|
||||
header[i++] = 0; // terminate
|
||||
}
|
||||
|
||||
BaseStream.Write(header, 0, header.Length);
|
||||
|
||||
return header.Length; // bytes written
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
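The NETSTANDARD1_3 GZipStream added above wraps System.IO.Compression.DeflateStream and hand-rolls the gzip member framing itself. As a rough reference, a minimal sketch (standalone and illustrative only, not library code) of the fixed 10-byte header that EmitHeader writes and _ReadAndValidateGzipHeader checks (ID1, ID2, CM, FLG, 4-byte little-endian MTIME, XFL, OS):

```csharp
using System;

static class GzipHeaderSketch
{
    static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);

    // Build a minimal header: no extra field, file name, comment or header CRC.
    public static byte[] Build(DateTime lastModified)
    {
        int seconds = (int)(lastModified.ToUniversalTime() - UnixEpoch).TotalSeconds;
        var header = new byte[10];
        header[0] = 0x1F;                           // ID1
        header[1] = 0x8B;                           // ID2
        header[2] = 8;                              // CM: deflate
        header[3] = 0;                              // FLG: no optional fields
        header[4] = (byte)(seconds & 0xFF);         // MTIME, little-endian
        header[5] = (byte)((seconds >> 8) & 0xFF);
        header[6] = (byte)((seconds >> 16) & 0xFF);
        header[7] = (byte)((seconds >> 24) & 0xFF);
        header[8] = 0;                              // XFL
        header[9] = 0xFF;                           // OS: unspecified
        return header;
    }

    // The same magic/method check the reader performs before trusting the rest of the member.
    public static bool LooksValid(byte[] header) =>
        header.Length >= 10 && header[0] == 0x1F && header[1] == 0x8B && header[2] == 8;
}
```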
@@ -1,3 +1,4 @@
|
||||
#if NETSTANDARD1_0
|
||||
// GZipStream.cs
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
@@ -30,45 +31,41 @@ using System;
|
||||
using System.IO;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
public class GZipStream : Stream
|
||||
{
|
||||
internal static readonly DateTime UNIX_EPOCH = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
internal static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
|
||||
public DateTime? LastModified { get; set; }
|
||||
|
||||
private string _comment;
|
||||
private string _fileName;
|
||||
private string comment;
|
||||
private string fileName;
|
||||
|
||||
internal ZlibBaseStream BaseStream;
|
||||
private bool _disposed;
|
||||
private bool _firstReadDone;
|
||||
private int _headerByteCount;
|
||||
|
||||
private readonly Encoding _encoding;
|
||||
private bool disposed;
|
||||
private bool firstReadDone;
|
||||
private int headerByteCount;
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode)
|
||||
: this(stream, mode, CompressionLevel.Default, false, Encoding.UTF8)
|
||||
: this(stream, mode, CompressionLevel.Default, false)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level)
|
||||
: this(stream, mode, level, false, Encoding.UTF8)
|
||||
: this(stream, mode, level, false)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen, Encoding.UTF8)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen)
|
||||
{
|
||||
}
|
||||
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen, Encoding encoding)
|
||||
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
|
||||
{
|
||||
BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen, encoding);
|
||||
_encoding = encoding;
|
||||
BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen);
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
@@ -78,7 +75,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
get => (BaseStream._flushMode);
|
||||
set
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -91,7 +88,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
get => BaseStream._bufferSize;
|
||||
set
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -127,7 +124,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -153,7 +150,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -183,7 +180,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Writer)
|
||||
{
|
||||
return BaseStream._z.TotalBytesOut + _headerByteCount;
|
||||
return BaseStream._z.TotalBytesOut + headerByteCount;
|
||||
}
|
||||
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Reader)
|
||||
{
|
||||
@@ -206,14 +203,14 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
try
|
||||
{
|
||||
if (!_disposed)
|
||||
if (!disposed)
|
||||
{
|
||||
if (disposing && (BaseStream != null))
|
||||
{
|
||||
BaseStream.Dispose();
|
||||
Crc32 = BaseStream.Crc32;
|
||||
}
|
||||
_disposed = true;
|
||||
disposed = true;
|
||||
}
|
||||
}
|
||||
finally
|
||||
@@ -227,7 +224,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// </summary>
|
||||
public override void Flush()
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -267,7 +264,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <returns>the number of bytes actually read</returns>
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -276,9 +273,9 @@ namespace SharpCompress.Compressors.Deflate
|
||||
// Console.WriteLine("GZipStream::Read(buffer, off({0}), c({1}) = {2}", offset, count, n);
|
||||
// Console.WriteLine( Util.FormatByteArray(buffer, offset, n) );
|
||||
|
||||
if (!_firstReadDone)
|
||||
if (!firstReadDone)
|
||||
{
|
||||
_firstReadDone = true;
|
||||
firstReadDone = true;
|
||||
FileName = BaseStream._GzipFileName;
|
||||
Comment = BaseStream._GzipComment;
|
||||
}
|
||||
@@ -329,7 +326,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <param name="count">the number of bytes to write.</param>
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -339,7 +336,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
if (BaseStream._wantCompress)
|
||||
{
|
||||
// first write in compression, therefore, emit the GZIP header
|
||||
_headerByteCount = EmitHeader();
|
||||
headerByteCount = EmitHeader();
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -350,56 +347,56 @@ namespace SharpCompress.Compressors.Deflate
|
||||
BaseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
#endregion Stream methods
|
||||
#endregion
|
||||
|
||||
public String Comment
|
||||
{
|
||||
get => _comment;
|
||||
get => comment;
|
||||
set
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
_comment = value;
|
||||
comment = value;
|
||||
}
|
||||
}
|
||||
|
||||
public string FileName
|
||||
{
|
||||
get => _fileName;
|
||||
get => fileName;
|
||||
set
|
||||
{
|
||||
if (_disposed)
|
||||
if (disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
_fileName = value;
|
||||
if (_fileName == null)
|
||||
fileName = value;
|
||||
if (fileName == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (_fileName.IndexOf("/") != -1)
|
||||
if (fileName.IndexOf("/") != -1)
|
||||
{
|
||||
_fileName = _fileName.Replace("/", "\\");
|
||||
fileName = fileName.Replace("/", "\\");
|
||||
}
|
||||
if (_fileName.EndsWith("\\"))
|
||||
if (fileName.EndsWith("\\"))
|
||||
{
|
||||
throw new InvalidOperationException("Illegal filename");
|
||||
}
|
||||
|
||||
var index = _fileName.IndexOf("\\");
|
||||
var index = fileName.IndexOf("\\");
|
||||
if (index != -1)
|
||||
{
|
||||
// trim any leading path
|
||||
int length = _fileName.Length;
|
||||
int length = fileName.Length;
|
||||
int num = length;
|
||||
while (--num >= 0)
|
||||
{
|
||||
char c = _fileName[num];
|
||||
char c = fileName[num];
|
||||
if (c == '\\')
|
||||
{
|
||||
_fileName = _fileName.Substring(num + 1, length - num - 1);
|
||||
fileName = fileName.Substring(num + 1, length - num - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -410,10 +407,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
|
||||
private int EmitHeader()
|
||||
{
|
||||
byte[] commentBytes = (Comment == null) ? null
|
||||
: _encoding.GetBytes(Comment);
|
||||
byte[] filenameBytes = (FileName == null) ? null
|
||||
: _encoding.GetBytes(FileName);
|
||||
byte[] commentBytes = (Comment == null) ? null : ArchiveEncoding.Default.GetBytes(Comment);
|
||||
byte[] filenameBytes = (FileName == null) ? null : ArchiveEncoding.Default.GetBytes(FileName);
|
||||
|
||||
int cbLength = (Comment == null) ? 0 : commentBytes.Length + 1;
|
||||
int fnLength = (FileName == null) ? 0 : filenameBytes.Length + 1;
|
||||
@@ -446,7 +441,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
LastModified = DateTime.Now;
|
||||
}
|
||||
TimeSpan delta = LastModified.Value - UNIX_EPOCH;
|
||||
TimeSpan delta = LastModified.Value - UnixEpoch;
|
||||
var timet = (Int32)delta.TotalSeconds;
|
||||
DataConverter.LittleEndian.PutBytes(header, i, timet);
|
||||
i += 4;
|
||||
@@ -482,4 +477,5 @@ namespace SharpCompress.Compressors.Deflate
|
||||
return header.Length; // bytes written
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@@ -418,7 +418,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
internal sealed class Adler
|
||||
{
|
||||
// largest prime smaller than 65536
|
||||
private static readonly uint BASE = 65521U;
|
||||
private static readonly int BASE = 65521;
|
||||
|
||||
// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
|
||||
private static readonly int NMAX = 5552;
|
||||
@@ -430,8 +430,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
return 1;
|
||||
}
|
||||
|
||||
uint s1 = adler & 0xffffU;
|
||||
uint s2 = (adler >> 16) & 0xffffU;
|
||||
int s1 = (int)(adler & 0xffff);
|
||||
int s2 = (int)((adler >> 16) & 0xffff);
|
||||
|
||||
while (len > 0)
|
||||
{
|
||||
@@ -486,7 +486,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
s1 %= BASE;
|
||||
s2 %= BASE;
|
||||
}
|
||||
return (s2 << 16) | s1;
|
||||
return (uint)((s2 << 16) | s1);
|
||||
}
|
||||
}
|
||||
}
|
||||
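The Adler hunks above only switch the working sums between uint and int; the checksum itself stays the classic pair of running sums modulo 65521. A minimal sketch (illustrative only; it folds the modulo into every step instead of batching it with NMAX as the library code does):

```csharp
using System;
using System.Text;

static class Adler32Sketch
{
    private const uint Base = 65521; // largest prime below 65536

    public static uint Compute(byte[] data)
    {
        uint s1 = 1, s2 = 0;
        foreach (byte b in data)
        {
            s1 = (s1 + b) % Base;   // running sum of bytes (seeded with 1)
            s2 = (s2 + s1) % Base;  // running sum of the s1 values
        }
        return (s2 << 16) | s1;
    }

    static void Main()
    {
        // Well-known test vector: Adler-32("Wikipedia") == 0x11E60398
        Console.WriteLine(Compute(Encoding.ASCII.GetBytes("Wikipedia")).ToString("X8"));
    }
}
```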
@@ -1,20 +1,20 @@
|
||||
// ZlibBaseStream.cs
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
|
||||
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
|
||||
// All rights reserved.
|
||||
//
|
||||
// This code module is part of DotNetZip, a zipfile class library.
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// This code is licensed under the Microsoft Public License.
|
||||
// This code is licensed under the Microsoft Public License.
|
||||
// See the file License.txt for the license details.
|
||||
// More info on: http://dotnetzip.codeplex.com
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// last saved (in emacs):
|
||||
// last saved (in emacs):
|
||||
// Time-stamp: <2009-October-28 15:45:15>
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
@@ -30,7 +30,6 @@ using System.IO;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -65,8 +64,6 @@ namespace SharpCompress.Compressors.Deflate
|
||||
protected internal DateTime _GzipMtime;
|
||||
protected internal int _gzipHeaderByteCount;
|
||||
|
||||
private readonly Encoding _encoding;
|
||||
|
||||
internal int Crc32
|
||||
{
|
||||
get
|
||||
@@ -83,8 +80,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
CompressionMode compressionMode,
|
||||
CompressionLevel level,
|
||||
ZlibStreamFlavor flavor,
|
||||
bool leaveOpen,
|
||||
Encoding encoding)
|
||||
bool leaveOpen)
|
||||
{
|
||||
_flushMode = FlushType.None;
|
||||
|
||||
@@ -95,8 +91,6 @@ namespace SharpCompress.Compressors.Deflate
|
||||
_flavor = flavor;
|
||||
_level = level;
|
||||
|
||||
_encoding = encoding;
|
||||
|
||||
// workitem 7159
|
||||
if (flavor == ZlibStreamFlavor.GZIP)
|
||||
{
|
||||
@@ -424,8 +418,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
}
|
||||
}
|
||||
while (!done);
|
||||
byte[] buffer = list.ToArray();
|
||||
return _encoding.GetString(buffer, 0, buffer.Length);
|
||||
byte[] a = list.ToArray();
|
||||
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
|
||||
}
|
||||
|
||||
private int _ReadAndValidateGzipHeader()
|
||||
@@ -534,19 +528,19 @@ namespace SharpCompress.Compressors.Deflate
|
||||
}
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
throw new ArgumentNullException("buffer");
|
||||
}
|
||||
if (count < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
}
|
||||
if (offset < buffer.GetLowerBound(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
}
|
||||
if ((offset + count) > buffer.GetLength(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
}
|
||||
|
||||
int rc = 0;
|
||||
@@ -599,7 +593,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK);
|
||||
|
||||
// workitem 8557
|
||||
// is there more room in output?
|
||||
// is there more room in output?
|
||||
if (_z.AvailableBytesOut > 0)
|
||||
{
|
||||
if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0)
|
||||
|
||||
@@ -27,7 +27,6 @@
|
||||
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -37,23 +36,23 @@ namespace SharpCompress.Compressors.Deflate
|
||||
private bool _disposed;
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode)
|
||||
: this(stream, mode, CompressionLevel.Default, false, Encoding.UTF8)
|
||||
: this(stream, mode, CompressionLevel.Default, false)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level)
|
||||
: this(stream, mode, level, false, Encoding.UTF8)
|
||||
: this(stream, mode, level, false)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, bool leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen, Encoding.UTF8)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen, Encoding encoding)
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
|
||||
{
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen, encoding);
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen);
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
@@ -327,6 +326,6 @@ namespace SharpCompress.Compressors.Deflate
|
||||
_baseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
#endregion System.IO.Stream methods
|
||||
#endregion
|
||||
}
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
internal enum BlockType
|
||||
{
|
||||
Uncompressed = 0,
|
||||
Static = 1,
|
||||
Dynamic = 2
|
||||
}
|
||||
}
|
||||
@@ -1,257 +0,0 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using SharpCompress.Common.Zip;
|
||||
using SharpCompress.Compressors.Deflate;
|
||||
using System;
|
||||
using System.Diagnostics;
|
||||
using System.IO;
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
public sealed partial class Deflate64Stream : Stream
|
||||
{
|
||||
internal const int DefaultBufferSize = 8192;
|
||||
|
||||
private Stream _stream;
|
||||
private CompressionMode _mode;
|
||||
private bool _leaveOpen;
|
||||
private InflaterManaged _inflater;
|
||||
private byte[] _buffer;
|
||||
|
||||
public Deflate64Stream(Stream stream, CompressionMode mode,
|
||||
CompressionLevel level = CompressionLevel.Default,
|
||||
bool leaveOpen = false)
|
||||
{
|
||||
if (stream == null)
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
if (mode != CompressionMode.Decompress)
|
||||
throw new NotImplementedException("Deflate64: this implementation only supports decompression");
|
||||
if (!stream.CanRead)
|
||||
throw new ArgumentException("Deflate64: input stream is not readable", nameof(stream));
|
||||
|
||||
InitializeInflater(stream, leaveOpen, ZipCompressionMethod.Deflate64);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets up this DeflateManagedStream to be used for Inflation/Decompression
|
||||
/// </summary>
|
||||
internal void InitializeInflater(Stream stream, bool leaveOpen, ZipCompressionMethod method = ZipCompressionMethod.Deflate)
|
||||
{
|
||||
Debug.Assert(stream != null);
|
||||
Debug.Assert(method == ZipCompressionMethod.Deflate || method == ZipCompressionMethod.Deflate64);
|
||||
if (!stream.CanRead)
|
||||
throw new ArgumentException("Deflate64: input stream is not readable", nameof(stream));
|
||||
|
||||
_inflater = new InflaterManaged(method == ZipCompressionMethod.Deflate64);
|
||||
|
||||
_stream = stream;
|
||||
_mode = CompressionMode.Decompress;
|
||||
_leaveOpen = leaveOpen;
|
||||
_buffer = new byte[DefaultBufferSize];
|
||||
}
|
||||
|
||||
public override bool CanRead
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_stream == null)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return (_mode == CompressionMode.Decompress && _stream.CanRead);
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanWrite
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_stream == null)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return (_mode == CompressionMode.Compress && _stream.CanWrite);
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override long Length
|
||||
{
|
||||
get { throw new NotSupportedException("Deflate64: not supported"); }
|
||||
}
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get { throw new NotSupportedException("Deflate64: not supported"); }
|
||||
set { throw new NotSupportedException("Deflate64: not supported"); }
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
throw new NotSupportedException("Deflate64: not supported");
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotSupportedException("Deflate64: not supported");
|
||||
}
|
||||
|
||||
public override int Read(byte[] array, int offset, int count)
|
||||
{
|
||||
EnsureDecompressionMode();
|
||||
ValidateParameters(array, offset, count);
|
||||
EnsureNotDisposed();
|
||||
|
||||
int bytesRead;
|
||||
int currentOffset = offset;
|
||||
int remainingCount = count;
|
||||
|
||||
while (true)
|
||||
{
|
||||
bytesRead = _inflater.Inflate(array, currentOffset, remainingCount);
|
||||
currentOffset += bytesRead;
|
||||
remainingCount -= bytesRead;
|
||||
|
||||
if (remainingCount == 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
if (_inflater.Finished())
|
||||
{
|
||||
// if we finished decompressing, we can't have anything left in the outputwindow.
|
||||
Debug.Assert(_inflater.AvailableOutput == 0, "We should have copied all stuff out!");
|
||||
break;
|
||||
}
|
||||
|
||||
int bytes = _stream.Read(_buffer, 0, _buffer.Length);
|
||||
if (bytes <= 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
else if (bytes > _buffer.Length)
|
||||
{
|
||||
// The stream is either malicious or poorly implemented and returned a number of
|
||||
// bytes larger than the buffer supplied to it.
|
||||
throw new InvalidDataException("Deflate64: invalid data");
|
||||
}
|
||||
|
||||
_inflater.SetInput(_buffer, 0, bytes);
|
||||
}
|
||||
|
||||
return count - remainingCount;
|
||||
}
|
||||
|
||||
private void ValidateParameters(byte[] array, int offset, int count)
|
||||
{
|
||||
if (array == null)
|
||||
throw new ArgumentNullException(nameof(array));
|
||||
|
||||
if (offset < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
|
||||
if (count < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
|
||||
if (array.Length - offset < count)
|
||||
throw new ArgumentException("Deflate64: invalid offset/count combination");
|
||||
}
|
||||
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (_stream == null)
|
||||
ThrowStreamClosedException();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.NoInlining)]
|
||||
private static void ThrowStreamClosedException()
|
||||
{
|
||||
throw new ObjectDisposedException(null, "Deflate64: stream has been disposed");
|
||||
}
|
||||
|
||||
private void EnsureDecompressionMode()
|
||||
{
|
||||
if (_mode != CompressionMode.Decompress)
|
||||
ThrowCannotReadFromDeflateManagedStreamException();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.NoInlining)]
|
||||
private static void ThrowCannotReadFromDeflateManagedStreamException()
|
||||
{
|
||||
throw new InvalidOperationException("Deflate64: cannot read from this stream");
|
||||
}
|
||||
|
||||
private void EnsureCompressionMode()
|
||||
{
|
||||
if (_mode != CompressionMode.Compress)
|
||||
ThrowCannotWriteToDeflateManagedStreamException();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.NoInlining)]
|
||||
private static void ThrowCannotWriteToDeflateManagedStreamException()
|
||||
{
|
||||
throw new InvalidOperationException("Deflate64: cannot write to this stream");
|
||||
}
|
||||
|
||||
public override void Write(byte[] array, int offset, int count)
|
||||
{
|
||||
ThrowCannotWriteToDeflateManagedStreamException();
|
||||
}
|
||||
|
||||
// This is called by Dispose:
|
||||
private void PurgeBuffers(bool disposing)
|
||||
{
|
||||
if (!disposing)
|
||||
return;
|
||||
|
||||
if (_stream == null)
|
||||
return;
|
||||
|
||||
Flush();
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
try
|
||||
{
|
||||
PurgeBuffers(disposing);
|
||||
}
|
||||
finally
|
||||
{
|
||||
// Close the underlying stream even if PurgeBuffers threw.
|
||||
// Stream.Close() may throw here (may or may not be due to the same error).
|
||||
// In this case, we still need to clean up internal resources, hence the inner finally blocks.
|
||||
try
|
||||
{
|
||||
if (disposing && !_leaveOpen && _stream != null)
|
||||
_stream.Dispose();
|
||||
}
|
||||
finally
|
||||
{
|
||||
_stream = null;
|
||||
|
||||
try
|
||||
{
|
||||
_inflater?.Dispose();
|
||||
}
|
||||
finally
|
||||
{
|
||||
_inflater = null;
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using System.Diagnostics;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
internal sealed class DeflateInput
|
||||
{
|
||||
internal byte[] Buffer { get; set; }
|
||||
internal int Count { get; set; }
|
||||
internal int StartIndex { get; set; }
|
||||
|
||||
internal void ConsumeBytes(int n)
|
||||
{
|
||||
Debug.Assert(n <= Count, "Should use more bytes than what we have in the buffer");
|
||||
StartIndex += n;
|
||||
Count -= n;
|
||||
Debug.Assert(StartIndex + Count <= Buffer.Length, "Input buffer is in invalid state!");
|
||||
}
|
||||
|
||||
internal InputState DumpState() => new InputState(Count, StartIndex);
|
||||
|
||||
internal void RestoreState(InputState state)
|
||||
{
|
||||
Count = state._count;
|
||||
StartIndex = state._startIndex;
|
||||
}
|
||||
|
||||
internal /*readonly */struct InputState
|
||||
{
|
||||
internal readonly int _count;
|
||||
internal readonly int _startIndex;
|
||||
|
||||
internal InputState(int count, int startIndex)
|
||||
{
|
||||
_count = count;
|
||||
_startIndex = startIndex;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,245 +0,0 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using System.Diagnostics;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
internal static class FastEncoderStatics
|
||||
{
|
||||
// static information for encoding, DO NOT MODIFY
|
||||
|
||||
internal static readonly byte[] FastEncoderTreeStructureData =
|
||||
{
|
||||
0xec,0xbd,0x07,0x60,0x1c,0x49,0x96,0x25,0x26,0x2f,0x6d,0xca,
|
||||
0x7b,0x7f,0x4a,0xf5,0x4a,0xd7,0xe0,0x74,0xa1,0x08,0x80,0x60,
|
||||
0x13,0x24,0xd8,0x90,0x40,0x10,0xec,0xc1,0x88,0xcd,0xe6,0x92,
|
||||
0xec,0x1d,0x69,0x47,0x23,0x29,0xab,0x2a,0x81,0xca,0x65,0x56,
|
||||
0x65,0x5d,0x66,0x16,0x40,0xcc,0xed,0x9d,0xbc,0xf7,0xde,0x7b,
|
||||
0xef,0xbd,0xf7,0xde,0x7b,0xef,0xbd,0xf7,0xba,0x3b,0x9d,0x4e,
|
||||
0x27,0xf7,0xdf,0xff,0x3f,0x5c,0x66,0x64,0x01,0x6c,0xf6,0xce,
|
||||
0x4a,0xda,0xc9,0x9e,0x21,0x80,0xaa,0xc8,0x1f,0x3f,0x7e,0x7c,
|
||||
0x1f,0x3f
|
||||
};
|
||||
|
||||
internal static readonly byte[] BFinalFastEncoderTreeStructureData =
|
||||
{
|
||||
0xed,0xbd,0x07,0x60,0x1c,0x49,0x96,0x25,0x26,0x2f,0x6d,0xca,
|
||||
0x7b,0x7f,0x4a,0xf5,0x4a,0xd7,0xe0,0x74,0xa1,0x08,0x80,0x60,
|
||||
0x13,0x24,0xd8,0x90,0x40,0x10,0xec,0xc1,0x88,0xcd,0xe6,0x92,
|
||||
0xec,0x1d,0x69,0x47,0x23,0x29,0xab,0x2a,0x81,0xca,0x65,0x56,
|
||||
0x65,0x5d,0x66,0x16,0x40,0xcc,0xed,0x9d,0xbc,0xf7,0xde,0x7b,
|
||||
0xef,0xbd,0xf7,0xde,0x7b,0xef,0xbd,0xf7,0xba,0x3b,0x9d,0x4e,
|
||||
0x27,0xf7,0xdf,0xff,0x3f,0x5c,0x66,0x64,0x01,0x6c,0xf6,0xce,
|
||||
0x4a,0xda,0xc9,0x9e,0x21,0x80,0xaa,0xc8,0x1f,0x3f,0x7e,0x7c,
|
||||
0x1f,0x3f
|
||||
};
|
||||
|
||||
// Output a currentMatch with length matchLen (>= MIN_MATCH) and displacement matchPos
|
||||
//
|
||||
// Optimisation: unlike the other encoders, here we have an array of codes for each currentMatch
|
||||
// length (not just each currentMatch length slot), complete with all the extra bits filled in, in
|
||||
// a single array element.
|
||||
//
|
||||
// There are many advantages to doing this:
|
||||
//
|
||||
// 1. A single array lookup on g_FastEncoderLiteralCodeInfo, instead of separate array lookups
|
||||
// on g_LengthLookup (to get the length slot), g_FastEncoderLiteralTreeLength,
|
||||
// g_FastEncoderLiteralTreeCode, g_ExtraLengthBits, and g_BitMask
|
||||
//
|
||||
// 2. The array is an array of ULONGs, so no access penalty, unlike for accessing those USHORT
|
||||
// code arrays in the other encoders (although they could be made into ULONGs with some
|
||||
// modifications to the source).
|
||||
//
|
||||
// Note, if we could guarantee that codeLen <= 16 always, then we could skip an if statement here.
|
||||
//
|
||||
// A completely different optimisation is used for the distance codes since, obviously, a table for
|
||||
// all 8192 distances combining their extra bits is not feasible. The distance codeinfo table is
|
||||
// made up of code[], len[] and # extraBits for this code.
|
||||
//
|
||||
// The advantages are similar to the above; a ULONG array instead of a USHORT and BYTE array, better
|
||||
// cache locality, fewer memory operations.
|
||||
//
|
||||
|
||||
|
||||
// Encoding information for literal and Length.
|
||||
// The least 5 significant bits are the length
|
||||
// and the rest is the code bits.
|
||||
|
||||
internal static readonly uint[] FastEncoderLiteralCodeInfo =
|
||||
{
|
||||
0x0000d7ee,0x0004d7ee,0x0002d7ee,0x0006d7ee,0x0001d7ee,0x0005d7ee,0x0003d7ee,
|
||||
0x0007d7ee,0x000037ee,0x0000c7ec,0x00000126,0x000437ee,0x000237ee,0x000637ee,
|
||||
0x000137ee,0x000537ee,0x000337ee,0x000737ee,0x0000b7ee,0x0004b7ee,0x0002b7ee,
|
||||
0x0006b7ee,0x0001b7ee,0x0005b7ee,0x0003b7ee,0x0007b7ee,0x000077ee,0x000477ee,
|
||||
0x000277ee,0x000677ee,0x000017ed,0x000177ee,0x00000526,0x000577ee,0x000023ea,
|
||||
0x0001c7ec,0x000377ee,0x000777ee,0x000217ed,0x000063ea,0x00000b68,0x00000ee9,
|
||||
0x00005beb,0x000013ea,0x00000467,0x00001b68,0x00000c67,0x00002ee9,0x00000768,
|
||||
0x00001768,0x00000f68,0x00001ee9,0x00001f68,0x00003ee9,0x000053ea,0x000001e9,
|
||||
0x000000e8,0x000021e9,0x000011e9,0x000010e8,0x000031e9,0x000033ea,0x000008e8,
|
||||
0x0000f7ee,0x0004f7ee,0x000018e8,0x000009e9,0x000004e8,0x000029e9,0x000014e8,
|
||||
0x000019e9,0x000073ea,0x0000dbeb,0x00000ce8,0x00003beb,0x0002f7ee,0x000039e9,
|
||||
0x00000bea,0x000005e9,0x00004bea,0x000025e9,0x000027ec,0x000015e9,0x000035e9,
|
||||
0x00000de9,0x00002bea,0x000127ec,0x0000bbeb,0x0006f7ee,0x0001f7ee,0x0000a7ec,
|
||||
0x00007beb,0x0005f7ee,0x0000fbeb,0x0003f7ee,0x0007f7ee,0x00000fee,0x00000326,
|
||||
0x00000267,0x00000a67,0x00000667,0x00000726,0x00001ce8,0x000002e8,0x00000e67,
|
||||
0x000000a6,0x0001a7ec,0x00002de9,0x000004a6,0x00000167,0x00000967,0x000002a6,
|
||||
0x00000567,0x000117ed,0x000006a6,0x000001a6,0x000005a6,0x00000d67,0x000012e8,
|
||||
0x00000ae8,0x00001de9,0x00001ae8,0x000007eb,0x000317ed,0x000067ec,0x000097ed,
|
||||
0x000297ed,0x00040fee,0x00020fee,0x00060fee,0x00010fee,0x00050fee,0x00030fee,
|
||||
0x00070fee,0x00008fee,0x00048fee,0x00028fee,0x00068fee,0x00018fee,0x00058fee,
|
||||
0x00038fee,0x00078fee,0x00004fee,0x00044fee,0x00024fee,0x00064fee,0x00014fee,
|
||||
0x00054fee,0x00034fee,0x00074fee,0x0000cfee,0x0004cfee,0x0002cfee,0x0006cfee,
|
||||
0x0001cfee,0x0005cfee,0x0003cfee,0x0007cfee,0x00002fee,0x00042fee,0x00022fee,
|
||||
0x00062fee,0x00012fee,0x00052fee,0x00032fee,0x00072fee,0x0000afee,0x0004afee,
|
||||
0x0002afee,0x0006afee,0x0001afee,0x0005afee,0x0003afee,0x0007afee,0x00006fee,
|
||||
0x00046fee,0x00026fee,0x00066fee,0x00016fee,0x00056fee,0x00036fee,0x00076fee,
|
||||
0x0000efee,0x0004efee,0x0002efee,0x0006efee,0x0001efee,0x0005efee,0x0003efee,
|
||||
0x0007efee,0x00001fee,0x00041fee,0x00021fee,0x00061fee,0x00011fee,0x00051fee,
|
||||
0x00031fee,0x00071fee,0x00009fee,0x00049fee,0x00029fee,0x00069fee,0x00019fee,
|
||||
0x00059fee,0x00039fee,0x00079fee,0x00005fee,0x00045fee,0x00025fee,0x00065fee,
|
||||
0x00015fee,0x00055fee,0x00035fee,0x00075fee,0x0000dfee,0x0004dfee,0x0002dfee,
|
||||
0x0006dfee,0x0001dfee,0x0005dfee,0x0003dfee,0x0007dfee,0x00003fee,0x00043fee,
|
||||
0x00023fee,0x00063fee,0x00013fee,0x00053fee,0x00033fee,0x00073fee,0x0000bfee,
|
||||
0x0004bfee,0x0002bfee,0x0006bfee,0x0001bfee,0x0005bfee,0x0003bfee,0x0007bfee,
|
||||
0x00007fee,0x00047fee,0x00027fee,0x00067fee,0x00017fee,0x000197ed,0x000397ed,
|
||||
0x000057ed,0x00057fee,0x000257ed,0x00037fee,0x000157ed,0x00077fee,0x000357ed,
|
||||
0x0000ffee,0x0004ffee,0x0002ffee,0x0006ffee,0x0001ffee,0x00000084,0x00000003,
|
||||
0x00000184,0x00000044,0x00000144,0x000000c5,0x000002c5,0x000001c5,0x000003c6,
|
||||
0x000007c6,0x00000026,0x00000426,0x000003a7,0x00000ba7,0x000007a7,0x00000fa7,
|
||||
0x00000227,0x00000627,0x00000a27,0x00000e27,0x00000068,0x00000868,0x00001068,
|
||||
0x00001868,0x00000369,0x00001369,0x00002369,0x00003369,0x000006ea,0x000026ea,
|
||||
0x000046ea,0x000066ea,0x000016eb,0x000036eb,0x000056eb,0x000076eb,0x000096eb,
|
||||
0x0000b6eb,0x0000d6eb,0x0000f6eb,0x00003dec,0x00007dec,0x0000bdec,0x0000fdec,
|
||||
0x00013dec,0x00017dec,0x0001bdec,0x0001fdec,0x00006bed,0x0000ebed,0x00016bed,
|
||||
0x0001ebed,0x00026bed,0x0002ebed,0x00036bed,0x0003ebed,0x000003ec,0x000043ec,
|
||||
0x000083ec,0x0000c3ec,0x000103ec,0x000143ec,0x000183ec,0x0001c3ec,0x00001bee,
|
||||
0x00009bee,0x00011bee,0x00019bee,0x00021bee,0x00029bee,0x00031bee,0x00039bee,
|
||||
0x00041bee,0x00049bee,0x00051bee,0x00059bee,0x00061bee,0x00069bee,0x00071bee,
|
||||
0x00079bee,0x000167f0,0x000367f0,0x000567f0,0x000767f0,0x000967f0,0x000b67f0,
|
||||
0x000d67f0,0x000f67f0,0x001167f0,0x001367f0,0x001567f0,0x001767f0,0x001967f0,
|
||||
0x001b67f0,0x001d67f0,0x001f67f0,0x000087ef,0x000187ef,0x000287ef,0x000387ef,
|
||||
0x000487ef,0x000587ef,0x000687ef,0x000787ef,0x000887ef,0x000987ef,0x000a87ef,
|
||||
0x000b87ef,0x000c87ef,0x000d87ef,0x000e87ef,0x000f87ef,0x0000e7f0,0x0002e7f0,
|
||||
0x0004e7f0,0x0006e7f0,0x0008e7f0,0x000ae7f0,0x000ce7f0,0x000ee7f0,0x0010e7f0,
|
||||
0x0012e7f0,0x0014e7f0,0x0016e7f0,0x0018e7f0,0x001ae7f0,0x001ce7f0,0x001ee7f0,
|
||||
0x0005fff3,0x000dfff3,0x0015fff3,0x001dfff3,0x0025fff3,0x002dfff3,0x0035fff3,
|
||||
0x003dfff3,0x0045fff3,0x004dfff3,0x0055fff3,0x005dfff3,0x0065fff3,0x006dfff3,
|
||||
0x0075fff3,0x007dfff3,0x0085fff3,0x008dfff3,0x0095fff3,0x009dfff3,0x00a5fff3,
|
||||
0x00adfff3,0x00b5fff3,0x00bdfff3,0x00c5fff3,0x00cdfff3,0x00d5fff3,0x00ddfff3,
|
||||
0x00e5fff3,0x00edfff3,0x00f5fff3,0x00fdfff3,0x0003fff3,0x000bfff3,0x0013fff3,
|
||||
0x001bfff3,0x0023fff3,0x002bfff3,0x0033fff3,0x003bfff3,0x0043fff3,0x004bfff3,
|
||||
0x0053fff3,0x005bfff3,0x0063fff3,0x006bfff3,0x0073fff3,0x007bfff3,0x0083fff3,
|
||||
0x008bfff3,0x0093fff3,0x009bfff3,0x00a3fff3,0x00abfff3,0x00b3fff3,0x00bbfff3,
|
||||
0x00c3fff3,0x00cbfff3,0x00d3fff3,0x00dbfff3,0x00e3fff3,0x00ebfff3,0x00f3fff3,
|
||||
0x00fbfff3,0x0007fff3,0x000ffff3,0x0017fff3,0x001ffff3,0x0027fff3,0x002ffff3,
|
||||
0x0037fff3,0x003ffff3,0x0047fff3,0x004ffff3,0x0057fff3,0x005ffff3,0x0067fff3,
|
||||
0x006ffff3,0x0077fff3,0x007ffff3,0x0087fff3,0x008ffff3,0x0097fff3,0x009ffff3,
|
||||
0x00a7fff3,0x00affff3,0x00b7fff3,0x00bffff3,0x00c7fff3,0x00cffff3,0x00d7fff3,
|
||||
0x00dffff3,0x00e7fff3,0x00effff3,0x00f7fff3,0x00fffff3,0x0001e7f1,0x0003e7f1,
|
||||
0x0005e7f1,0x0007e7f1,0x0009e7f1,0x000be7f1,0x000de7f1,0x000fe7f1,0x0011e7f1,
|
||||
0x0013e7f1,0x0015e7f1,0x0017e7f1,0x0019e7f1,0x001be7f1,0x001de7f1,0x001fe7f1,
|
||||
0x0021e7f1,0x0023e7f1,0x0025e7f1,0x0027e7f1,0x0029e7f1,0x002be7f1,0x002de7f1,
|
||||
0x002fe7f1,0x0031e7f1,0x0033e7f1,0x0035e7f1,0x0037e7f1,0x0039e7f1,0x003be7f1,
|
||||
0x003de7f1,0x000047eb
|
||||
};
|
||||
|
||||
internal static readonly uint[] FastEncoderDistanceCodeInfo =
|
||||
{
|
||||
0x00000f06,0x0001ff0a,0x0003ff0b,0x0007ff0b,0x0000ff19,0x00003f18,0x0000bf28,
|
||||
0x00007f28,0x00001f37,0x00005f37,0x00000d45,0x00002f46,0x00000054,0x00001d55,
|
||||
0x00000864,0x00000365,0x00000474,0x00001375,0x00000c84,0x00000284,0x00000a94,
|
||||
0x00000694,0x00000ea4,0x000001a4,0x000009b4,0x00000bb5,0x000005c4,0x00001bc5,
|
||||
0x000007d5,0x000017d5,0x00000000,0x00000100
|
||||
};
|
||||
|
||||
internal static readonly uint[] BitMask = { 0, 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767 };
internal static readonly byte[] ExtraLengthBits = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 };
internal static readonly byte[] ExtraDistanceBits = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 0, 0 };
internal const int NumChars = 256;
internal const int NumLengthBaseCodes = 29;
internal const int NumDistBaseCodes = 30;

internal const uint FastEncoderPostTreeBitBuf = 0x0022;
internal const int FastEncoderPostTreeBitCount = 9;

internal const uint NoCompressionHeader = 0x0;
internal const int NoCompressionHeaderBitCount = 3;
internal const uint BFinalNoCompressionHeader = 0x1;
internal const int BFinalNoCompressionHeaderBitCount = 3;
internal const int MaxCodeLen = 16;

private static readonly byte[] s_distLookup = CreateDistanceLookup();

private static byte[] CreateDistanceLookup()
{
    byte[] result = new byte[512];

    // Generate the global slot tables which allow us to convert a distance
    // (0..32K) to a distance slot (0..29)
    //
    // Distance table
    //       Extra               Extra                 Extra
    // Code  Bits  Dist    Code  Bits  Dist      Code  Bits  Distance
    // ----  ----  ----    ----  ----  ------    ----  ----  --------
    //   0    0     1       10    4    33-48      20    9    1025-1536
    //   1    0     2       11    4    49-64      21    9    1537-2048
    //   2    0     3       12    5    65-96      22   10    2049-3072
    //   3    0     4       13    5    97-128     23   10    3073-4096
    //   4    1     5,6     14    6    129-192    24   11    4097-6144
    //   5    1     7,8     15    6    193-256    25   11    6145-8192
    //   6    2     9-12    16    7    257-384    26   12    8193-12288
    //   7    2    13-16    17    7    385-512    27   12    12289-16384
    //   8    3    17-24    18    8    513-768    28   13    16385-24576
    //   9    3    25-32    19    8    769-1024   29   13    24577-32768

    // Initialize the mapping length (0..255) -> length code (0..28)
    //int length = 0;
    //for (code = 0; code < FastEncoderStatics.NumLengthBaseCodes-1; code++) {
    //    for (int n = 0; n < (1 << FastEncoderStatics.ExtraLengthBits[code]); n++)
    //        lengthLookup[length++] = (byte) code;
    //}
    //lengthLookup[length-1] = (byte) code;

    // Initialize the mapping dist (0..32K) -> dist code (0..29)
    int dist = 0;
    int code;
    for (code = 0; code < 16; code++)
    {
        for (int n = 0; n < (1 << ExtraDistanceBits[code]); n++)
            result[dist++] = (byte)code;
    }

    dist >>= 7; // from now on, all distances are divided by 128

    for (; code < NumDistBaseCodes; code++)
    {
        for (int n = 0; n < (1 << (ExtraDistanceBits[code] - 7)); n++)
            result[256 + dist++] = (byte)code;
    }

    return result;
}

// Return the position slot (0...29) of a match offset (0...32767)
internal static int GetSlot(int pos) =>
    s_distLookup[((pos) < 256) ? (pos) : (256 + ((pos) >> 7))];
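
// Illustrative trace (added example, not part of the original source): GetSlot uses the two
// halves of s_distLookup built above -- offsets below 256 are looked up directly, larger
// offsets are first divided by 128 and looked up in the upper half:
//   GetSlot(0)     == 0    (table row: code 0, distance 1)
//   GetSlot(300)   -> s_distLookup[256 + (300 >> 7)] == 16   (row: code 16, distances 257-384)
//   GetSlot(32767) -> s_distLookup[256 + 255]        == 29   (row: code 29, distances 24577-32768)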

// Reverse the low 'length' bits of 'code'
public static uint BitReverse(uint code, int length)
{
    uint new_code = 0;

    Debug.Assert(length > 0 && length <= 16, "Invalid len");
    do
    {
        new_code |= (code & 1);
        new_code <<= 1;
        code >>= 1;
    } while (--length > 0);

    return new_code >> 1;
}
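
// Worked example (added, not part of the original source):
//   BitReverse(0b1011, 4) == 0b1101 (13): the four low bits are emitted in reverse order;
//   the trailing 'new_code >> 1' undoes the extra shift performed on the last iteration.
// This reversal is what lets CalculateHuffmanCode (in HuffmanTree below) store canonical
// Huffman codes in the least-significant-bit-first order in which the input buffer serves bits.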
}
}
@@ -1,311 +0,0 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using System.Diagnostics;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
// Strictly speaking this class is not a HuffmanTree, this class is
|
||||
// a lookup table combined with a HuffmanTree. The idea is to speed up
|
||||
// the lookup for short symbols (they should appear more frequently ideally.)
|
||||
// However we don't want to create a huge table since it might take longer to
|
||||
// build the table than decoding (Deflate usually generates new tables frequently.)
|
||||
//
|
||||
// Jean-loup Gailly and Mark Adler gave a very good explanation about this.
|
||||
// The full text (algorithm.txt) can be found inside
|
||||
// ftp://ftp.uu.net/pub/archiving/zip/zlib/zlib.zip.
|
||||
//
|
||||
// Following paper explains decoding in details:
|
||||
// Hirschberg and Lelewer, "Efficient decoding of prefix codes,"
|
||||
// Comm. ACM, 33,4, April 1990, pp. 449-459.
|
||||
//
|
||||
|
||||
internal sealed class HuffmanTree
|
||||
{
|
||||
internal const int MaxLiteralTreeElements = 288;
|
||||
internal const int MaxDistTreeElements = 32;
|
||||
internal const int EndOfBlockCode = 256;
|
||||
internal const int NumberOfCodeLengthTreeElements = 19;
|
||||
|
||||
private readonly int _tableBits;
|
||||
private readonly short[] _table;
|
||||
private readonly short[] _left;
|
||||
private readonly short[] _right;
|
||||
private readonly byte[] _codeLengthArray;
|
||||
#if DEBUG
|
||||
private uint[] _codeArrayDebug;
|
||||
#endif
|
||||
|
||||
private readonly int _tableMask;
|
||||
|
||||
// huffman tree for static block
|
||||
public static HuffmanTree StaticLiteralLengthTree { get; } = new HuffmanTree(GetStaticLiteralTreeLength());
|
||||
|
||||
public static HuffmanTree StaticDistanceTree { get; } = new HuffmanTree(GetStaticDistanceTreeLength());
|
||||
|
||||
public HuffmanTree(byte[] codeLengths)
|
||||
{
|
||||
Debug.Assert(
|
||||
codeLengths.Length == MaxLiteralTreeElements ||
|
||||
codeLengths.Length == MaxDistTreeElements ||
|
||||
codeLengths.Length == NumberOfCodeLengthTreeElements,
|
||||
"we only expect three kinds of Length here");
|
||||
_codeLengthArray = codeLengths;
|
||||
|
||||
if (_codeLengthArray.Length == MaxLiteralTreeElements)
|
||||
{
|
||||
// bits for Literal/Length tree table
|
||||
_tableBits = 9;
|
||||
}
|
||||
else
|
||||
{
|
||||
// bits for distance tree table and code length tree table
|
||||
_tableBits = 7;
|
||||
}
|
||||
_tableMask = (1 << _tableBits) - 1;
|
||||
|
||||
_table = new short[1 << _tableBits];
|
||||
|
||||
// I need to find proof that left and right array will always be
|
||||
// enough. I think they are.
|
||||
_left = new short[2 * _codeLengthArray.Length];
|
||||
_right = new short[2 * _codeLengthArray.Length];
|
||||
|
||||
CreateTable();
|
||||
}
|
||||
|
||||
// Generates the array containing the Huffman code lengths for the static Huffman tree.
|
||||
// The data is in RFC 1951.
|
||||
private static byte[] GetStaticLiteralTreeLength()
|
||||
{
|
||||
byte[] literalTreeLength = new byte[MaxLiteralTreeElements];
|
||||
for (int i = 0; i <= 143; i++)
|
||||
literalTreeLength[i] = 8;
|
||||
|
||||
for (int i = 144; i <= 255; i++)
|
||||
literalTreeLength[i] = 9;
|
||||
|
||||
for (int i = 256; i <= 279; i++)
|
||||
literalTreeLength[i] = 7;
|
||||
|
||||
for (int i = 280; i <= 287; i++)
|
||||
literalTreeLength[i] = 8;
|
||||
|
||||
return literalTreeLength;
|
||||
}
|
||||
|
||||
private static byte[] GetStaticDistanceTreeLength()
|
||||
{
|
||||
byte[] staticDistanceTreeLength = new byte[MaxDistTreeElements];
|
||||
for (int i = 0; i < MaxDistTreeElements; i++)
|
||||
{
|
||||
staticDistanceTreeLength[i] = 5;
|
||||
}
|
||||
return staticDistanceTreeLength;
|
||||
}
|
||||
|
||||
// Calculate the huffman code for each character based on the code length for each character.
|
||||
// This algorithm is described in standard RFC 1951
|
||||
private uint[] CalculateHuffmanCode()
|
||||
{
|
||||
uint[] bitLengthCount = new uint[17];
|
||||
foreach (int codeLength in _codeLengthArray)
|
||||
{
|
||||
bitLengthCount[codeLength]++;
|
||||
}
|
||||
bitLengthCount[0] = 0; // clear count for length 0
|
||||
|
||||
uint[] nextCode = new uint[17];
|
||||
uint tempCode = 0;
|
||||
for (int bits = 1; bits <= 16; bits++)
|
||||
{
|
||||
tempCode = (tempCode + bitLengthCount[bits - 1]) << 1;
|
||||
nextCode[bits] = tempCode;
|
||||
}
|
||||
|
||||
uint[] code = new uint[MaxLiteralTreeElements];
|
||||
for (int i = 0; i < _codeLengthArray.Length; i++)
|
||||
{
|
||||
int len = _codeLengthArray[i];
|
||||
|
||||
if (len > 0)
|
||||
{
|
||||
code[i] = FastEncoderStatics.BitReverse(nextCode[len], len);
|
||||
nextCode[len]++;
|
||||
}
|
||||
}
|
||||
return code;
|
||||
}
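
// Worked example (added, not part of the original source), following RFC 1951 section 3.2.2
// for eight symbols A..H with code lengths (3, 3, 3, 3, 3, 2, 4, 4):
//   bitLengthCount: one 2-bit code, five 3-bit codes, two 4-bit codes
//   nextCode:       2 bits -> 0, 3 bits -> 2, 4 bits -> 14
//   assigned codes: F=00, A=010, B=011, C=100, D=101, E=110, G=1110, H=1111
// The method above stores each code bit-reversed (via FastEncoderStatics.BitReverse) so that
// CreateTable and GetNextSymbol can index it with the low bits of the input bit buffer.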
|
||||
|
||||
private void CreateTable()
|
||||
{
|
||||
uint[] codeArray = CalculateHuffmanCode();
|
||||
#if DEBUG
|
||||
_codeArrayDebug = codeArray;
|
||||
#endif
|
||||
|
||||
short avail = (short)_codeLengthArray.Length;
|
||||
|
||||
for (int ch = 0; ch < _codeLengthArray.Length; ch++)
|
||||
{
|
||||
// length of this code
|
||||
int len = _codeLengthArray[ch];
|
||||
if (len > 0)
|
||||
{
|
||||
// start value (bit reversed)
|
||||
int start = (int)codeArray[ch];
|
||||
|
||||
if (len <= _tableBits)
|
||||
{
|
||||
// If a particular symbol is shorter than nine bits,
|
||||
// then that symbol's translation is duplicated
|
||||
// in all those entries that start with that symbol's bits.
|
||||
// For example, if the symbol is four bits, then it's duplicated
|
||||
// 32 times in a nine-bit table. If a symbol is nine bits long,
|
||||
// it appears in the table once.
|
||||
//
|
||||
// Make sure that in the loop below, code is always
|
||||
// less than table_size.
|
||||
//
|
||||
// On last iteration we store at array index:
|
||||
// initial_start_at + (locs-1)*increment
|
||||
// = initial_start_at + locs*increment - increment
|
||||
// = initial_start_at + (1 << tableBits) - increment
|
||||
// = initial_start_at + table_size - increment
|
||||
//
|
||||
// Therefore we must ensure:
|
||||
// initial_start_at + table_size - increment < table_size
|
||||
// or: initial_start_at < increment
|
||||
//
|
||||
int increment = 1 << len;
|
||||
if (start >= increment)
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: invalid Huffman data");
|
||||
}
|
||||
|
||||
// Note: the bits in the table are stored bit-reversed.
|
||||
int locs = 1 << (_tableBits - len);
|
||||
for (int j = 0; j < locs; j++)
|
||||
{
|
||||
_table[start] = (short)ch;
|
||||
start += increment;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// For any code which has length longer than num_elements,
|
||||
// build a binary tree.
|
||||
|
||||
int overflowBits = len - _tableBits; // the number of extra bits we need to represent the data.
|
||||
int codeBitMask = 1 << _tableBits; // mask to get current bit (the bits can't fit in the table)
|
||||
|
||||
// The left and right arrays are used to represent the remaining bits. Once we have
// consumed the first part (_tableBits bits) and looked it up in the table, we follow
// the tree to find the real character. This avoids bloating the table when only a
// few symbols have long codes.
|
||||
int index = start & ((1 << _tableBits) - 1);
|
||||
short[] array = _table;
|
||||
|
||||
do
|
||||
{
|
||||
short value = array[index];
|
||||
|
||||
if (value == 0)
|
||||
{
|
||||
// set up next pointer if this node is not used before.
|
||||
array[index] = (short)-avail; // use next available slot.
|
||||
value = (short)-avail;
|
||||
avail++;
|
||||
}
|
||||
|
||||
if (value > 0)
|
||||
{
|
||||
// prevent an IndexOutOfRangeException from array[index]
|
||||
throw new InvalidDataException("Deflate64: invalid Huffman data");
|
||||
}
|
||||
|
||||
Debug.Assert(value < 0, "CreateTable: Only negative numbers are used for tree pointers!");
|
||||
|
||||
if ((start & codeBitMask) == 0)
|
||||
{
|
||||
// if current bit is 0, go change the left array
|
||||
array = _left;
|
||||
}
|
||||
else
|
||||
{
|
||||
// if current bit is 1, set value in the right array
|
||||
array = _right;
|
||||
}
|
||||
index = -value; // go to next node
|
||||
|
||||
codeBitMask <<= 1;
|
||||
overflowBits--;
|
||||
} while (overflowBits != 0);
|
||||
|
||||
array[index] = (short)ch;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
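
// Sizing example (added, not part of the original source): with _tableBits = 9, a symbol
// whose code is 4 bits long occupies locs = 1 << (9 - 4) = 32 table slots spaced
// increment = 1 << 4 = 16 apart (start, start + 16, ..., start + 496), so any 9-bit lookup
// whose low 4 bits equal the reversed code hits it directly; codes longer than 9 bits spill
// into the _left/_right tree built in the else branch above.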
|
||||
|
||||
//
|
||||
// This function will try to get enough bits from input and
|
||||
// try to decode the bits.
|
||||
// If there are not enough bits in the input, this function will return -1.
|
||||
//
|
||||
public int GetNextSymbol(InputBuffer input)
|
||||
{
|
||||
// Try to load 16 bits into input buffer if possible and get the bitBuffer value.
|
||||
// If there aren't 16 bits available we will return all we have in the
|
||||
// input buffer.
|
||||
uint bitBuffer = input.TryLoad16Bits();
|
||||
if (input.AvailableBits == 0)
|
||||
{ // running out of input.
|
||||
return -1;
|
||||
}
|
||||
|
||||
// decode an element
|
||||
int symbol = _table[bitBuffer & _tableMask];
|
||||
if (symbol < 0)
|
||||
{ // this will be the start of the binary tree
|
||||
// navigate the tree
|
||||
uint mask = (uint)1 << _tableBits;
|
||||
do
|
||||
{
|
||||
symbol = -symbol;
|
||||
if ((bitBuffer & mask) == 0)
|
||||
symbol = _left[symbol];
|
||||
else
|
||||
symbol = _right[symbol];
|
||||
mask <<= 1;
|
||||
} while (symbol < 0);
|
||||
}
|
||||
|
||||
int codeLength = _codeLengthArray[symbol];
|
||||
|
||||
// huffman code lengths must be at least 1 bit long
|
||||
if (codeLength <= 0)
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: invalid Huffman data");
|
||||
}
|
||||
|
||||
//
|
||||
// If this code is longer than the # bits we had in the bit buffer (i.e.
|
||||
// we read only part of the code), we can hit the entry in the table or the tree
|
||||
// for another symbol. However the length of another symbol will not match the
|
||||
// available bits count.
|
||||
if (codeLength > input.AvailableBits)
|
||||
{
|
||||
// We already tried to load 16 bits and maximum length is 15,
|
||||
// so this means we are running out of input.
|
||||
return -1;
|
||||
}
|
||||
|
||||
input.SkipBits(codeLength);
|
||||
return symbol;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,738 +0,0 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
//
|
||||
// zlib.h -- interface of the 'zlib' general purpose compression library
|
||||
// version 1.2.1, November 17th, 2003
|
||||
//
|
||||
// Copyright (C) 1995-2003 Jean-loup Gailly and Mark Adler
|
||||
//
|
||||
// This software is provided 'as-is', without any express or implied
|
||||
// warranty. In no event will the authors be held liable for any damages
|
||||
// arising from the use of this software.
|
||||
//
|
||||
// Permission is granted to anyone to use this software for any purpose,
|
||||
// including commercial applications, and to alter it and redistribute it
|
||||
// freely, subject to the following restrictions:
|
||||
//
|
||||
// 1. The origin of this software must not be misrepresented; you must not
|
||||
// claim that you wrote the original software. If you use this software
|
||||
// in a product, an acknowledgment in the product documentation would be
|
||||
// appreciated but is not required.
|
||||
// 2. Altered source versions must be plainly marked as such, and must not be
|
||||
// misrepresented as being the original software.
|
||||
// 3. This notice may not be removed or altered from any source distribution.
|
||||
//
|
||||
//
|
||||
|
||||
using System;
|
||||
using System.Diagnostics;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
internal sealed class InflaterManaged
|
||||
{
|
||||
// const tables used in decoding:
|
||||
|
||||
// Extra bits for length code 257 - 285.
|
||||
private static readonly byte[] s_extraLengthBits =
|
||||
{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,16 };
|
||||
|
||||
// The base length for length code 257 - 285.
|
||||
// The formula to get the real length for a length code is lengthBase[code - 257] + (value stored in extraBits)
|
||||
private static readonly int[] s_lengthBase =
|
||||
{ 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,3};
|
||||
|
||||
// The base distance for distance code 0 - 31
|
||||
// The real distance for a distance code is distanceBasePosition[code] + (value stored in extraBits)
|
||||
private static readonly int[] s_distanceBasePosition =
|
||||
{ 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,32769,49153 };
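
// Worked example (added, not part of the original source): length code 266 maps to index
// 266 - 257 = 9, so its base length is s_lengthBase[9] = 13 with s_extraLengthBits[9] = 1
// extra bit, covering match lengths 13-14; distance code 4 has base
// s_distanceBasePosition[4] = 5 with (4 - 2) >> 1 = 1 extra bit, covering distances 5-6.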
|
||||
|
||||
// code lengths for code length alphabet is stored in following order
|
||||
private static readonly byte[] s_codeOrder = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
|
||||
|
||||
private static readonly byte[] s_staticDistanceTreeTable =
|
||||
{
|
||||
0x00,0x10,0x08,0x18,0x04,0x14,0x0c,0x1c,0x02,0x12,0x0a,0x1a,
|
||||
0x06,0x16,0x0e,0x1e,0x01,0x11,0x09,0x19,0x05,0x15,0x0d,0x1d,
|
||||
0x03,0x13,0x0b,0x1b,0x07,0x17,0x0f,0x1f
|
||||
};
|
||||
|
||||
private readonly OutputWindow _output;
|
||||
private readonly InputBuffer _input;
|
||||
private HuffmanTree _literalLengthTree;
|
||||
private HuffmanTree _distanceTree;
|
||||
|
||||
private InflaterState _state;
|
||||
//private bool _hasFormatReader;
|
||||
private int _bfinal;
|
||||
private BlockType _blockType;
|
||||
|
||||
// uncompressed block
|
||||
private readonly byte[] _blockLengthBuffer = new byte[4];
|
||||
private int _blockLength;
|
||||
|
||||
// compressed block
|
||||
private int _length;
|
||||
private int _distanceCode;
|
||||
private int _extraBits;
|
||||
|
||||
private int _loopCounter;
|
||||
private int _literalLengthCodeCount;
|
||||
private int _distanceCodeCount;
|
||||
private int _codeLengthCodeCount;
|
||||
private int _codeArraySize;
|
||||
private int _lengthCode;
|
||||
|
||||
private readonly byte[] _codeList; // temporary array to store the code length for literal/Length and distance
|
||||
private readonly byte[] _codeLengthTreeCodeLength;
|
||||
private readonly bool _deflate64;
|
||||
private HuffmanTree _codeLengthTree;
|
||||
|
||||
//private IFileFormatReader _formatReader; // class to decode header and footer (e.g. gzip)
|
||||
|
||||
internal InflaterManaged(/*IFileFormatReader reader, */bool deflate64)
|
||||
{
|
||||
_output = new OutputWindow();
|
||||
_input = new InputBuffer();
|
||||
|
||||
_codeList = new byte[HuffmanTree.MaxLiteralTreeElements + HuffmanTree.MaxDistTreeElements];
|
||||
_codeLengthTreeCodeLength = new byte[HuffmanTree.NumberOfCodeLengthTreeElements];
|
||||
_deflate64 = deflate64;
|
||||
//if (reader != null)
|
||||
//{
|
||||
// _formatReader = reader;
|
||||
// _hasFormatReader = true;
|
||||
//}
|
||||
Reset();
|
||||
}
|
||||
|
||||
private void Reset()
|
||||
{
|
||||
_state = //_hasFormatReader ?
|
||||
//InflaterState.ReadingHeader : // start by reading Header info
|
||||
InflaterState.ReadingBFinal; // start by reading BFinal bit
|
||||
}
|
||||
|
||||
public void SetInput(byte[] inputBytes, int offset, int length) =>
|
||||
_input.SetInput(inputBytes, offset, length); // append the bytes
|
||||
|
||||
public bool Finished() => _state == InflaterState.Done || _state == InflaterState.VerifyingFooter;
|
||||
|
||||
public int AvailableOutput => _output.AvailableBytes;
|
||||
|
||||
public int Inflate(byte[] bytes, int offset, int length)
|
||||
{
|
||||
// Copy available bytes from the output window into 'bytes'. If the caller's buffer is
// not yet full, keep decoding until we run out of input (Decode returns false) or until
// decoding is finished.
|
||||
int count = 0;
|
||||
do
|
||||
{
|
||||
int copied = _output.CopyTo(bytes, offset, length);
|
||||
if (copied > 0)
|
||||
{
|
||||
//if (_hasFormatReader)
|
||||
//{
|
||||
// _formatReader.UpdateWithBytesRead(bytes, offset, copied);
|
||||
//}
|
||||
|
||||
offset += copied;
|
||||
count += copied;
|
||||
length -= copied;
|
||||
}
|
||||
|
||||
if (length == 0)
|
||||
{ // filled in the bytes array
|
||||
break;
|
||||
}
|
||||
// Decode will return false when more input is needed
|
||||
} while (!Finished() && Decode());
|
||||
|
||||
if (_state == InflaterState.VerifyingFooter)
|
||||
{ // finished reading CRC
|
||||
// In this case finished is true and output window has all the data.
|
||||
// But some data in output window might not be copied out.
|
||||
if (_output.AvailableBytes == 0)
|
||||
{
|
||||
//_formatReader.Validate();
|
||||
}
|
||||
}
|
||||
|
||||
return count;
|
||||
}
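
// Minimal usage sketch (added illustration, not part of the original source; 'compressed',
// 'decompressed' and 'Consume' are hypothetical). A caller appends compressed bytes with
// SetInput and drains Inflate until it is finished or makes no progress:
//
//   var inflater = new InflaterManaged(deflate64: true);
//   inflater.SetInput(compressed, 0, compressed.Length);
//   int n;
//   while ((n = inflater.Inflate(decompressed, 0, decompressed.Length)) > 0)
//   {
//       Consume(decompressed, n);
//   }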
|
||||
|
||||
// Each block of compressed data begins with 3 header bits
// containing the following data:
//    first bit       BFINAL
//    next 2 bits     BTYPE
// Note that the header bits do not necessarily begin on a byte
// boundary, since a block does not necessarily occupy an integral
// number of bytes.
// BFINAL is set if and only if this is the last block of the data
// set.
// BTYPE specifies how the data are compressed, as follows:
//    00 - no compression
//    01 - compressed with fixed Huffman codes
//    10 - compressed with dynamic Huffman codes
//    11 - reserved (error)
// The only difference between the two compressed cases is how the
// Huffman codes for the literal/length and distance alphabets are
// defined.
//
// This function returns true for success (end of block or output window is full),
// false if we are short of input.
//
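
// Worked example (added, not part of the original source): if the next header byte in the
// stream is 0x05 (binary 0000_0101) and bits are consumed least significant bit first, the
// GetBits(1)/GetBits(2) calls below yield BFINAL = 1 (last block) and BTYPE = 10 (dynamic
// Huffman codes).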
private bool Decode()
|
||||
{
|
||||
bool eob = false;
|
||||
bool result = false;
|
||||
|
||||
if (Finished())
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
//if (_hasFormatReader)
|
||||
//{
|
||||
// if (_state == InflaterState.ReadingHeader)
|
||||
// {
|
||||
// if (!_formatReader.ReadHeader(_input))
|
||||
// {
|
||||
// return false;
|
||||
// }
|
||||
// _state = InflaterState.ReadingBFinal;
|
||||
// }
|
||||
// else if (_state == InflaterState.StartReadingFooter || _state == InflaterState.ReadingFooter)
|
||||
// {
|
||||
// if (!_formatReader.ReadFooter(_input))
|
||||
// return false;
|
||||
|
||||
// _state = InflaterState.VerifyingFooter;
|
||||
// return true;
|
||||
// }
|
||||
//}
|
||||
|
||||
if (_state == InflaterState.ReadingBFinal)
|
||||
{
|
||||
// reading bfinal bit
|
||||
// Need 1 bit
|
||||
if (!_input.EnsureBitsAvailable(1))
|
||||
return false;
|
||||
|
||||
_bfinal = _input.GetBits(1);
|
||||
_state = InflaterState.ReadingBType;
|
||||
}
|
||||
|
||||
if (_state == InflaterState.ReadingBType)
|
||||
{
|
||||
// Need 2 bits
|
||||
if (!_input.EnsureBitsAvailable(2))
|
||||
{
|
||||
_state = InflaterState.ReadingBType;
|
||||
return false;
|
||||
}
|
||||
|
||||
_blockType = (BlockType)_input.GetBits(2);
|
||||
if (_blockType == BlockType.Dynamic)
|
||||
{
|
||||
_state = InflaterState.ReadingNumLitCodes;
|
||||
}
|
||||
else if (_blockType == BlockType.Static)
|
||||
{
|
||||
_literalLengthTree = HuffmanTree.StaticLiteralLengthTree;
|
||||
_distanceTree = HuffmanTree.StaticDistanceTree;
|
||||
_state = InflaterState.DecodeTop;
|
||||
}
|
||||
else if (_blockType == BlockType.Uncompressed)
|
||||
{
|
||||
_state = InflaterState.UncompressedAligning;
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: unknown block type");
|
||||
}
|
||||
}
|
||||
|
||||
if (_blockType == BlockType.Dynamic)
|
||||
{
|
||||
if (_state < InflaterState.DecodeTop)
|
||||
{
|
||||
// we are reading the header
|
||||
result = DecodeDynamicBlockHeader();
|
||||
}
|
||||
else
|
||||
{
|
||||
result = DecodeBlock(out eob); // this can return true when output is full
|
||||
}
|
||||
}
|
||||
else if (_blockType == BlockType.Static)
|
||||
{
|
||||
result = DecodeBlock(out eob);
|
||||
}
|
||||
else if (_blockType == BlockType.Uncompressed)
|
||||
{
|
||||
result = DecodeUncompressedBlock(out eob);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: unknown block type");
|
||||
}
|
||||
|
||||
//
|
||||
// If we reached the end of the block and the block we were decoding had
|
||||
// bfinal=1 (final block)
|
||||
//
|
||||
if (eob && (_bfinal != 0))
|
||||
{
|
||||
//if (_hasFormatReader)
|
||||
// _state = InflaterState.StartReadingFooter;
|
||||
//else
|
||||
_state = InflaterState.Done;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
// Format of Non-compressed blocks (BTYPE=00):
//
// Any bits of input up to the next byte boundary are ignored.
// The rest of the block consists of the following information:
//
//     0   1   2   3   4...
//   +---+---+---+---+================================+
//   |  LEN  | NLEN  |... LEN bytes of literal data...|
//   +---+---+---+---+================================+
//
// LEN is the number of data bytes in the block. NLEN is the
// one's complement of LEN.
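
// Worked example (added, not part of the original source): a stored block holding 258 bytes
// has LEN = 0x0102 and NLEN = ~LEN = 0xFEFD, serialized little-endian as the byte sequence
// 02 01 FD FE; the code below reassembles 0x02 + 0x01 * 256 = 258 and rejects the block
// unless (ushort)LEN == (ushort)~NLEN.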
private bool DecodeUncompressedBlock(out bool end_of_block)
|
||||
{
|
||||
end_of_block = false;
|
||||
while (true)
|
||||
{
|
||||
switch (_state)
|
||||
{
|
||||
case InflaterState.UncompressedAligning: // initial state when calling this function
|
||||
// we must skip to a byte boundary
|
||||
_input.SkipToByteBoundary();
|
||||
_state = InflaterState.UncompressedByte1;
|
||||
goto case InflaterState.UncompressedByte1;
|
||||
|
||||
case InflaterState.UncompressedByte1: // decoding block length
|
||||
case InflaterState.UncompressedByte2:
|
||||
case InflaterState.UncompressedByte3:
|
||||
case InflaterState.UncompressedByte4:
|
||||
int bits = _input.GetBits(8);
|
||||
if (bits < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
_blockLengthBuffer[_state - InflaterState.UncompressedByte1] = (byte)bits;
|
||||
if (_state == InflaterState.UncompressedByte4)
|
||||
{
|
||||
_blockLength = _blockLengthBuffer[0] + ((int)_blockLengthBuffer[1]) * 256;
|
||||
int blockLengthComplement = _blockLengthBuffer[2] + ((int)_blockLengthBuffer[3]) * 256;
|
||||
|
||||
// make sure complement matches
|
||||
if ((ushort)_blockLength != (ushort)(~blockLengthComplement))
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: invalid block length");
|
||||
}
|
||||
}
|
||||
|
||||
_state += 1;
|
||||
break;
|
||||
|
||||
case InflaterState.DecodingUncompressed: // copying block data
|
||||
|
||||
// Directly copy bytes from input to output.
|
||||
int bytesCopied = _output.CopyFrom(_input, _blockLength);
|
||||
_blockLength -= bytesCopied;
|
||||
|
||||
if (_blockLength == 0)
|
||||
{
|
||||
// Done with this block, need to re-init bit buffer for next block
|
||||
_state = InflaterState.ReadingBFinal;
|
||||
end_of_block = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
// We can fail to copy all bytes for two reasons:
|
||||
// Running out of Input
|
||||
// running out of free space in output window
|
||||
if (_output.FreeBytes == 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
|
||||
default:
|
||||
Debug./*Fail*/Assert(false, "check why we are here!");
|
||||
throw new InvalidDataException("Deflate64: unknown state");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private bool DecodeBlock(out bool end_of_block_code_seen)
|
||||
{
|
||||
end_of_block_code_seen = false;
|
||||
|
||||
int freeBytes = _output.FreeBytes; // it is a little bit faster than frequently accessing the property
|
||||
while (freeBytes > 65536)
|
||||
{
|
||||
// With Deflate64 we can have up to a 64kb length, so we ensure at least that much space is available
|
||||
// in the OutputWindow to avoid overwriting previous unflushed output data.
|
||||
|
||||
int symbol;
|
||||
switch (_state)
|
||||
{
|
||||
case InflaterState.DecodeTop:
|
||||
// decode an element from the literal tree
|
||||
|
||||
// TODO: optimize this!!!
|
||||
symbol = _literalLengthTree.GetNextSymbol(_input);
|
||||
if (symbol < 0)
|
||||
{
|
||||
// running out of input
|
||||
return false;
|
||||
}
|
||||
|
||||
if (symbol < 256)
|
||||
{
|
||||
// literal
|
||||
_output.Write((byte)symbol);
|
||||
--freeBytes;
|
||||
}
|
||||
else if (symbol == 256)
|
||||
{
|
||||
// end of block
|
||||
end_of_block_code_seen = true;
|
||||
// Reset state
|
||||
_state = InflaterState.ReadingBFinal;
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
// length/distance pair
|
||||
symbol -= 257; // length code started at 257
|
||||
if (symbol < 8)
|
||||
{
|
||||
symbol += 3; // match length = 3,4,5,6,7,8,9,10
|
||||
_extraBits = 0;
|
||||
}
|
||||
else if (!_deflate64 && symbol == 28)
|
||||
{
|
||||
// extra bits for code 285 is 0
|
||||
symbol = 258; // code 285 means length 258
|
||||
_extraBits = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (symbol < 0 || symbol >= s_extraLengthBits.Length)
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: invalid data");
|
||||
}
|
||||
_extraBits = s_extraLengthBits[symbol];
|
||||
Debug.Assert(_extraBits != 0, "We handle other cases separately!");
|
||||
}
|
||||
_length = symbol;
|
||||
goto case InflaterState.HaveInitialLength;
|
||||
}
|
||||
break;
|
||||
|
||||
case InflaterState.HaveInitialLength:
|
||||
if (_extraBits > 0)
|
||||
{
|
||||
_state = InflaterState.HaveInitialLength;
|
||||
int bits = _input.GetBits(_extraBits);
|
||||
if (bits < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_length < 0 || _length >= s_lengthBase.Length)
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: invalid data");
|
||||
}
|
||||
_length = s_lengthBase[_length] + bits;
|
||||
}
|
||||
_state = InflaterState.HaveFullLength;
|
||||
goto case InflaterState.HaveFullLength;
|
||||
|
||||
case InflaterState.HaveFullLength:
|
||||
if (_blockType == BlockType.Dynamic)
|
||||
{
|
||||
_distanceCode = _distanceTree.GetNextSymbol(_input);
|
||||
}
|
||||
else
|
||||
{
|
||||
// get distance code directly for static block
|
||||
_distanceCode = _input.GetBits(5);
|
||||
if (_distanceCode >= 0)
|
||||
{
|
||||
_distanceCode = s_staticDistanceTreeTable[_distanceCode];
|
||||
}
|
||||
}
|
||||
|
||||
if (_distanceCode < 0)
|
||||
{
|
||||
// running out of input
|
||||
return false;
|
||||
}
|
||||
|
||||
_state = InflaterState.HaveDistCode;
|
||||
goto case InflaterState.HaveDistCode;
|
||||
|
||||
case InflaterState.HaveDistCode:
|
||||
// To avoid a table lookup we note that for distanceCode > 3,
|
||||
// extra_bits = (distanceCode-2) >> 1
|
||||
int offset;
|
||||
if (_distanceCode > 3)
|
||||
{
|
||||
_extraBits = (_distanceCode - 2) >> 1;
|
||||
int bits = _input.GetBits(_extraBits);
|
||||
if (bits < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
offset = s_distanceBasePosition[_distanceCode] + bits;
|
||||
}
|
||||
else
|
||||
{
|
||||
offset = _distanceCode + 1;
|
||||
}
|
||||
|
||||
_output.WriteLengthDistance(_length, offset);
|
||||
freeBytes -= _length;
|
||||
_state = InflaterState.DecodeTop;
|
||||
break;
|
||||
|
||||
default:
|
||||
Debug./*Fail*/Assert(false, "check why we are here!");
|
||||
throw new InvalidDataException("Deflate64: unknown state");
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
// Format of the dynamic block header:
//      5 Bits: HLIT, # of Literal/Length codes - 257 (257 - 286)
//      5 Bits: HDIST, # of Distance codes - 1        (1 - 32)
//      4 Bits: HCLEN, # of Code Length codes - 4     (4 - 19)
//
//      (HCLEN + 4) x 3 bits: code lengths for the code length
//      alphabet given just above, in the order: 16, 17, 18,
//      0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
//
//      These code lengths are interpreted as 3-bit integers
//      (0-7); as above, a code length of 0 means the
//      corresponding symbol (literal/length or distance code
//      length) is not used.
//
//      HLIT + 257 code lengths for the literal/length alphabet,
//      encoded using the code length Huffman code
//
//      HDIST + 1 code lengths for the distance alphabet,
//      encoded using the code length Huffman code
//
// The code length repeat codes can cross from HLIT + 257 to the
// HDIST + 1 code lengths. In other words, all code lengths form
// a single sequence of HLIT + HDIST + 258 values.
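
// Worked example (added, not part of the original source): HLIT = 29 gives 29 + 257 = 286
// literal/length codes, HDIST = 31 gives 31 + 1 = 32 distance codes, and HCLEN = 15 gives
// 15 + 4 = 19 code length codes -- the maxima the reader below can encounter.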
private bool DecodeDynamicBlockHeader()
|
||||
{
|
||||
switch (_state)
|
||||
{
|
||||
case InflaterState.ReadingNumLitCodes:
|
||||
_literalLengthCodeCount = _input.GetBits(5);
|
||||
if (_literalLengthCodeCount < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
_literalLengthCodeCount += 257;
|
||||
_state = InflaterState.ReadingNumDistCodes;
|
||||
goto case InflaterState.ReadingNumDistCodes;
|
||||
|
||||
case InflaterState.ReadingNumDistCodes:
|
||||
_distanceCodeCount = _input.GetBits(5);
|
||||
if (_distanceCodeCount < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
_distanceCodeCount += 1;
|
||||
_state = InflaterState.ReadingNumCodeLengthCodes;
|
||||
goto case InflaterState.ReadingNumCodeLengthCodes;
|
||||
|
||||
case InflaterState.ReadingNumCodeLengthCodes:
|
||||
_codeLengthCodeCount = _input.GetBits(4);
|
||||
if (_codeLengthCodeCount < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
_codeLengthCodeCount += 4;
|
||||
_loopCounter = 0;
|
||||
_state = InflaterState.ReadingCodeLengthCodes;
|
||||
goto case InflaterState.ReadingCodeLengthCodes;
|
||||
|
||||
case InflaterState.ReadingCodeLengthCodes:
|
||||
while (_loopCounter < _codeLengthCodeCount)
|
||||
{
|
||||
int bits = _input.GetBits(3);
|
||||
if (bits < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
_codeLengthTreeCodeLength[s_codeOrder[_loopCounter]] = (byte)bits;
|
||||
++_loopCounter;
|
||||
}
|
||||
|
||||
for (int i = _codeLengthCodeCount; i < s_codeOrder.Length; i++)
|
||||
{
|
||||
_codeLengthTreeCodeLength[s_codeOrder[i]] = 0;
|
||||
}
|
||||
|
||||
// create huffman tree for code length
|
||||
_codeLengthTree = new HuffmanTree(_codeLengthTreeCodeLength);
|
||||
_codeArraySize = _literalLengthCodeCount + _distanceCodeCount;
|
||||
_loopCounter = 0; // reset loop count
|
||||
|
||||
_state = InflaterState.ReadingTreeCodesBefore;
|
||||
goto case InflaterState.ReadingTreeCodesBefore;
|
||||
|
||||
case InflaterState.ReadingTreeCodesBefore:
|
||||
case InflaterState.ReadingTreeCodesAfter:
|
||||
while (_loopCounter < _codeArraySize)
|
||||
{
|
||||
if (_state == InflaterState.ReadingTreeCodesBefore)
|
||||
{
|
||||
if ((_lengthCode = _codeLengthTree.GetNextSymbol(_input)) < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// The alphabet for code lengths is as follows:
|
||||
// 0 - 15: Represent code lengths of 0 - 15
|
||||
// 16: Copy the previous code length 3 - 6 times.
|
||||
// The next 2 bits indicate repeat length
|
||||
// (0 = 3, ... , 3 = 6)
|
||||
// Example: Codes 8, 16 (+2 bits 11),
|
||||
// 16 (+2 bits 10) will expand to
|
||||
// 12 code lengths of 8 (1 + 6 + 5)
|
||||
// 17: Repeat a code length of 0 for 3 - 10 times.
|
||||
// (3 bits of length)
|
||||
// 18: Repeat a code length of 0 for 11 - 138 times
|
||||
// (7 bits of length)
|
||||
if (_lengthCode <= 15)
|
||||
{
|
||||
_codeList[_loopCounter++] = (byte)_lengthCode;
|
||||
}
|
||||
else
|
||||
{
|
||||
int repeatCount;
|
||||
if (_lengthCode == 16)
|
||||
{
|
||||
if (!_input.EnsureBitsAvailable(2))
|
||||
{
|
||||
_state = InflaterState.ReadingTreeCodesAfter;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_loopCounter == 0)
|
||||
{
|
||||
// can't have "prev code" on first code
|
||||
throw new InvalidDataException();
|
||||
}
|
||||
|
||||
byte previousCode = _codeList[_loopCounter - 1];
|
||||
repeatCount = _input.GetBits(2) + 3;
|
||||
|
||||
if (_loopCounter + repeatCount > _codeArraySize)
|
||||
{
|
||||
throw new InvalidDataException();
|
||||
}
|
||||
|
||||
for (int j = 0; j < repeatCount; j++)
|
||||
{
|
||||
_codeList[_loopCounter++] = previousCode;
|
||||
}
|
||||
}
|
||||
else if (_lengthCode == 17)
|
||||
{
|
||||
if (!_input.EnsureBitsAvailable(3))
|
||||
{
|
||||
_state = InflaterState.ReadingTreeCodesAfter;
|
||||
return false;
|
||||
}
|
||||
|
||||
repeatCount = _input.GetBits(3) + 3;
|
||||
|
||||
if (_loopCounter + repeatCount > _codeArraySize)
|
||||
{
|
||||
throw new InvalidDataException();
|
||||
}
|
||||
|
||||
for (int j = 0; j < repeatCount; j++)
|
||||
{
|
||||
_codeList[_loopCounter++] = 0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// code == 18
|
||||
if (!_input.EnsureBitsAvailable(7))
|
||||
{
|
||||
_state = InflaterState.ReadingTreeCodesAfter;
|
||||
return false;
|
||||
}
|
||||
|
||||
repeatCount = _input.GetBits(7) + 11;
|
||||
|
||||
if (_loopCounter + repeatCount > _codeArraySize)
|
||||
{
|
||||
throw new InvalidDataException();
|
||||
}
|
||||
|
||||
for (int j = 0; j < repeatCount; j++)
|
||||
{
|
||||
_codeList[_loopCounter++] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
_state = InflaterState.ReadingTreeCodesBefore; // we want to read the next code.
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
Debug./*Fail*/Assert(false, "check why we are here!");
|
||||
throw new InvalidDataException("Deflate64: unknown state");
|
||||
}
|
||||
|
||||
byte[] literalTreeCodeLength = new byte[HuffmanTree.MaxLiteralTreeElements];
|
||||
byte[] distanceTreeCodeLength = new byte[HuffmanTree.MaxDistTreeElements];
|
||||
|
||||
// Create literal and distance tables
|
||||
Array.Copy(_codeList, 0, literalTreeCodeLength, 0, _literalLengthCodeCount);
|
||||
Array.Copy(_codeList, _literalLengthCodeCount, distanceTreeCodeLength, 0, _distanceCodeCount);
|
||||
|
||||
// Make sure there is an end-of-block code, otherwise how could we ever end?
|
||||
if (literalTreeCodeLength[HuffmanTree.EndOfBlockCode] == 0)
|
||||
{
|
||||
throw new InvalidDataException();
|
||||
}
|
||||
|
||||
_literalLengthTree = new HuffmanTree(literalTreeCodeLength);
|
||||
_distanceTree = new HuffmanTree(distanceTreeCodeLength);
|
||||
_state = InflaterState.DecodeTop;
|
||||
return true;
|
||||
}
|
||||
|
||||
public void Dispose() { }
|
||||
}
|
||||
}
|
||||
@@ -1,42 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

namespace SharpCompress.Compressors.Deflate64
{
    // Do not rearrange the enum values.
    internal enum InflaterState
    {
        ReadingHeader = 0, // Only applies to GZIP

        ReadingBFinal = 2, // About to read bfinal bit
        ReadingBType = 3, // About to read blockType bits

        ReadingNumLitCodes = 4, // About to read # literal codes
        ReadingNumDistCodes = 5, // About to read # dist codes
        ReadingNumCodeLengthCodes = 6, // About to read # code length codes
        ReadingCodeLengthCodes = 7, // In the middle of reading the code length codes
        ReadingTreeCodesBefore = 8, // In the middle of reading tree codes (loop top)
        ReadingTreeCodesAfter = 9, // In the middle of reading tree codes (extension; code > 15)

        DecodeTop = 10, // About to decode a literal (char/match) in a compressed block
        HaveInitialLength = 11, // Decoding a match, have the literal code (base length)
        HaveFullLength = 12, // Ditto, now have the full match length (incl. extra length bits)
        HaveDistCode = 13, // Ditto, now have the distance code also, need extra dist bits

        /* uncompressed blocks */
        UncompressedAligning = 15,
        UncompressedByte1 = 16,
        UncompressedByte2 = 17,
        UncompressedByte3 = 18,
        UncompressedByte4 = 19,
        DecodingUncompressed = 20,

        // These three apply only to GZIP
        StartReadingFooter = 21, // (Initialisation for reading footer)
        ReadingFooter = 22,
        VerifyingFooter = 23,

        Done = 24 // Finished
    }
}
@@ -1,202 +0,0 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using System;
|
||||
using System.Diagnostics;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
// This class can be used to read bits from a byte array quickly.
// Normally we get bits from the 'bitBuffer' field and 'bitsInBuffer' stores
// the number of bits available in 'bitBuffer'.
// When we have used up the bits in bitBuffer, we will try to get a byte from
// the byte array and copy the byte to the appropriate position in bitBuffer.
//
// The byte array is not reused. We will go from 'start' to 'end'.
// When we reach the end, most read operations will return -1,
// which means we are running out of input.
|
||||
|
||||
internal sealed class InputBuffer
|
||||
{
|
||||
private byte[] _buffer; // byte array to store input
|
||||
private int _start; // start position of the buffer
|
||||
private int _end; // end position of the buffer
|
||||
private uint _bitBuffer = 0; // store the bits here, we can quickly shift in this buffer
|
||||
private int _bitsInBuffer = 0; // number of bits available in bitBuffer
|
||||
|
||||
/// <summary>Total bits available in the input buffer.</summary>
|
||||
public int AvailableBits => _bitsInBuffer;
|
||||
|
||||
/// <summary>Total bytes available in the input buffer.</summary>
|
||||
public int AvailableBytes => (_end - _start) + (_bitsInBuffer / 8);
|
||||
|
||||
/// <summary>Ensure that count bits are in the bit buffer.</summary>
|
||||
/// <param name="count">Can be up to 16.</param>
|
||||
/// <returns>Returns false if input is not sufficient to make this true.</returns>
|
||||
public bool EnsureBitsAvailable(int count)
|
||||
{
|
||||
Debug.Assert(0 < count && count <= 16, "count is invalid.");
|
||||
|
||||
// manual inlining to improve perf
|
||||
if (_bitsInBuffer < count)
|
||||
{
|
||||
if (NeedsInput())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// insert a byte to bitbuffer
|
||||
_bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
|
||||
_bitsInBuffer += 8;
|
||||
|
||||
if (_bitsInBuffer < count)
|
||||
{
|
||||
if (NeedsInput())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// insert a byte to bitbuffer
|
||||
_bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
|
||||
_bitsInBuffer += 8;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// This function will try to load 16 or more bits into bitBuffer.
|
||||
/// It returns whatever is contained in bitBuffer after loading.
|
||||
/// The main difference between this and GetBits is that this will
|
||||
/// never return -1. So the caller needs to check AvailableBits to
|
||||
/// see how many bits are available.
|
||||
/// </summary>
|
||||
public uint TryLoad16Bits()
|
||||
{
|
||||
if (_bitsInBuffer < 8)
|
||||
{
|
||||
if (_start < _end)
|
||||
{
|
||||
_bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
|
||||
_bitsInBuffer += 8;
|
||||
}
|
||||
|
||||
if (_start < _end)
|
||||
{
|
||||
_bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
|
||||
_bitsInBuffer += 8;
|
||||
}
|
||||
}
|
||||
else if (_bitsInBuffer < 16)
|
||||
{
|
||||
if (_start < _end)
|
||||
{
|
||||
_bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
|
||||
_bitsInBuffer += 8;
|
||||
}
|
||||
}
|
||||
|
||||
return _bitBuffer;
|
||||
}
|
||||
|
||||
private uint GetBitMask(int count) => ((uint)1 << count) - 1;
|
||||
|
||||
/// <summary>Gets count bits from the input buffer. Returns -1 if not enough bits available.</summary>
|
||||
public int GetBits(int count)
|
||||
{
|
||||
Debug.Assert(0 < count && count <= 16, "count is invalid.");
|
||||
|
||||
if (!EnsureBitsAvailable(count))
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
int result = (int)(_bitBuffer & GetBitMask(count));
|
||||
_bitBuffer >>= count;
|
||||
_bitsInBuffer -= count;
|
||||
return result;
|
||||
}
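
// Worked example (added, not part of the original source): after SetInput with the two
// bytes { 0xCD, 0xAB }, bits are served from the low end of each byte:
//   GetBits(4) == 0xD, GetBits(4) == 0xC, GetBits(8) == 0xAB
// i.e. deflate's least-significant-bit-first packing falls out of the shift/mask above.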
|
||||
|
||||
/// <summary>
|
||||
/// Copies length bytes from input buffer to output buffer starting at output[offset].
|
||||
/// You have to make sure that the buffer is byte aligned. If not enough bytes are
|
||||
/// available, copies fewer bytes.
|
||||
/// </summary>
|
||||
/// <returns>Returns the number of bytes copied, 0 if no byte is available.</returns>
|
||||
public int CopyTo(byte[] output, int offset, int length)
|
||||
{
|
||||
Debug.Assert(output != null);
|
||||
Debug.Assert(offset >= 0);
|
||||
Debug.Assert(length >= 0);
|
||||
Debug.Assert(offset <= output.Length - length);
|
||||
Debug.Assert((_bitsInBuffer % 8) == 0);
|
||||
|
||||
// Copy the bytes in bitBuffer first.
|
||||
int bytesFromBitBuffer = 0;
|
||||
while (_bitsInBuffer > 0 && length > 0)
|
||||
{
|
||||
output[offset++] = (byte)_bitBuffer;
|
||||
_bitBuffer >>= 8;
|
||||
_bitsInBuffer -= 8;
|
||||
length--;
|
||||
bytesFromBitBuffer++;
|
||||
}
|
||||
|
||||
if (length == 0)
|
||||
{
|
||||
return bytesFromBitBuffer;
|
||||
}
|
||||
|
||||
int avail = _end - _start;
|
||||
if (length > avail)
|
||||
{
|
||||
length = avail;
|
||||
}
|
||||
|
||||
Array.Copy(_buffer, _start, output, offset, length);
|
||||
_start += length;
|
||||
return bytesFromBitBuffer + length;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Return true if all input bytes are used.
|
||||
/// This means the caller can call SetInput to add more input.
|
||||
/// </summary>
|
||||
public bool NeedsInput() => _start == _end;
|
||||
|
||||
/// <summary>
|
||||
/// Set the byte array to be processed.
|
||||
/// All the bits remained in bitBuffer will be processed before the new bytes.
|
||||
/// We don't clone the byte array here since it is expensive.
|
||||
/// The caller should make sure that, once a buffer has been passed in,
/// it is not changed before this function is called again.
|
||||
/// </summary>
|
||||
public void SetInput(byte[] buffer, int offset, int length)
|
||||
{
|
||||
Debug.Assert(buffer != null);
|
||||
Debug.Assert(offset >= 0);
|
||||
Debug.Assert(length >= 0);
|
||||
Debug.Assert(offset <= buffer.Length - length);
|
||||
Debug.Assert(_start == _end);
|
||||
|
||||
_buffer = buffer;
|
||||
_start = offset;
|
||||
_end = offset + length;
|
||||
}
|
||||
|
||||
/// <summary>Skip n bits in the buffer.</summary>
|
||||
public void SkipBits(int n)
|
||||
{
|
||||
Debug.Assert(_bitsInBuffer >= n, "No enough bits in the buffer, Did you call EnsureBitsAvailable?");
|
||||
_bitBuffer >>= n;
|
||||
_bitsInBuffer -= n;
|
||||
}
|
||||
|
||||
/// <summary>Skips to the next byte boundary.</summary>
|
||||
public void SkipToByteBoundary()
|
||||
{
|
||||
_bitBuffer >>= (_bitsInBuffer % 8);
|
||||
_bitsInBuffer = _bitsInBuffer - (_bitsInBuffer % 8);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

namespace SharpCompress.Compressors.Deflate64
{
    /// <summary>
    /// This class represents a match in the history window.
    /// </summary>
    internal sealed class Match
    {
        internal MatchState State { get; set; }
        internal int Position { get; set; }
        internal int Length { get; set; }
        internal byte Symbol { get; set; }
    }
}
@@ -1,13 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

namespace SharpCompress.Compressors.Deflate64
{
    internal enum MatchState
    {
        HasSymbol = 1,
        HasMatch = 2,
        HasSymbolAndMatch = 3
    }
}
@@ -1,151 +0,0 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using System;
|
||||
using System.Diagnostics;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
/// <summary>
|
||||
/// This class maintains a window for decompressed output.
|
||||
/// We need to keep this because the decompressed information can be
|
||||
/// a literal or a length/distance pair. For length/distance pair,
|
||||
/// we need to look back in the output window and copy bytes from there.
|
||||
/// We use a byte array of WindowSize circularly.
|
||||
/// </summary>
|
||||
internal sealed class OutputWindow
|
||||
{
|
||||
// With Deflate64 we can have up to a 65536 length as well as up to a 65538 distance. This means we need a Window that is at
|
||||
// least 131074 bytes long so we have space to retrieve up to a full 64kb in lookback and place it in our buffer without
|
||||
// overwriting existing data. OutputWindow requires that the WindowSize be a power of 2, so we round up to 2^18.
|
||||
private const int WindowSize = 262144;
|
||||
private const int WindowMask = 262143;
|
||||
|
||||
private readonly byte[] _window = new byte[WindowSize]; // The window is 2^18 bytes
|
||||
private int _end; // this is the position to where we should write next byte
|
||||
private int _bytesUsed; // The number of bytes in the output window which is not consumed.
|
||||
|
||||
/// <summary>Add a byte to output window.</summary>
|
||||
public void Write(byte b)
|
||||
{
|
||||
Debug.Assert(_bytesUsed < WindowSize, "Can't add byte when window is full!");
|
||||
_window[_end++] = b;
|
||||
_end &= WindowMask;
|
||||
++_bytesUsed;
|
||||
}
|
||||
|
||||
public void WriteLengthDistance(int length, int distance)
|
||||
{
|
||||
Debug.Assert((_bytesUsed + length) <= WindowSize, "No Enough space");
|
||||
|
||||
// move backwards distance bytes in the output stream,
|
||||
// and copy length bytes from this position to the output stream.
|
||||
_bytesUsed += length;
|
||||
int copyStart = (_end - distance) & WindowMask; // start position for copying.
|
||||
|
||||
int border = WindowSize - length;
|
||||
if (copyStart <= border && _end < border)
|
||||
{
|
||||
if (length <= distance)
|
||||
{
|
||||
Array.Copy(_window, copyStart, _window, _end, length);
|
||||
_end += length;
|
||||
}
|
||||
else
|
||||
{
|
||||
// The referenced string may overlap the current
|
||||
// position; for example, if the last 2 bytes decoded have values
|
||||
// X and Y, a string reference with <length = 5, distance = 2>
|
||||
// adds X,Y,X,Y,X to the output stream.
|
||||
while (length-- > 0)
|
||||
{
|
||||
_window[_end++] = _window[copyStart++];
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// copy byte by byte
|
||||
while (length-- > 0)
|
||||
{
|
||||
_window[_end++] = _window[copyStart++];
|
||||
_end &= WindowMask;
|
||||
copyStart &= WindowMask;
|
||||
}
|
||||
}
|
||||
}
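
// Worked example (added, not part of the original source): if the last two bytes written
// were X and Y, WriteLengthDistance(5, 2) copies byte by byte because length > distance
// and emits X, Y, X, Y, X -- the self-referencing copy described in the comment above.
// The '& WindowMask' updates keep _end and copyStart inside the 2^18-byte circular window,
// e.g. an _end of 262143 wraps to 0 on the next write.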
|
||||
|
||||
/// <summary>
|
||||
/// Copy up to length of bytes from input directly.
|
||||
/// This is used for uncompressed block.
|
||||
/// </summary>
|
||||
public int CopyFrom(InputBuffer input, int length)
|
||||
{
|
||||
length = Math.Min(Math.Min(length, WindowSize - _bytesUsed), input.AvailableBytes);
|
||||
int copied;
|
||||
|
||||
// We might need to wrap around to copy all bytes.
|
||||
int tailLen = WindowSize - _end;
|
||||
if (length > tailLen)
|
||||
{
|
||||
// copy the first part
|
||||
copied = input.CopyTo(_window, _end, tailLen);
|
||||
if (copied == tailLen)
|
||||
{
|
||||
// only try to copy the second part if we have enough bytes in input
|
||||
copied += input.CopyTo(_window, 0, length - tailLen);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// only one copy is needed if there is no wrap around.
|
||||
copied = input.CopyTo(_window, _end, length);
|
||||
}
|
||||
|
||||
_end = (_end + copied) & WindowMask;
|
||||
_bytesUsed += copied;
|
||||
return copied;
|
||||
}
|
||||
|
||||
/// <summary>Free space in output window.</summary>
|
||||
public int FreeBytes => WindowSize - _bytesUsed;
|
||||
|
||||
/// <summary>Bytes not consumed in output window.</summary>
|
||||
public int AvailableBytes => _bytesUsed;
|
||||
|
||||
/// <summary>Copy the decompressed bytes to output array.</summary>
|
||||
public int CopyTo(byte[] output, int offset, int length)
|
||||
{
|
||||
int copy_end;
|
||||
|
||||
if (length > _bytesUsed)
|
||||
{
|
||||
// we can copy all the decompressed bytes out
|
||||
copy_end = _end;
|
||||
length = _bytesUsed;
|
||||
}
|
||||
else
|
||||
{
|
||||
copy_end = (_end - _bytesUsed + length) & WindowMask; // copy length of bytes
|
||||
}
|
||||
|
||||
int copied = length;
|
||||
|
||||
int tailLen = length - copy_end;
|
||||
if (tailLen > 0)
|
||||
{
|
||||
// this means we need to copy two parts separately
|
||||
// copy tailLen bytes from the end of output window
|
||||
Array.Copy(_window, WindowSize - tailLen,
|
||||
output, offset, tailLen);
|
||||
offset += tailLen;
|
||||
length = copy_end;
|
||||
}
|
||||
Array.Copy(_window, copy_end - length, output, offset, length);
|
||||
_bytesUsed -= copied;
|
||||
Debug.Assert(_bytesUsed >= 0, "check this function and find why we copied more bytes than we have");
|
||||
return copied;
|
||||
}
|
||||
}
|
||||
}
@@ -58,7 +58,7 @@ namespace SharpCompress.Compressors.LZMA
{
if (index < 0 || index >= Length)
{
throw new ArgumentOutOfRangeException(nameof(index));
throw new ArgumentOutOfRangeException("index");
}

return (mBits[index >> 5] & (1u << (index & 31))) != 0;
@@ -69,7 +69,7 @@ namespace SharpCompress.Compressors.LZMA
{
if (index < 0 || index >= Length)
{
throw new ArgumentOutOfRangeException(nameof(index));
throw new ArgumentOutOfRangeException("index");
}

mBits[index >> 5] |= 1u << (index & 31);
@@ -79,7 +79,7 @@ namespace SharpCompress.Compressors.LZMA
{
if (index < 0 || index >= Length)
{
throw new ArgumentOutOfRangeException(nameof(index));
throw new ArgumentOutOfRangeException("index");
}

uint bits = mBits[index >> 5];
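As an aside on the BitVector hunks above, `index >> 5` selects the 32-bit word and `index & 31` the bit inside it; the same addressing can be seen in a tiny self-contained sketch (array length and index are arbitrary):

```csharp
using System;

class PackedBitsDemo
{
    static void Main()
    {
        uint[] bits = new uint[2];   // 64 bits of storage
        int index = 37;              // lives in word 37 >> 5 == 1, bit 37 & 31 == 5

        bits[index >> 5] |= 1u << (index & 31);                       // set
        bool isSet = (bits[index >> 5] & (1u << (index & 31))) != 0;  // test

        Console.WriteLine(isSet); // True
    }
}
```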

@@ -1,8 +1,5 @@
using System;
using System.IO;
using SharpCompress.Converters;
using SharpCompress.Crypto;
using SharpCompress.IO;

namespace SharpCompress.Compressors.LZMA
{
@@ -17,62 +14,29 @@ namespace SharpCompress.Compressors.LZMA
public class LZipStream : Stream
{
private readonly Stream stream;
private readonly CountingWritableSubStream rawStream;
private bool disposed;
private readonly bool leaveOpen;
private bool finished;

private long writeCount;

public LZipStream(Stream stream, CompressionMode mode, bool leaveOpen = false)
public LZipStream(Stream stream, CompressionMode mode)
: this(stream, mode, false)
{
Mode = mode;
this.leaveOpen = leaveOpen;

if (mode == CompressionMode.Decompress)
{
int dSize = ValidateAndReadSize(stream);
if (dSize == 0)
{
throw new IOException("Not an LZip stream");
}
byte[] properties = GetProperties(dSize);
this.stream = new LzmaStream(properties, stream);
}
else
{
//default
int dSize = 104 * 1024;
WriteHeaderSize(stream);

rawStream = new CountingWritableSubStream(stream);
this.stream = new Crc32Stream(new LzmaStream(new LzmaEncoderProperties(true, dSize), false, rawStream));
}
}

public void Finish()
public LZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
{
if (!finished)
if (mode != CompressionMode.Decompress)
{
if (Mode == CompressionMode.Compress)
{
var crc32Stream = (Crc32Stream)stream;
crc32Stream.WrappedStream.Dispose();
crc32Stream.Dispose();
var compressedCount = rawStream.Count;

var bytes = DataConverter.LittleEndian.GetBytes(crc32Stream.Crc);
rawStream.Write(bytes, 0, bytes.Length);

bytes = DataConverter.LittleEndian.GetBytes(writeCount);
rawStream.Write(bytes, 0, bytes.Length);

//total with headers
bytes = DataConverter.LittleEndian.GetBytes(compressedCount + 6 + 20);
rawStream.Write(bytes, 0, bytes.Length);
}
finished = true;
throw new NotImplementedException("Only LZip decompression is currently supported");
}
Mode = mode;
this.leaveOpen = leaveOpen;
int dictionarySize = ValidateAndReadSize(stream);
if (dictionarySize == 0)
{
throw new IOException("Not an LZip stream");
}
byte[] properties = GetProperties(dictionarySize);
this.stream = new LzmaStream(properties, stream);
}

#region Stream methods
@@ -84,23 +48,19 @@ namespace SharpCompress.Compressors.LZMA
return;
}
disposed = true;
if (disposing)
if (disposing && !leaveOpen)
{
Finish();
if (!leaveOpen)
{
rawStream.Dispose();
}
stream.Dispose();
}
}

public CompressionMode Mode { get; }

public override bool CanRead => Mode == CompressionMode.Decompress;
public override bool CanRead => stream.CanRead;

public override bool CanSeek => false;

public override bool CanWrite => Mode == CompressionMode.Compress;
public override bool CanWrite => false;

public override void Flush()
{
@@ -115,16 +75,20 @@ namespace SharpCompress.Compressors.LZMA

public override int Read(byte[] buffer, int offset, int count) => stream.Read(buffer, offset, count);

public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
public override long Seek(long offset, SeekOrigin origin)
{
throw new NotSupportedException();
}

public override void SetLength(long value) => throw new NotImplementedException();
public override void SetLength(long value)
{
throw new NotImplementedException();
}

public override void Write(byte[] buffer, int offset, int count)
{
stream.Write(buffer, offset, count);
writeCount += count;
throw new NotImplementedException();
}

#endregion

/// <summary>
@@ -141,7 +105,7 @@ namespace SharpCompress.Compressors.LZMA
/// couldn't be read or it isn't a valid LZIP header, or the dictionary
/// size if it *is* a valid LZIP file.
/// </summary>
public static int ValidateAndReadSize(Stream stream)
private static int ValidateAndReadSize(Stream stream)
{
if (stream == null)
{
@@ -167,17 +131,6 @@ namespace SharpCompress.Compressors.LZMA
return (1 << basePower) - subtractionNumerator * (1 << (basePower - 4));
}
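The return expression above decodes the lzip coded dictionary-size byte: per the lzip format, the low five bits are the base-2 exponent of a base size and the high three bits are how many sixteenths of that base size to subtract. How `basePower` and `subtractionNumerator` are extracted is not shown in this hunk, so the bit masks below are an assumption, but they are consistent with both the formula and the hard-coded header byte 113 that `WriteHeaderSize` emits:

```csharp
using System;

class LZipDictSizeDemo
{
    static void Main()
    {
        byte coded = 113; // 0x71, the byte written by WriteHeaderSize

        int basePower = coded & 0x1F;          // low 5 bits  -> 17 (assumed extraction)
        int subtractionNumerator = coded >> 5; // high 3 bits -> 3  (assumed extraction)

        // (1 << 17) - 3 * (1 << 13) = 131072 - 24576 = 106496 == 104 * 1024,
        // matching the encoder's default dSize of 104 KiB seen earlier in the diff.
        int dictionarySize = (1 << basePower) - subtractionNumerator * (1 << (basePower - 4));
        Console.WriteLine(dictionarySize);
    }
}
```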

public static void WriteHeaderSize(Stream stream)
{
if (stream == null)
{
throw new ArgumentNullException(nameof(stream));
}
// hard coding the dictionary size encoding
byte[] header = new byte[6] {(byte)'L', (byte)'Z', (byte)'I', (byte)'P', 1, 113};
stream.Write(header, 0, 6);
}

/// <summary>
/// Creates a byte array to communicate the parameters and dictionary size to LzmaStream.
/// </summary>
@@ -141,7 +141,10 @@ namespace SharpCompress.Compressors.LZMA
{
position = encoder.Code(null, true);
}
inputStream?.Dispose();
if (inputStream != null)
{
inputStream.Dispose();
}
}
base.Dispose(disposing);
}

@@ -58,22 +58,22 @@ namespace SharpCompress.Compressors.LZMA.Utilites
{
if (stream == null)
{
throw new ArgumentNullException(nameof(stream));
throw new ArgumentNullException("stream");
}

if (buffer == null)
{
throw new ArgumentNullException(nameof(buffer));
throw new ArgumentNullException("buffer");
}

if (offset < 0 || offset > buffer.Length)
{
throw new ArgumentOutOfRangeException(nameof(offset));
throw new ArgumentOutOfRangeException("offset");
}

if (length < 0 || length > buffer.Length - offset)
{
throw new ArgumentOutOfRangeException(nameof(length));
throw new ArgumentOutOfRangeException("length");
}

while (length > 0)

@@ -146,12 +146,12 @@ namespace SharpCompress.Compressors.PPMd.I1
{
if (target == null)
{
throw new ArgumentNullException(nameof(target));
throw new ArgumentNullException("target");
}

if (source == null)
{
throw new ArgumentNullException(nameof(source));
throw new ArgumentNullException("source");
}

EncodeStart(properties);
@@ -235,12 +235,12 @@ namespace SharpCompress.Compressors.PPMd.I1
{
if (target == null)
{
throw new ArgumentNullException(nameof(target));
throw new ArgumentNullException("target");
}

if (source == null)
{
throw new ArgumentNullException(nameof(source));
throw new ArgumentNullException("source");
}

DecodeStart(source, properties);

@@ -7,7 +7,7 @@ namespace SharpCompress.Compressors.Rar.Decode
Dif = new int[11];
}

internal int[] Dif { get; }
internal int[] Dif { get; private set; }
internal int ByteCount { get; set; }
internal int D1 { get; set; }

@@ -17,17 +17,17 @@ namespace SharpCompress.Compressors.Rar.Decode
/// <summary> returns the decode Length array</summary>
/// <returns> decodeLength
/// </returns>
internal int[] DecodeLen { get; }
internal int[] DecodeLen { get; private set; }

/// <summary> returns the decode num array</summary>
/// <returns> decodeNum
/// </returns>
internal int[] DecodeNum { get; }
internal int[] DecodeNum { get; private set; }

/// <summary> returns the decodePos array</summary>
/// <returns> decodePos
/// </returns>
internal int[] DecodePos { get; }
internal int[] DecodePos { get; private set; }

internal int MaxNum { get; set; }
}

@@ -31,7 +31,7 @@ namespace SharpCompress.Compressors.Rar {
{
currentCrc = RarCRC.CheckCrc(currentCrc, buffer, offset, result);
}
else if (GetCrc() != readStream.CurrentCrc && count != 0)
else if (GetCrc() != readStream.CurrentCrc)
{
// NOTE: we use the last FileHeader in a multipart volume to check CRC
throw new InvalidFormatException("file crc mismatch");

@@ -10,8 +10,8 @@ namespace SharpCompress.Compressors.Rar.VM

internal VMCommands OpCode { get; set; }
internal bool IsByteMode { get; set; }
internal VMPreparedOperand Op1 { get; }
internal VMPreparedOperand Op1 { get; private set; }

internal VMPreparedOperand Op2 { get; }
internal VMPreparedOperand Op2 { get; private set; }
}
}
@@ -9,10 +9,10 @@ namespace SharpCompress.Compressors.Rar.VM
Type = type;
}

internal int Length { get; }
internal int Length { get; private set; }

internal uint CRC { get; }
internal uint CRC { get; private set; }

internal VMStandardFilters Type { get; }
internal VMStandardFilters Type { get; private set; }
}
}
@@ -1,54 +0,0 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
public static class BinaryUtils
{
public static int ReadLittleEndianInt32(this BinaryReader reader)
{
byte[] bytes = reader.ReadBytes(4);
return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
}

internal static uint ReadLittleEndianUInt32(this BinaryReader reader)
{
return unchecked((uint)ReadLittleEndianInt32(reader));
}
public static int ReadLittleEndianInt32(this Stream stream)
{
byte[] bytes = new byte[4];
var read = stream.ReadFully(bytes);
if (!read)
{
throw new EndOfStreamException();
}
return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
}

internal static uint ReadLittleEndianUInt32(this Stream stream)
{
return unchecked((uint)ReadLittleEndianInt32(stream));
}

internal static byte[] ToBigEndianBytes(this uint uint32)
{
var result = BitConverter.GetBytes(uint32);

if (BitConverter.IsLittleEndian)
Array.Reverse(result);

return result;
}

internal static byte[] ToLittleEndianBytes(this uint uint32)
{
var result = BitConverter.GetBytes(uint32);

if (!BitConverter.IsLittleEndian)
Array.Reverse(result);

return result;
}
}
}
@@ -1,10 +0,0 @@
namespace SharpCompress.Compressors.Xz
{
public enum CheckType : byte
{
NONE = 0x00,
CRC32 = 0x01,
CRC64 = 0x04,
SHA256 = 0x0A
}
}
@@ -1,60 +0,0 @@
using System;
using System.Collections.Generic;

namespace SharpCompress.Compressors.Xz
{
internal static class Crc32
{
public const UInt32 DefaultPolynomial = 0xedb88320u;
public const UInt32 DefaultSeed = 0xffffffffu;

static UInt32[] defaultTable;

public static UInt32 Compute(byte[] buffer)
{
return Compute(DefaultSeed, buffer);
}

public static UInt32 Compute(UInt32 seed, byte[] buffer)
{
return Compute(DefaultPolynomial, seed, buffer);
}

public static UInt32 Compute(UInt32 polynomial, UInt32 seed, byte[] buffer)
{
return ~CalculateHash(InitializeTable(polynomial), seed, buffer, 0, buffer.Length);
}

static UInt32[] InitializeTable(UInt32 polynomial)
{
if (polynomial == DefaultPolynomial && defaultTable != null)
return defaultTable;

var createTable = new UInt32[256];
for (var i = 0; i < 256; i++)
{
var entry = (UInt32)i;
for (var j = 0; j < 8; j++)
if ((entry & 1) == 1)
entry = (entry >> 1) ^ polynomial;
else
entry = entry >> 1;
createTable[i] = entry;
}

if (polynomial == DefaultPolynomial)
defaultTable = createTable;

return createTable;
}

static UInt32 CalculateHash(UInt32[] table, UInt32 seed, IList<byte> buffer, int start, int size)
{
var crc = seed;
for (var i = start; i < size - start; i++)
crc = (crc >> 8) ^ table[buffer[i] ^ crc & 0xff];
return crc;
}

}
}
@@ -1,57 +0,0 @@
using System;
using System.Collections.Generic;

namespace SharpCompress.Compressors.Xz
{
internal static class Crc64
{
public const UInt64 DefaultSeed = 0x0;

internal static UInt64[] Table;

public const UInt64 Iso3309Polynomial = 0xD800000000000000;

public static UInt64 Compute(byte[] buffer)
{
return Compute(DefaultSeed, buffer);
}

public static UInt64 Compute(UInt64 seed, byte[] buffer)
{
if (Table == null)
Table = CreateTable(Iso3309Polynomial);

return CalculateHash(seed, Table, buffer, 0, buffer.Length);
}

public static UInt64 CalculateHash(UInt64 seed, UInt64[] table, IList<byte> buffer, int start, int size)
{
var crc = seed;

for (var i = start; i < size; i++)
unchecked
{
crc = (crc >> 8) ^ table[(buffer[i] ^ crc) & 0xff];
}

return crc;
}

public static ulong[] CreateTable(ulong polynomial)
{
var createTable = new UInt64[256];
for (var i = 0; i < 256; ++i)
{
var entry = (UInt64)i;
for (var j = 0; j < 8; ++j)
if ((entry & 1) == 1)
entry = (entry >> 1) ^ polynomial;
else
entry = entry >> 1;
createTable[i] = entry;
}
return createTable;
}
}

}
@@ -1,53 +0,0 @@
using System;
using System.Collections.Generic;
using System.IO;

namespace SharpCompress.Compressors.Xz.Filters
{
internal abstract class BlockFilter : ReadOnlyStream
{
public enum FilterTypes : ulong
{
DELTA = 0x03,
ARCH_x86_FILTER = 0x04,
ARCH_PowerPC_FILTER = 0x05,
ARCH_IA64_FILTER = 0x06,
ARCH_ARM_FILTER = 0x07,
ARCH_ARMTHUMB_FILTER = 0x08,
ARCH_SPARC_FILTER = 0x09,
LZMA2 = 0x21,
}

static Dictionary<FilterTypes, Type> FilterMap = new Dictionary<FilterTypes, Type>()
{
{FilterTypes.LZMA2, typeof(Lzma2Filter) }
};

public abstract bool AllowAsLast { get; }
public abstract bool AllowAsNonLast { get; }
public abstract bool ChangesDataSize { get; }

public BlockFilter() { }

public abstract void Init(byte[] properties);
public abstract void ValidateFilter();

public FilterTypes FilterType { get; set; }
public static BlockFilter Read(BinaryReader reader)
{
var filterType = (FilterTypes)reader.ReadXZInteger();
if (!FilterMap.ContainsKey(filterType))
throw new NotImplementedException($"Filter {filterType} has not yet been implemented");
var filter = Activator.CreateInstance(FilterMap[filterType]) as BlockFilter;

var sizeOfProperties = reader.ReadXZInteger();
if (sizeOfProperties > int.MaxValue)
throw new InvalidDataException("Block filter information too large");
byte[] properties = reader.ReadBytes((int)sizeOfProperties);
filter.Init(properties);
return filter;
}

public abstract void SetBaseStream(Stream stream);
}
}
@@ -1,54 +0,0 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz.Filters
{
internal class Lzma2Filter : BlockFilter
{
public override bool AllowAsLast => true;
public override bool AllowAsNonLast => false;
public override bool ChangesDataSize => true;

byte _dictionarySize;
public uint DictionarySize
{
get
{
if (_dictionarySize > 40)
throw new OverflowException("Dictionary size greater than UInt32.Max");
if (_dictionarySize == 40)
{
return uint.MaxValue;
}
int mantissa = 2 | (_dictionarySize & 1);
int exponent = _dictionarySize / 2 + 11;
return (uint)mantissa << exponent;
}
}

public override void Init(byte[] properties)
{
if (properties.Length != 1)
throw new InvalidDataException("LZMA properties unexpected length");

_dictionarySize = (byte)(properties[0] & 0x3F);
var reserved = properties[0] & 0xC0;
if (reserved != 0)
throw new InvalidDataException("Reserved bits used in LZMA properties");
}

public override void ValidateFilter()
{
}

public override void SetBaseStream(Stream stream)
{
BaseStream = new SharpCompress.Compressors.LZMA.LzmaStream(new[] { _dictionarySize }, stream);
}

public override int Read(byte[] buffer, int offset, int count)
{
return BaseStream.Read(buffer, offset, count);
}
}
}
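The `DictionarySize` getter above expands the single LZMA2 dictionary-size property byte into a real size: the mantissa is 2 or 3 depending on the low bit, the exponent grows by one every two steps, and 40 means the 4 GiB - 1 maximum. A short sketch reproducing that mapping for a few inputs (the helper name and sample values are illustrative):

```csharp
using System;

class Lzma2DictSizeDemo
{
    // Mirrors the mantissa/exponent mapping in the Lzma2Filter getter above.
    static uint Decode(byte encoded)
    {
        if (encoded == 40) return uint.MaxValue;  // special case: 4 GiB - 1
        int mantissa = 2 | (encoded & 1);         // 2 or 3
        int exponent = encoded / 2 + 11;
        return (uint)mantissa << exponent;
    }

    static void Main()
    {
        Console.WriteLine(Decode(0));   // 4096    (4 KiB)
        Console.WriteLine(Decode(18));  // 2097152 (2 MiB)
        Console.WriteLine(Decode(19));  // 3145728 (3 MiB)
    }
}
```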
@@ -1,32 +0,0 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
internal static class MultiByteIntegers
{
public static ulong ReadXZInteger(this BinaryReader reader, int MaxBytes = 9)
{
if (MaxBytes <= 0)
throw new ArgumentOutOfRangeException();
if (MaxBytes > 9)
MaxBytes = 9;

byte LastByte = reader.ReadByte();
ulong Output = (ulong)LastByte & 0x7F;

int i = 0;
while ((LastByte & 0x80) != 0)
{
if (++i >= MaxBytes)
throw new InvalidDataException();
LastByte = reader.ReadByte();
if (LastByte == 0)
throw new InvalidDataException();

Output |= ((ulong)(LastByte & 0x7F)) << (i * 7);
}
return Output;
}
}
}
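`ReadXZInteger` above decodes XZ's multi-byte integers: seven payload bits per byte, least-significant group first, with the high bit of each byte flagging that another byte follows. A minimal decode over an in-memory array (the helper name and the sample value 300 are illustrative):

```csharp
using System;

class XzVarintDemo
{
    // Same 7-bits-per-byte, MSB-is-continuation scheme as ReadXZInteger.
    static ulong Decode(byte[] data)
    {
        int pos = 0;
        byte b = data[pos++];
        ulong value = (ulong)b & 0x7F;
        int i = 0;
        while ((b & 0x80) != 0)
        {
            i++;
            b = data[pos++];
            value |= ((ulong)(b & 0x7F)) << (i * 7);
        }
        return value;
    }

    static void Main()
    {
        // 300 = 0b1_0010_1100: low 7 bits 0x2C plus the continuation flag (0xAC), then 0x02.
        Console.WriteLine(Decode(new byte[] { 0xAC, 0x02 })); // 300
    }
}
```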
@@ -1,44 +0,0 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
public abstract class ReadOnlyStream : Stream
{
public Stream BaseStream { get; protected set; }

public override bool CanRead => BaseStream.CanRead;

public override bool CanSeek => false;

public override bool CanWrite => false;

public override long Length => throw new NotSupportedException();

public override long Position
{
get => throw new NotSupportedException();
set => throw new NotSupportedException();
}

public override void Flush()
{
throw new NotSupportedException();
}

public override long Seek(long offset, SeekOrigin origin)
{
throw new NotSupportedException();
}

public override void SetLength(long value)
{
throw new NotSupportedException();
}

public override void Write(byte[] buffer, int offset, int count)
{
throw new NotSupportedException();
}
}
}
@@ -1,165 +0,0 @@
using System.Collections.Generic;
using System.IO;
using System.Linq;
using SharpCompress.Compressors.Xz.Filters;

namespace SharpCompress.Compressors.Xz
{
internal sealed class XZBlock : XZReadOnlyStream
{
public int BlockHeaderSize => (_blockHeaderSizeByte + 1) * 4;
public ulong? CompressedSize { get; private set; }
public ulong? UncompressedSize { get; private set; }
public Stack<BlockFilter> Filters { get; private set; } = new Stack<BlockFilter>();
public bool HeaderIsLoaded { get; private set; }
private CheckType _checkType;
private int _checkSize;
private bool _streamConnected;
private int _numFilters;
private byte _blockHeaderSizeByte;
private Stream _decomStream;
private bool _endOfStream;
private bool _paddingSkipped;
private bool _crcChecked;
private ulong _bytesRead;

public XZBlock(Stream stream, CheckType checkType, int checkSize) : base(stream)
{
_checkType = checkType;
_checkSize = checkSize;
}

public override int Read(byte[] buffer, int offset, int count)
{
int bytesRead = 0;
if (!HeaderIsLoaded)
LoadHeader();
if (!_streamConnected)
ConnectStream();
if (!_endOfStream)
bytesRead = _decomStream.Read(buffer, offset, count);
if (bytesRead != count)
_endOfStream = true;
if (_endOfStream && !_paddingSkipped)
SkipPadding();
if (_endOfStream && !_crcChecked)
CheckCrc();
_bytesRead += (ulong)bytesRead;
return bytesRead;
}

private void SkipPadding()
{
int bytes = (int)(BaseStream.Position % 4);
if (bytes > 0)
{
byte[] paddingBytes = new byte[4 - bytes];
BaseStream.Read(paddingBytes, 0, paddingBytes.Length);
if (paddingBytes.Any(b => b != 0))
throw new InvalidDataException("Padding bytes were non-null");
}
_paddingSkipped = true;
}

private void CheckCrc()
{
byte[] crc = new byte[_checkSize];
BaseStream.Read(crc, 0, _checkSize);
// Actually do a check (and read in the bytes
// into the function throughout the stream read).
_crcChecked = true;
}

private void ConnectStream()
{
_decomStream = BaseStream;
while (Filters.Any())
{
var filter = Filters.Pop();
filter.SetBaseStream(_decomStream);
_decomStream = filter;
}
_streamConnected = true;
}

private void LoadHeader()
{
ReadHeaderSize();
byte[] headerCache = CacheHeader();

using (var cache = new MemoryStream(headerCache))
using (var cachedReader = new BinaryReader(cache))
{
cachedReader.BaseStream.Position = 1; // skip the header size byte
ReadBlockFlags(cachedReader);
ReadFilters(cachedReader);
}
HeaderIsLoaded = true;
}

private void ReadHeaderSize()
{
_blockHeaderSizeByte = (byte)BaseStream.ReadByte();
if (_blockHeaderSizeByte == 0)
throw new XZIndexMarkerReachedException();
}

private byte[] CacheHeader()
{
byte[] blockHeaderWithoutCrc = new byte[BlockHeaderSize - 4];
blockHeaderWithoutCrc[0] = _blockHeaderSizeByte;
var read = BaseStream.Read(blockHeaderWithoutCrc, 1, BlockHeaderSize - 5);
if (read != BlockHeaderSize - 5)
throw new EndOfStreamException("Reached end of stream unexpectedly");

uint crc = BaseStream.ReadLittleEndianUInt32();
uint calcCrc = Crc32.Compute(blockHeaderWithoutCrc);
if (crc != calcCrc)
throw new InvalidDataException("Block header corrupt");

return blockHeaderWithoutCrc;
}

private void ReadBlockFlags(BinaryReader reader)
{
var blockFlags = reader.ReadByte();
_numFilters = (blockFlags & 0x03) + 1;
byte reserved = (byte)(blockFlags & 0x3C);

if (reserved != 0)
throw new InvalidDataException("Reserved bytes used, perhaps an unknown XZ implementation");

bool compressedSizePresent = (blockFlags & 0x40) != 0;
bool uncompressedSizePresent = (blockFlags & 0x80) != 0;

if (compressedSizePresent)
CompressedSize = reader.ReadXZInteger();
if (uncompressedSizePresent)
UncompressedSize = reader.ReadXZInteger();
}

private void ReadFilters(BinaryReader reader, long baseStreamOffset = 0)
{
int nonLastSizeChangers = 0;
for (int i = 0; i < _numFilters; i++)
{
var filter = BlockFilter.Read(reader);
if ((i + 1 == _numFilters && !filter.AllowAsLast)
|| (i + 1 < _numFilters && !filter.AllowAsNonLast))
throw new InvalidDataException("Block Filters in bad order");
if (filter.ChangesDataSize && i + 1 < _numFilters)
nonLastSizeChangers++;
filter.ValidateFilter();
Filters.Push(filter);
}
if (nonLastSizeChangers > 2)
throw new InvalidDataException("More than two non-last block filters cannot change stream size");

int blockHeaderPaddingSize = BlockHeaderSize -
(4 + (int)(reader.BaseStream.Position - baseStreamOffset));
byte[] blockHeaderPadding = reader.ReadBytes(blockHeaderPaddingSize);
if (!blockHeaderPadding.All(b => b == 0))
throw new InvalidDataException("Block header contains unknown fields");
}
}
}
@@ -1,49 +0,0 @@
using System.IO;
using System.Linq;
using System.Text;
using SharpCompress.IO;

namespace SharpCompress.Compressors.Xz
{
public class XZFooter
{
private readonly BinaryReader _reader;
private readonly byte[] _magicBytes = new byte[] { 0x59, 0x5A };
public long StreamStartPosition { get; private set; }
public long BackwardSize { get; private set; }
public byte[] StreamFlags { get; private set; }

public XZFooter(BinaryReader reader)
{
_reader = reader;
StreamStartPosition = reader.BaseStream.Position;
}

public static XZFooter FromStream(Stream stream)
{
var footer = new XZFooter(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8));
footer.Process();
return footer;
}

public void Process()
{
uint crc = _reader.ReadLittleEndianUInt32();
byte[] footerBytes = _reader.ReadBytes(6);
uint myCrc = Crc32.Compute(footerBytes);
if (crc != myCrc)
throw new InvalidDataException("Footer corrupt");
using (var stream = new MemoryStream(footerBytes))
using (var reader = new BinaryReader(stream))
{
BackwardSize = (reader.ReadLittleEndianUInt32() + 1) * 4;
StreamFlags = reader.ReadBytes(2);
}
byte[] magBy = _reader.ReadBytes(2);
if (!Enumerable.SequenceEqual(magBy, _magicBytes))
{
throw new InvalidDataException("Magic footer missing");
}
}
}
}
Some files were not shown because too many files have changed in this diff.