Mirror of https://github.com/adamhathcock/sharpcompress.git (synced 2026-02-04 05:25:00 +00:00)

Compare commits: 0.17.0...presentati (50 commits)
Commits (SHA1):

56be4d8921, 4eb3148c26, e95d543ecd, 33af3d552b, 359a6042cd, e27d2ec660, da56bfc01f, 6e2c7d2857, 5481609554, a62f4df0b1,
f893c1272c, e701f5277e, f85fd1f6a4, 8f7ea420b3, d8c8dabb52, 9092ecf331, 2fd9fe96ad, 02f68b793c, 57b9133a0f, 815f5e09e8,
5bdf01ee59, bd9417e74c, 694e869162, 45845f8963, a8b6def76a, a4ebd5fb3d, 3da3b212fa, c2528cf93e, 550fecd4d3, 50b01428b4,
bb59f28b22, 7064cda6de, 525c1873e8, 3d91b4eb5e, f20c03180e, 08fee76b4e, 0f511c4b2a, 42d9dfd117, 3983db08ff, 72114bceea,
c303f96682, 0e785968c4, 15110e18e2, 5465af041b, 310d56fc16, 231258ef69, 16b7e3ffc8, 513e59f830, b10a1cf2bd, 1656edaa29
.circleci/config.yml (Normal file, 15 lines)

@@ -0,0 +1,15 @@
version: 2
jobs:
  build:
    docker:
      - image: microsoft/dotnet:2.0.5-sdk-2.1.4
    steps:
      - checkout
      - run:
          name: Install unzip
          command: |
            apt-get update
            apt-get install -y unzip
      - run:
          name: Build
          command: ./build.sh
.gitattributes (vendored, 2 lines)

@@ -2,4 +2,4 @@
* text=auto

# need original files to be windows
test/TestArchives/Original/*.txt eol=crlf
*.txt text eol=crlf
.gitignore (vendored, 1 line)

@@ -14,3 +14,4 @@ tests/TestArchives/Scratch
.vs
tools
.vscode
.idea/
.travis.yml (13 lines)

@@ -1,13 +0,0 @@
dist: trusty
language: csharp
cache:
  directories:
    - $HOME/.dotnet
solution: SharpCompress.sln
matrix:
  include:
    - dotnet: 1.0.4
      mono: none
      env: DOTNETCORE=1
script:
  - ./build.sh
@@ -11,7 +11,7 @@
| Archive Format | Compression Format(s) | Compress/Decompress | Archive API | Reader API | Writer API |
| --- | --- | --- | --- | --- | --- |
| Rar | Rar | Decompress (1) | RarArchive | RarReader | N/A |
| Zip (2) | None, DEFLATE, BZip2, LZMA/LZMA2, PPMd | Both | ZipArchive | ZipReader | ZipWriter |
| Zip (2) | None, DEFLATE, Deflate64, BZip2, LZMA/LZMA2, PPMd | Both | ZipArchive | ZipReader | ZipWriter |
| Tar | None | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.GZip | DEFLATE | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.BZip2 | BZip2 | Both | TarArchive | TarReader | TarWriter (3) |

@@ -22,7 +22,7 @@
| LZip (single file) (5) | LZip (LZMA) | Both | LZipArchive | LZipReader | LZipWriter |

1. SOLID Rars are only supported in the RarReader API.
2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported but only with seekable streams as the Zip spec doesn't support Zip64 data in post data descriptors.
2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported but only with seekable streams as the Zip spec doesn't support Zip64 data in post data descriptors. Deflate64 is only supported for reading.
3. The Tar format requires a file size in the header. If no size is specified to the TarWriter and the stream is not seekable, then an exception will be thrown.
4. The 7Zip format doesn't allow for reading as a forward-only stream so 7Zip is only supported through the Archive API
5. LZip has no support for extra data like the file name or timestamp. There is a default filename used when looking at the entry Key on the archive.

@@ -36,6 +36,7 @@ For those who want to directly compress/decompress bits. The single file format
| BZip2Stream | Both |
| GZipStream | Both |
| DeflateStream | Both |
| Deflate64Stream | Decompress |
| LZMAStream | Both |
| PPMdStream | Both |
| ADCStream | Decompress |
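For the single-file streams in the table above, usage is plain stream composition. A minimal decompression sketch (file names are placeholders, and the two-argument GZipStream constructor is assumed from the current API):

```csharp
using System.IO;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;

// Decompress data.txt.gz to data.txt with the GZipStream listed above.
using (Stream input = File.OpenRead("data.txt.gz"))
using (Stream gzip = new GZipStream(input, CompressionMode.Decompress))
using (Stream output = File.OpenWrite("data.txt"))
{
    gzip.CopyTo(output);
}
```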
PITCHME.md (Normal file, 247 lines)

@@ -0,0 +1,247 @@
#### SharpCompress - Pure C# Archival and Compression

---
#### Overview

* History
* Design
* Archival Formats
* Usages (Code!)

---
#### Why?

* Bored
* Interested in Comics and wanted to make my own cross-platform viewer
* Wrote a viewer in Silverlight 2 using first versions of SharpCompress.
* Used it on OS X

---
#### Initial Version

* Started as NUnrar on CodePlex
* Used Visual Studio 2003 to convert JUnrar to C#
* Cleaned up to have a nicer API

---
### More Formats

* Integrated DotNetZip
* Created Unified API
* Added Tar
* Contributions: 7Zip, LZip, more!

---
# Design

---
### Unified APIs

* Random Access
* Archive API
* Forward-only
* Reader API
* Writer API
* Neutral Factories

---
### Neutral Factories

* Factories
* `ArchiveFactory`
* `ReaderFactory`
* `WriterFactory`
* Strategy
* Look for Archive Signatures
* "Rewind" if necessary with RewindableStream

---
### Random Access

* Random/Seekable access on a data stream (e.g. a File)
* Strategy
* Read Header, Skip Data
* Dictionary

---
### Forward-only

* Everything is a stream of data
* Support NetworkStreams
* Very large files
* `yield return` usage

---
# Formats

---
### Zip

* Header-Data Format
* Optional data trailer (forward-only writing support)
* Trailing dictionary of entries
* pkware spec - APPNOTE.txt
* Supports Reader API, Writer API and Archive API
* Compression algorithms: just about everything
* Deflate, BZ2, LZMA 1/2, PPMd

---
### Rar

* Header-Data Format
* SOLID is a stream of compressed header-data pairs for small files
* Multi-file archive
* Unrar open-source, rar is closed-source
* Supports Reader API and Archive API
* Compression looks to be a modification of PPMd

---
### 7Zip

* Multi-data compressed Format
* Headers are compressed
* Multiple compressed "streams"
* Readable Archive API support
* Annoying
* Known for LZMA

---
### Tar

* Header-Data Format
* Supports Reader API, Writer API and Archive API
* Uncompressed
* Many additions to out-grow limitations
* UStar
* PAX

---
### GZip, BZip, LZip, Xz

* Header-Data Format of a single entry
* Supports Reader API, Writer API and Archive API
* Used with Tar
* Compression
* GZip - Deflate
* BZip2 - BZip2
* Xz - LZMA2
* LZip - LZMA1 (improvement on Xz)

---
# Usages

---
### Reader

Writing entry to directory

```csharp
using (Stream stream = new NetworkStream()) // pretend
using (IReader reader = ReaderFactory.Open(stream))
{
    while (reader.MoveToNextEntry())
    {
        if (!reader.Entry.IsDirectory)
        {
            reader.WriteEntryToDirectory(test.SCRATCH_FILES_PATH, new ExtractionOptions()
            {
                ExtractFullPath = true,
                Overwrite = true
            });
        }
    }
}
```

---
### Reader

Writing entry to a stream

```csharp
using (var reader = RarReader.Open("Rar.rar"))
{
    while (reader.MoveToNextEntry())
    {
        if (!reader.Entry.IsDirectory)
        {
            using (var entryStream = reader.OpenEntryStream())
            {
                string file = Path.GetFileName(reader.Entry.Key);
                string folder = Path.GetDirectoryName(reader.Entry.Key);
                string destdir = Path.Combine(SCRATCH_FILES_PATH, folder);
                if (!Directory.Exists(destdir))
                {
                    Directory.CreateDirectory(destdir);
                }
                string destinationFileName = Path.Combine(destdir, file);
                using (FileStream fs = File.OpenWrite(destinationFileName))
                {
                    entryStream.TransferTo(fs);
                }
            }
        }
    }
}
```

---
### Writer

Creating archive

```csharp
using (Stream stream = File.OpenWrite("Test.tar.lz"))
using (var writer = WriterFactory.Open(stream, ArchiveType.Tar, CompressionType.LZip))
{
    writer.WriteAll(@"C:\", "*", SearchOption.AllDirectories);
}
```

---
### Archive

---
### Projects

* Mono's Zip implementation
* Nodatime
* Octopus Deploy
* Duplicati
* Large ISO multi-file usage

---
### Open-source Notes

* Mostly solo effort
* A few significant contributions
* Russian friend did RarStream
* Jon Skeet contributed LZip reading
* Deflate64 recently added
* Can always use help!
* Multi-file zip support
* Encryption in various formats (some support exists)
* General clean up
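The Archive slide above does not carry a snippet of its own; a random-access sketch in the same style as the Reader and Writer examples (archive path and output directory are placeholders, not taken from the presentation):

```csharp
using System.Linq;
using SharpCompress.Archives;
using SharpCompress.Common;

// Open a seekable source and walk every file entry through the Archive API.
using (var archive = ArchiveFactory.Open("Test.zip"))
{
    foreach (var entry in archive.Entries.Where(e => !e.IsDirectory))
    {
        entry.WriteToDirectory(@"D:\temp", new ExtractionOptions
        {
            ExtractFullPath = true,
            Overwrite = true
        });
    }
}
```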
README.md (12 lines)

@@ -7,8 +7,8 @@ The major feature is support for non-seekable streams so large files can be proc
AppVeyor Build -
[](https://ci.appveyor.com/project/adamhathcock/sharpcompress/branch/master)

Travis CI Build -
[](https://travis-ci.org/adamhathcock/sharpcompress)
Circle CI Build -
[](https://circleci.com/gh/adamhathcock/sharpcompress)

## Need Help?
Post Issues on Github!

@@ -44,6 +44,14 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un

## Version Log

### Version 0.18

* [Now on Github releases](https://github.com/adamhathcock/sharpcompress/releases/tag/0.18)

### Version 0.17.1

* Fix - [Bug Fix for .NET Core on Windows](https://github.com/adamhathcock/sharpcompress/pull/257)

### Version 0.17.0

* New - Full LZip support! Can read and write LZip files and Tars inside LZip files. [Make LZip a first class citizen. #241](https://github.com/adamhathcock/sharpcompress/issues/241)
@@ -45,10 +45,14 @@
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/INDENT_ANONYMOUS_METHOD_BLOCK/@EntryValue">True</s:Boolean>
|
||||
<s:Int64 x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/KEEP_BLANK_LINES_IN_CODE/@EntryValue">1</s:Int64>
|
||||
<s:Int64 x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/KEEP_BLANK_LINES_IN_DECLARATIONS/@EntryValue">1</s:Int64>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_ACCESSOR_ATTRIBUTE_ON_SAME_LINE_EX/@EntryValue">NEVER</s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_ACCESSORHOLDER_ATTRIBUTE_ON_SAME_LINE_EX/@EntryValue">NEVER</s:String>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_CONSTRUCTOR_INITIALIZER_ON_SAME_LINE/@EntryValue">False</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_FIELD_ATTRIBUTE_ON_SAME_LINE/@EntryValue">False</s:Boolean>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_FIELD_ATTRIBUTE_ON_SAME_LINE_EX/@EntryValue">NEVER</s:String>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_ACCESSORHOLDER_ON_SINGLE_LINE/@EntryValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_ACCESSOR_ATTRIBUTE_ON_SAME_LINE/@EntryValue">False</s:Boolean>
|
||||
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_EMBEDDED_STATEMENT_ON_SAME_LINE/@EntryValue">NEVER</s:String>
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_INITIALIZER_ON_SINGLE_LINE/@EntryValue">True</s:Boolean>
|
||||
|
||||
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_WHILE_ON_NEW_LINE/@EntryValue">True</s:Boolean>
|
||||
@@ -114,6 +118,11 @@
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=NAMESPACE_005FALIAS/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /></s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=XAML_005FFIELD/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></s:String>
|
||||
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=XAML_005FRESOURCE/@EntryIndexedValue"><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></s:String>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpAttributeForSingleLineMethodUpgrade/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpKeepExistingMigration/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpPlaceEmbeddedOnSameLineMigration/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpRenamePlacementToArrangementMigration/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EAddAccessorOwnerDeclarationBracesMigration/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002ECSharpPlaceAttributeOnSameLineMigration/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EMigrateBlankLinesAroundFieldToBlankLinesAroundProperty/@EntryIndexedValue">True</s:Boolean>
|
||||
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EMigrateThisQualifierSettings/@EntryIndexedValue">True</s:Boolean></wpf:ResourceDictionary>
|
||||
|
||||
build.cake (32 lines)
@@ -25,13 +25,17 @@ Task("Build")
|
||||
var settings = new DotNetCoreBuildSettings
|
||||
{
|
||||
Framework = "netstandard1.0",
|
||||
Configuration = "Release"
|
||||
Configuration = "Release",
|
||||
NoRestore = true
|
||||
};
|
||||
|
||||
DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);
|
||||
|
||||
settings.Framework = "netcoreapp1.1";
|
||||
DotNetCoreBuild("./tests/SharpCompress.Test/SharpCompress.Test.csproj", settings);
|
||||
settings.Framework = "netstandard1.3";
|
||||
DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);
|
||||
|
||||
settings.Framework = "netstandard2.0";
|
||||
DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);
|
||||
}
|
||||
});
|
||||
|
||||
@@ -39,23 +43,15 @@ Task("Test")
|
||||
.IsDependentOn("Build")
|
||||
.Does(() =>
|
||||
{
|
||||
if (!bool.Parse(EnvironmentVariable("APPVEYOR") ?? "false")
|
||||
&& !bool.Parse(EnvironmentVariable("TRAVIS") ?? "false"))
|
||||
var files = GetFiles("tests/**/*.csproj");
|
||||
foreach(var file in files)
|
||||
{
|
||||
var files = GetFiles("tests/**/*.csproj");
|
||||
foreach(var file in files)
|
||||
var settings = new DotNetCoreTestSettings
|
||||
{
|
||||
var settings = new DotNetCoreTestSettings
|
||||
{
|
||||
Configuration = "Release"
|
||||
};
|
||||
|
||||
DotNetCoreTest(file.ToString(), settings);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
Information("Skipping tests as this is AppVeyor or Travis CI");
|
||||
Configuration = "Release",
|
||||
Framework = "netcoreapp2.0"
|
||||
};
|
||||
DotNetCoreTest(file.ToString(), settings);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
build.sh (2 lines)

@@ -8,7 +8,7 @@
# Define directories.
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
TOOLS_DIR=$SCRIPT_DIR/tools
CAKE_VERSION=0.19.1
CAKE_VERSION=0.26.0
CAKE_DLL=$TOOLS_DIR/Cake.CoreCLR.$CAKE_VERSION/Cake.dll

# Make sure the tools folder exist.
@@ -14,6 +14,7 @@ namespace SharpCompress.Archives.GZip
|
||||
public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
{
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -36,6 +37,7 @@ namespace SharpCompress.Archives.GZip
|
||||
return new GZipArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -54,11 +56,11 @@ namespace SharpCompress.Archives.GZip
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="options"></param>
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="options"></param>
|
||||
internal GZipArchive(FileInfo fileInfo, ReaderOptions options)
|
||||
: base(ArchiveType.GZip, fileInfo, options)
|
||||
{
|
||||
@@ -104,15 +106,9 @@ namespace SharpCompress.Archives.GZip
|
||||
{
|
||||
// read the header on the first read
|
||||
byte[] header = new byte[10];
|
||||
int n = stream.Read(header, 0, header.Length);
|
||||
|
||||
// workitem 8501: handle edge case (decompress empty stream)
|
||||
if (n == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (n != 10)
|
||||
if (!stream.ReadFully(header))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
@@ -158,7 +154,7 @@ namespace SharpCompress.Archives.GZip
|
||||
{
|
||||
throw new InvalidOperationException("Only one entry is allowed in a GZip Archive");
|
||||
}
|
||||
using (var writer = new GZipWriter(stream))
|
||||
using (var writer = new GZipWriter(stream, new GZipWriterOptions(options)))
|
||||
{
|
||||
foreach (var entry in oldEntries.Concat(newEntries)
|
||||
.Where(x => !x.IsDirectory))
|
||||
@@ -179,7 +175,7 @@ namespace SharpCompress.Archives.GZip
|
||||
protected override IEnumerable<GZipArchiveEntry> LoadEntries(IEnumerable<GZipVolume> volumes)
|
||||
{
|
||||
Stream stream = volumes.Single().Stream;
|
||||
yield return new GZipArchiveEntry(this, new GZipFilePart(stream));
|
||||
yield return new GZipArchiveEntry(this, new GZipFilePart(stream, ReaderOptions.ArchiveEncoding));
|
||||
}
|
||||
|
||||
protected override IReader CreateReaderForSolidExtraction()
|
||||
|
||||
@@ -4,6 +4,7 @@ using System.IO;
|
||||
using System.Linq;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.SevenZip;
|
||||
using SharpCompress.Compressors.LZMA.Utilites;
|
||||
using SharpCompress.IO;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
@@ -106,7 +107,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
for (int i = 0; i < database.Files.Count; i++)
|
||||
{
|
||||
var file = database.Files[i];
|
||||
yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file));
|
||||
yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file, ReaderOptions.ArchiveEncoding));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -117,7 +118,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
stream.Position = 0;
|
||||
var reader = new ArchiveReader();
|
||||
reader.Open(stream);
|
||||
database = reader.ReadDatabase(null);
|
||||
database = reader.ReadDatabase(new PasswordProvider(ReaderOptions.Password));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -144,7 +145,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
|
||||
protected override IReader CreateReaderForSolidExtraction()
|
||||
{
|
||||
return new SevenZipReader(this);
|
||||
return new SevenZipReader(ReaderOptions, this);
|
||||
}
|
||||
|
||||
public override bool IsSolid { get { return Entries.Where(x => !x.IsDirectory).GroupBy(x => x.FilePart.Folder).Count() > 1; } }
|
||||
@@ -165,8 +166,8 @@ namespace SharpCompress.Archives.SevenZip
|
||||
private Stream currentStream;
|
||||
private CFileItem currentItem;
|
||||
|
||||
internal SevenZipReader(SevenZipArchive archive)
|
||||
: base(new ReaderOptions(), ArchiveType.SevenZip)
|
||||
internal SevenZipReader(ReaderOptions readerOptions, SevenZipArchive archive)
|
||||
: base(readerOptions, ArchiveType.SevenZip)
|
||||
{
|
||||
this.archive = archive;
|
||||
}
|
||||
@@ -190,7 +191,7 @@ namespace SharpCompress.Archives.SevenZip
|
||||
}
|
||||
else
|
||||
{
|
||||
currentStream = archive.database.GetFolderStream(stream, currentFolder, null);
|
||||
currentStream = archive.database.GetFolderStream(stream, currentFolder, new PasswordProvider(Options.Password));
|
||||
}
|
||||
foreach (var entry in group)
|
||||
{
|
||||
@@ -205,5 +206,21 @@ namespace SharpCompress.Archives.SevenZip
|
||||
return CreateEntryStream(new ReadOnlySubStream(currentStream, currentItem.Size));
|
||||
}
|
||||
}
|
||||
|
||||
private class PasswordProvider : IPasswordProvider
|
||||
{
|
||||
private readonly string _password;
|
||||
|
||||
public PasswordProvider(string password)
|
||||
{
|
||||
_password = password;
|
||||
|
||||
}
|
||||
|
||||
public string CryptoGetTextPassword()
|
||||
{
|
||||
return _password;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
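The PasswordProvider plumbing in this hunk is what lets a caller hand the 7Zip reader a password through ReaderOptions. A minimal sketch (file name and password are placeholders):

```csharp
using System;
using SharpCompress.Archives.SevenZip;
using SharpCompress.Readers;

// Entries of an encrypted 7z are decrypted via the password carried in ReaderOptions.
using (var archive = SevenZipArchive.Open("secret.7z", new ReaderOptions { Password = "password" }))
{
    foreach (var entry in archive.Entries)
    {
        Console.WriteLine($"{entry.Key} ({entry.Size} bytes)");
    }
}
```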
@@ -16,7 +16,7 @@ namespace SharpCompress.Archives.Tar
|
||||
public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
{
|
||||
#if !NO_FILE
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -39,7 +39,7 @@ namespace SharpCompress.Archives.Tar
|
||||
return new TarArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -52,6 +52,7 @@ namespace SharpCompress.Archives.Tar
|
||||
}
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
public static bool IsTarFile(string filePath)
|
||||
{
|
||||
return IsTarFile(new FileInfo(filePath));
|
||||
@@ -74,7 +75,7 @@ namespace SharpCompress.Archives.Tar
|
||||
{
|
||||
try
|
||||
{
|
||||
TarHeader tar = new TarHeader();
|
||||
TarHeader tar = new TarHeader(new ArchiveEncoding());
|
||||
tar.Read(new BinaryReader(stream));
|
||||
return tar.Name.Length > 0 && Enum.IsDefined(typeof(EntryType), tar.EntryType);
|
||||
}
|
||||
@@ -98,7 +99,6 @@ namespace SharpCompress.Archives.Tar
|
||||
|
||||
protected override IEnumerable<TarVolume> LoadVolumes(FileInfo file)
|
||||
{
|
||||
|
||||
return new TarVolume(file.OpenRead(), ReaderOptions).AsEnumerable();
|
||||
}
|
||||
#endif
|
||||
@@ -127,7 +127,7 @@ namespace SharpCompress.Archives.Tar
|
||||
{
|
||||
Stream stream = volumes.Single().Stream;
|
||||
TarHeader previousHeader = null;
|
||||
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream))
|
||||
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream, ReaderOptions.ArchiveEncoding))
|
||||
{
|
||||
if (header != null)
|
||||
{
|
||||
@@ -152,7 +152,7 @@ namespace SharpCompress.Archives.Tar
|
||||
memoryStream.Position = 0;
|
||||
var bytes = memoryStream.ToArray();
|
||||
|
||||
header.Name = ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length).TrimNulls();
|
||||
header.Name = ReaderOptions.ArchiveEncoding.Decode(bytes).TrimNulls();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -182,7 +182,7 @@ namespace SharpCompress.Archives.Tar
|
||||
IEnumerable<TarArchiveEntry> oldEntries,
|
||||
IEnumerable<TarArchiveEntry> newEntries)
|
||||
{
|
||||
using (var writer = new TarWriter(stream, options))
|
||||
using (var writer = new TarWriter(stream, new TarWriterOptions(options)))
|
||||
{
|
||||
foreach (var entry in oldEntries.Concat(newEntries)
|
||||
.Where(x => !x.IsDirectory))
|
||||
|
||||
@@ -24,6 +24,7 @@ namespace SharpCompress.Archives.Zip
|
||||
public CompressionLevel DeflateCompressionLevel { get; set; }
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
@@ -46,6 +47,7 @@ namespace SharpCompress.Archives.Zip
|
||||
return new ZipArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
@@ -58,6 +60,7 @@ namespace SharpCompress.Archives.Zip
|
||||
}
|
||||
|
||||
#if !NO_FILE
|
||||
|
||||
public static bool IsZipFile(string filePath, string password = null)
|
||||
{
|
||||
return IsZipFile(new FileInfo(filePath), password);
|
||||
@@ -78,7 +81,7 @@ namespace SharpCompress.Archives.Zip
|
||||
|
||||
public static bool IsZipFile(Stream stream, string password = null)
|
||||
{
|
||||
StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password);
|
||||
StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password, new ArchiveEncoding());
|
||||
try
|
||||
{
|
||||
ZipHeader header =
|
||||
@@ -109,7 +112,7 @@ namespace SharpCompress.Archives.Zip
|
||||
internal ZipArchive(FileInfo fileInfo, ReaderOptions readerOptions)
|
||||
: base(ArchiveType.Zip, fileInfo, readerOptions)
|
||||
{
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password);
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding);
|
||||
}
|
||||
|
||||
protected override IEnumerable<ZipVolume> LoadVolumes(FileInfo file)
|
||||
@@ -131,7 +134,7 @@ namespace SharpCompress.Archives.Zip
|
||||
internal ZipArchive(Stream stream, ReaderOptions readerOptions)
|
||||
: base(ArchiveType.Zip, stream, readerOptions)
|
||||
{
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password);
|
||||
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding);
|
||||
}
|
||||
|
||||
protected override IEnumerable<ZipVolume> LoadVolumes(IEnumerable<Stream> streams)
|
||||
@@ -150,19 +153,19 @@ namespace SharpCompress.Archives.Zip
|
||||
switch (h.ZipHeaderType)
|
||||
{
|
||||
case ZipHeaderType.DirectoryEntry:
|
||||
{
|
||||
yield return new ZipArchiveEntry(this,
|
||||
new SeekableZipFilePart(headerFactory,
|
||||
h as DirectoryEntryHeader,
|
||||
stream));
|
||||
}
|
||||
{
|
||||
yield return new ZipArchiveEntry(this,
|
||||
new SeekableZipFilePart(headerFactory,
|
||||
h as DirectoryEntryHeader,
|
||||
stream));
|
||||
}
|
||||
break;
|
||||
case ZipHeaderType.DirectoryEnd:
|
||||
{
|
||||
byte[] bytes = (h as DirectoryEndHeader).Comment;
|
||||
volume.Comment = ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length);
|
||||
yield break;
|
||||
}
|
||||
{
|
||||
byte[] bytes = (h as DirectoryEndHeader).Comment;
|
||||
volume.Comment = ReaderOptions.ArchiveEncoding.Decode(bytes);
|
||||
yield break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -205,7 +208,7 @@ namespace SharpCompress.Archives.Zip
|
||||
{
|
||||
var stream = Volumes.Single().Stream;
|
||||
stream.Position = 0;
|
||||
return ZipReader.Open(stream);
|
||||
return ZipReader.Open(stream, ReaderOptions);
|
||||
}
|
||||
}
|
||||
}
|
||||
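ZipArchive.IsZipFile, shown in this hunk, gives a cheap sniff test before committing to the Archive API; a short sketch (the path is a placeholder):

```csharp
using SharpCompress.Archives.Zip;

if (ZipArchive.IsZipFile("input.dat"))
{
    using (var zip = ZipArchive.Open("input.dat"))
    {
        // enumerate zip.Entries, extract, and so on
    }
}
```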
src/SharpCompress/Buffers/ArrayPool.cs (Normal file, 119 lines)
@@ -0,0 +1,119 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
#if NETCORE
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Threading;
|
||||
|
||||
namespace SharpCompress.Buffers
|
||||
{
|
||||
/// <summary>
|
||||
/// Provides a resource pool that enables reusing instances of type <see cref="T:T[]"/>.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// <para>
|
||||
/// Renting and returning buffers with an <see cref="ArrayPool{T}"/> can increase performance
|
||||
/// in situations where arrays are created and destroyed frequently, resulting in significant
|
||||
/// memory pressure on the garbage collector.
|
||||
/// </para>
|
||||
/// <para>
|
||||
/// This class is thread-safe. All members may be used by multiple threads concurrently.
|
||||
/// </para>
|
||||
/// </remarks>
|
||||
internal abstract class ArrayPool<T>
|
||||
{
|
||||
/// <summary>The lazily-initialized shared pool instance.</summary>
|
||||
private static ArrayPool<T> s_sharedInstance = null;
|
||||
|
||||
/// <summary>
|
||||
/// Retrieves a shared <see cref="ArrayPool{T}"/> instance.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// The shared pool provides a default implementation of <see cref="ArrayPool{T}"/>
|
||||
/// that's intended for general applicability. It maintains arrays of multiple sizes, and
|
||||
/// may hand back a larger array than was actually requested, but will never hand back a smaller
|
||||
/// array than was requested. Renting a buffer from it with <see cref="Rent"/> will result in an
|
||||
/// existing buffer being taken from the pool if an appropriate buffer is available or in a new
|
||||
/// buffer being allocated if one is not available.
|
||||
/// </remarks>
|
||||
public static ArrayPool<T> Shared
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
get { return Volatile.Read(ref s_sharedInstance) ?? EnsureSharedCreated(); }
|
||||
}
|
||||
|
||||
/// <summary>Ensures that <see cref="s_sharedInstance"/> has been initialized to a pool and returns it.</summary>
|
||||
[MethodImpl(MethodImplOptions.NoInlining)]
|
||||
private static ArrayPool<T> EnsureSharedCreated()
|
||||
{
|
||||
Interlocked.CompareExchange(ref s_sharedInstance, Create(), null);
|
||||
return s_sharedInstance;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new <see cref="ArrayPool{T}"/> instance using default configuration options.
|
||||
/// </summary>
|
||||
/// <returns>A new <see cref="ArrayPool{T}"/> instance.</returns>
|
||||
public static ArrayPool<T> Create()
|
||||
{
|
||||
return new DefaultArrayPool<T>();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new <see cref="ArrayPool{T}"/> instance using custom configuration options.
|
||||
/// </summary>
|
||||
/// <param name="maxArrayLength">The maximum length of array instances that may be stored in the pool.</param>
|
||||
/// <param name="maxArraysPerBucket">
|
||||
/// The maximum number of array instances that may be stored in each bucket in the pool. The pool
|
||||
/// groups arrays of similar lengths into buckets for faster access.
|
||||
/// </param>
|
||||
/// <returns>A new <see cref="ArrayPool{T}"/> instance with the specified configuration options.</returns>
|
||||
/// <remarks>
|
||||
/// The created pool will group arrays into buckets, with no more than <paramref name="maxArraysPerBucket"/>
|
||||
/// in each bucket and with those arrays not exceeding <paramref name="maxArrayLength"/> in length.
|
||||
/// </remarks>
|
||||
public static ArrayPool<T> Create(int maxArrayLength, int maxArraysPerBucket)
|
||||
{
|
||||
return new DefaultArrayPool<T>(maxArrayLength, maxArraysPerBucket);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Retrieves a buffer that is at least the requested length.
|
||||
/// </summary>
|
||||
/// <param name="minimumLength">The minimum length of the array needed.</param>
|
||||
/// <returns>
|
||||
/// An <see cref="T:T[]"/> that is at least <paramref name="minimumLength"/> in length.
|
||||
/// </returns>
|
||||
/// <remarks>
|
||||
/// This buffer is loaned to the caller and should be returned to the same pool via
|
||||
/// <see cref="Return"/> so that it may be reused in subsequent usage of <see cref="Rent"/>.
|
||||
/// It is not a fatal error to not return a rented buffer, but failure to do so may lead to
|
||||
/// decreased application performance, as the pool may need to create a new buffer to replace
|
||||
/// the one lost.
|
||||
/// </remarks>
|
||||
public abstract T[] Rent(int minimumLength);
|
||||
|
||||
/// <summary>
|
||||
/// Returns to the pool an array that was previously obtained via <see cref="Rent"/> on the same
|
||||
/// <see cref="ArrayPool{T}"/> instance.
|
||||
/// </summary>
|
||||
/// <param name="array">
|
||||
/// The buffer previously obtained from <see cref="Rent"/> to return to the pool.
|
||||
/// </param>
|
||||
/// <param name="clearArray">
|
||||
/// If <c>true</c> and if the pool will store the buffer to enable subsequent reuse, <see cref="Return"/>
|
||||
/// will clear <paramref name="array"/> of its contents so that a subsequent consumer via <see cref="Rent"/>
|
||||
/// will not see the previous consumer's content. If <c>false</c> or if the pool will release the buffer,
|
||||
/// the array's contents are left unchanged.
|
||||
/// </param>
|
||||
/// <remarks>
|
||||
/// Once a buffer has been returned to the pool, the caller gives up all ownership of the buffer
|
||||
/// and must not use it. The reference returned from a given call to <see cref="Rent"/> must only be
|
||||
/// returned via <see cref="Return"/> once. The default <see cref="ArrayPool{T}"/>
|
||||
/// may hold onto the returned buffer in order to rent it again, or it may release the returned buffer
|
||||
/// if it's determined that the pool already has enough buffers stored.
|
||||
/// </remarks>
|
||||
public abstract void Return(T[] array, bool clearArray = false);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
src/SharpCompress/Buffers/DefaultArrayPool.cs (Normal file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
#if NETCORE
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Buffers
|
||||
{
|
||||
internal sealed partial class DefaultArrayPool<T> : ArrayPool<T>
|
||||
{
|
||||
/// <summary>The default maximum length of each array in the pool (2^20).</summary>
|
||||
private const int DefaultMaxArrayLength = 1024 * 1024;
|
||||
/// <summary>The default maximum number of arrays per bucket that are available for rent.</summary>
|
||||
private const int DefaultMaxNumberOfArraysPerBucket = 50;
|
||||
/// <summary>Lazily-allocated empty array used when arrays of length 0 are requested.</summary>
|
||||
private static T[] s_emptyArray; // we support contracts earlier than those with Array.Empty<T>()
|
||||
|
||||
private readonly Bucket[] _buckets;
|
||||
|
||||
internal DefaultArrayPool() : this(DefaultMaxArrayLength, DefaultMaxNumberOfArraysPerBucket)
|
||||
{
|
||||
}
|
||||
|
||||
internal DefaultArrayPool(int maxArrayLength, int maxArraysPerBucket)
|
||||
{
|
||||
if (maxArrayLength <= 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(maxArrayLength));
|
||||
}
|
||||
if (maxArraysPerBucket <= 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(maxArraysPerBucket));
|
||||
}
|
||||
|
||||
// Our bucketing algorithm has a min length of 2^4 and a max length of 2^30.
|
||||
// Constrain the actual max used to those values.
|
||||
const int MinimumArrayLength = 0x10, MaximumArrayLength = 0x40000000;
|
||||
if (maxArrayLength > MaximumArrayLength)
|
||||
{
|
||||
maxArrayLength = MaximumArrayLength;
|
||||
}
|
||||
else if (maxArrayLength < MinimumArrayLength)
|
||||
{
|
||||
maxArrayLength = MinimumArrayLength;
|
||||
}
|
||||
|
||||
// Create the buckets.
|
||||
int poolId = Id;
|
||||
int maxBuckets = Utilities.SelectBucketIndex(maxArrayLength);
|
||||
var buckets = new Bucket[maxBuckets + 1];
|
||||
for (int i = 0; i < buckets.Length; i++)
|
||||
{
|
||||
buckets[i] = new Bucket(Utilities.GetMaxSizeForBucket(i), maxArraysPerBucket, poolId);
|
||||
}
|
||||
_buckets = buckets;
|
||||
}
|
||||
|
||||
/// <summary>Gets an ID for the pool to use with events.</summary>
|
||||
private int Id => GetHashCode();
|
||||
|
||||
public override T[] Rent(int minimumLength)
|
||||
{
|
||||
// Arrays can't be smaller than zero. We allow requesting zero-length arrays (even though
|
||||
// pooling such an array isn't valuable) as it's a valid length array, and we want the pool
|
||||
// to be usable in general instead of using `new`, even for computed lengths.
|
||||
if (minimumLength < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(minimumLength));
|
||||
}
|
||||
else if (minimumLength == 0)
|
||||
{
|
||||
// No need for events with the empty array. Our pool is effectively infinite
|
||||
// and we'll never allocate for rents and never store for returns.
|
||||
return s_emptyArray ?? (s_emptyArray = new T[0]);
|
||||
}
|
||||
|
||||
T[] buffer = null;
|
||||
|
||||
int index = Utilities.SelectBucketIndex(minimumLength);
|
||||
if (index < _buckets.Length)
|
||||
{
|
||||
// Search for an array starting at the 'index' bucket. If the bucket is empty, bump up to the
|
||||
// next higher bucket and try that one, but only try at most a few buckets.
|
||||
const int MaxBucketsToTry = 2;
|
||||
int i = index;
|
||||
do
|
||||
{
|
||||
// Attempt to rent from the bucket. If we get a buffer from it, return it.
|
||||
buffer = _buckets[i].Rent();
|
||||
if (buffer != null)
|
||||
{
|
||||
return buffer;
|
||||
}
|
||||
}
|
||||
while (++i < _buckets.Length && i != index + MaxBucketsToTry);
|
||||
|
||||
// The pool was exhausted for this buffer size. Allocate a new buffer with a size corresponding
|
||||
// to the appropriate bucket.
|
||||
buffer = new T[_buckets[index]._bufferLength];
|
||||
}
|
||||
else
|
||||
{
|
||||
// The request was for a size too large for the pool. Allocate an array of exactly the requested length.
|
||||
// When it's returned to the pool, we'll simply throw it away.
|
||||
buffer = new T[minimumLength];
|
||||
}
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
public override void Return(T[] array, bool clearArray = false)
|
||||
{
|
||||
if (array == null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(array));
|
||||
}
|
||||
else if (array.Length == 0)
|
||||
{
|
||||
// Ignore empty arrays. When a zero-length array is rented, we return a singleton
|
||||
// rather than actually taking a buffer out of the lowest bucket.
|
||||
return;
|
||||
}
|
||||
|
||||
// Determine with what bucket this array length is associated
|
||||
int bucket = Utilities.SelectBucketIndex(array.Length);
|
||||
|
||||
// If we can tell that the buffer was allocated, drop it. Otherwise, check if we have space in the pool
|
||||
if (bucket < _buckets.Length)
|
||||
{
|
||||
// Clear the array if the user requests
|
||||
if (clearArray)
|
||||
{
|
||||
Array.Clear(array, 0, array.Length);
|
||||
}
|
||||
|
||||
// Return the buffer to its bucket. In the future, we might consider having Return return false
|
||||
// instead of dropping a bucket, in which case we could try to return to a lower-sized bucket,
|
||||
// just as how in Rent we allow renting from a higher-sized bucket.
|
||||
_buckets[bucket].Return(array);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
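DefaultArrayPool above is a vendored copy of the .NET ArrayPool, compiled only when NETCORE is defined and internal to SharpCompress. The rent/return pattern it is built for, using only the members shown in this file:

```csharp
using SharpCompress.Buffers;

byte[] buffer = ArrayPool<byte>.Shared.Rent(4096); // may hand back a longer array
try
{
    // fill and consume the buffer here
}
finally
{
    ArrayPool<byte>.Shared.Return(buffer); // hand it back so later Rent calls can reuse it
}
```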
src/SharpCompress/Buffers/DefaultArrayPoolBucket.cs (Normal file, 111 lines)
@@ -0,0 +1,111 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
#if NETCORE
|
||||
using System;
|
||||
using System.Diagnostics;
|
||||
using System.Threading;
|
||||
|
||||
namespace SharpCompress.Buffers
|
||||
{
|
||||
internal sealed partial class DefaultArrayPool<T> : ArrayPool<T>
|
||||
{
|
||||
/// <summary>Provides a thread-safe bucket containing buffers that can be Rent'd and Return'd.</summary>
|
||||
private sealed class Bucket
|
||||
{
|
||||
internal readonly int _bufferLength;
|
||||
private readonly T[][] _buffers;
|
||||
private readonly int _poolId;
|
||||
|
||||
private SpinLock _lock; // do not make this readonly; it's a mutable struct
|
||||
private int _index;
|
||||
|
||||
/// <summary>
|
||||
/// Creates the pool with numberOfBuffers arrays where each buffer is of bufferLength length.
|
||||
/// </summary>
|
||||
internal Bucket(int bufferLength, int numberOfBuffers, int poolId)
|
||||
{
|
||||
_lock = new SpinLock(Debugger.IsAttached); // only enable thread tracking if debugger is attached; it adds non-trivial overheads to Enter/Exit
|
||||
_buffers = new T[numberOfBuffers][];
|
||||
_bufferLength = bufferLength;
|
||||
_poolId = poolId;
|
||||
}
|
||||
|
||||
/// <summary>Gets an ID for the bucket to use with events.</summary>
|
||||
internal int Id => GetHashCode();
|
||||
|
||||
/// <summary>Takes an array from the bucket. If the bucket is empty, returns null.</summary>
|
||||
internal T[] Rent()
|
||||
{
|
||||
T[][] buffers = _buffers;
|
||||
T[] buffer = null;
|
||||
|
||||
// While holding the lock, grab whatever is at the next available index and
|
||||
// update the index. We do as little work as possible while holding the spin
|
||||
// lock to minimize contention with other threads. The try/finally is
|
||||
// necessary to properly handle thread aborts on platforms which have them.
|
||||
bool lockTaken = false, allocateBuffer = false;
|
||||
try
|
||||
{
|
||||
_lock.Enter(ref lockTaken);
|
||||
|
||||
if (_index < buffers.Length)
|
||||
{
|
||||
buffer = buffers[_index];
|
||||
buffers[_index++] = null;
|
||||
allocateBuffer = buffer == null;
|
||||
}
|
||||
}
|
||||
finally
|
||||
{
|
||||
if (lockTaken) _lock.Exit(false);
|
||||
}
|
||||
|
||||
// While we were holding the lock, we grabbed whatever was at the next available index, if
|
||||
// there was one. If we tried and if we got back null, that means we hadn't yet allocated
|
||||
// for that slot, in which case we should do so now.
|
||||
if (allocateBuffer)
|
||||
{
|
||||
buffer = new T[_bufferLength];
|
||||
}
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Attempts to return the buffer to the bucket. If successful, the buffer will be stored
|
||||
/// in the bucket and true will be returned; otherwise, the buffer won't be stored, and false
|
||||
/// will be returned.
|
||||
/// </summary>
|
||||
internal void Return(T[] array)
|
||||
{
|
||||
// Check to see if the buffer is the correct size for this bucket
|
||||
if (array.Length != _bufferLength)
|
||||
{
|
||||
throw new ArgumentException("Buffer not from pool", nameof(array));
|
||||
}
|
||||
|
||||
// While holding the spin lock, if there's room available in the bucket,
|
||||
// put the buffer into the next available slot. Otherwise, we just drop it.
|
||||
// The try/finally is necessary to properly handle thread aborts on platforms
|
||||
// which have them.
|
||||
bool lockTaken = false;
|
||||
try
|
||||
{
|
||||
_lock.Enter(ref lockTaken);
|
||||
|
||||
if (_index != 0)
|
||||
{
|
||||
_buffers[--_index] = array;
|
||||
}
|
||||
}
|
||||
finally
|
||||
{
|
||||
if (lockTaken) _lock.Exit(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
src/SharpCompress/Buffers/Utilities.cs (Normal file, 38 lines)

@@ -0,0 +1,38 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#if NETCORE
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace SharpCompress.Buffers
{
    internal static class Utilities
    {
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal static int SelectBucketIndex(int bufferSize)
        {
            Debug.Assert(bufferSize > 0);

            uint bitsRemaining = ((uint)bufferSize - 1) >> 4;

            int poolIndex = 0;
            if (bitsRemaining > 0xFFFF) { bitsRemaining >>= 16; poolIndex = 16; }
            if (bitsRemaining > 0xFF) { bitsRemaining >>= 8; poolIndex += 8; }
            if (bitsRemaining > 0xF) { bitsRemaining >>= 4; poolIndex += 4; }
            if (bitsRemaining > 0x3) { bitsRemaining >>= 2; poolIndex += 2; }
            if (bitsRemaining > 0x1) { bitsRemaining >>= 1; poolIndex += 1; }

            return poolIndex + (int)bitsRemaining;
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal static int GetMaxSizeForBucket(int binIndex)
        {
            int maxSize = 16 << binIndex;
            Debug.Assert(maxSize >= 0);
            return maxSize;
        }
    }
}
#endif
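SelectBucketIndex is effectively the ceiling of log2(length / 16); a quick trace of the arithmetic in the file above:

```csharp
// SelectBucketIndex(3000): (3000 - 1) >> 4 == 187
//   187 > 0xF -> shift 4 -> 11, poolIndex = 4
//   11  > 0x3 -> shift 2 -> 2,  poolIndex = 6
//   2   > 0x1 -> shift 1 -> 1,  poolIndex = 7
//   result: 7 + 1 == 8, and GetMaxSizeForBucket(8) == 16 << 8 == 4096 >= 3000
int index = Utilities.SelectBucketIndex(3000);    // 8
int size = Utilities.GetMaxSizeForBucket(index);  // 4096
```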
@@ -1,23 +1,60 @@
using System.Text;
using System;
using System.Text;

namespace SharpCompress.Common
{
public static class ArchiveEncoding
public class ArchiveEncoding
{
/// <summary>
/// Default encoding to use when archive format doesn't specify one.
/// </summary>
public static Encoding Default { get; set; }
public Encoding Default { get; set; }

/// <summary>
/// Encoding used by encryption schemes which don't comply with RFC 2898.
/// ArchiveEncoding used by encryption schemes which don't comply with RFC 2898.
/// </summary>
public static Encoding Password { get; set; }
public Encoding Password { get; set; }

static ArchiveEncoding()
/// <summary>
/// Set this encoding when you want to force it for all encoding operations.
/// </summary>
public Encoding Forced { get; set; }

/// <summary>
/// Set this when you want to use a custom method for all decoding operations.
/// </summary>
/// <returns>string Func(bytes, index, length)</returns>
public Func<byte[], int, int, string> CustomDecoder { get; set; }

public ArchiveEncoding()
{
Default = Encoding.UTF8;
Password = Encoding.UTF8;
}

public string Decode(byte[] bytes)
{
return Decode(bytes, 0, bytes.Length);
}

public string Decode(byte[] bytes, int start, int length)
{
return GetDecoder().Invoke(bytes, start, length);
}

public byte[] Encode(string str)
{
return GetEncoding().GetBytes(str);
}

public Encoding GetEncoding()
{
return Forced ?? Default ?? Encoding.UTF8;
}

public Func<byte[], int, int, string> GetDecoder()
{
return CustomDecoder ?? ((bytes, index, count) => (Default ?? Encoding.UTF8).GetString(bytes, index, count));
}
}
}
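With ArchiveEncoding now an instance hung off the options object (see the OptionsBase hunk further down), a caller can override how entry names are decoded. A sketch, assuming a zip whose names use code page 866 (on .NET Core that code page also needs the CodePages encoding provider registered):

```csharp
using System.Text;
using SharpCompress.Archives.Zip;
using SharpCompress.Common;
using SharpCompress.Readers;

var options = new ReaderOptions
{
    ArchiveEncoding = new ArchiveEncoding { Default = Encoding.GetEncoding(866) }
};

// Entry keys are now decoded with the configured Default instead of UTF-8.
using (var archive = ZipArchive.Open("cyrillic.zip", options))
{
    foreach (var entry in archive.Entries)
    {
        System.Console.WriteLine(entry.Key);
    }
}
```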
@@ -13,6 +13,7 @@
BCJ2,
LZip,
Xz,
Unknown
Unknown,
Deflate64
}
}
@@ -65,6 +65,12 @@ namespace SharpCompress.Common
/// </summary>
public abstract bool IsSplit { get; }

/// <inheritdoc/>
public override string ToString()
{
return this.Key;
}

internal abstract IEnumerable<FilePart> Parts { get; }
internal bool IsSolid { get; set; }
@@ -4,9 +4,17 @@ namespace SharpCompress.Common
{
public abstract class FilePart
{
protected FilePart(ArchiveEncoding archiveEncoding)
{
ArchiveEncoding = archiveEncoding;
}

internal ArchiveEncoding ArchiveEncoding { get; }

internal abstract string FilePartName { get; }

internal abstract Stream GetCompressedStream();
internal abstract Stream GetRawStream();
internal bool Skipped { get; set; }
}
}
@@ -1,6 +1,7 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;

namespace SharpCompress.Common.GZip
{

@@ -39,9 +40,9 @@ namespace SharpCompress.Common.GZip

internal override IEnumerable<FilePart> Parts => filePart.AsEnumerable<FilePart>();

internal static IEnumerable<GZipEntry> GetEntries(Stream stream)
internal static IEnumerable<GZipEntry> GetEntries(Stream stream, OptionsBase options)
{
yield return new GZipEntry(new GZipFilePart(stream));
yield return new GZipEntry(new GZipFilePart(stream, options.ArchiveEncoding));
}
}
}
@@ -5,35 +5,37 @@ using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.Compressors;
|
||||
using SharpCompress.Compressors.Deflate;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.GZip
|
||||
{
|
||||
internal class GZipFilePart : FilePart
|
||||
{
|
||||
private string name;
|
||||
private readonly Stream stream;
|
||||
private string _name;
|
||||
private readonly Stream _stream;
|
||||
|
||||
internal GZipFilePart(Stream stream)
|
||||
internal GZipFilePart(Stream stream, ArchiveEncoding archiveEncoding)
|
||||
: base(archiveEncoding)
|
||||
{
|
||||
ReadAndValidateGzipHeader(stream);
|
||||
EntryStartPosition = stream.Position;
|
||||
this.stream = stream;
|
||||
this._stream = stream;
|
||||
}
|
||||
|
||||
internal long EntryStartPosition { get; }
|
||||
|
||||
internal DateTime? DateModified { get; private set; }
|
||||
|
||||
internal override string FilePartName => name;
|
||||
internal override string FilePartName => _name;
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
return new DeflateStream(stream, CompressionMode.Decompress, CompressionLevel.Default, false);
|
||||
return new DeflateStream(_stream, CompressionMode.Decompress, CompressionLevel.Default, false);
|
||||
}
|
||||
|
||||
internal override Stream GetRawStream()
|
||||
{
|
||||
return stream;
|
||||
return _stream;
|
||||
}
|
||||
|
||||
private void ReadAndValidateGzipHeader(Stream stream)
|
||||
@@ -67,15 +69,16 @@ namespace SharpCompress.Common.GZip
|
||||
|
||||
Int16 extraLength = (Int16)(header[0] + header[1] * 256);
|
||||
byte[] extra = new byte[extraLength];
|
||||
n = stream.Read(extra, 0, extra.Length);
|
||||
if (n != extraLength)
|
||||
|
||||
if (!stream.ReadFully(extra))
|
||||
{
|
||||
throw new ZlibException("Unexpected end-of-file reading GZIP header.");
|
||||
}
|
||||
n = extraLength;
|
||||
}
|
||||
if ((header[3] & 0x08) == 0x08)
|
||||
{
|
||||
name = ReadZeroTerminatedString(stream);
|
||||
_name = ReadZeroTerminatedString(stream);
|
||||
}
|
||||
if ((header[3] & 0x10) == 0x010)
|
||||
{
|
||||
@@ -87,7 +90,7 @@ namespace SharpCompress.Common.GZip
|
||||
}
|
||||
}
|
||||
|
||||
private static string ReadZeroTerminatedString(Stream stream)
|
||||
private string ReadZeroTerminatedString(Stream stream)
|
||||
{
|
||||
byte[] buf1 = new byte[1];
|
||||
var list = new List<byte>();
|
||||
@@ -110,8 +113,8 @@ namespace SharpCompress.Common.GZip
|
||||
}
|
||||
}
|
||||
while (!done);
|
||||
byte[] a = list.ToArray();
|
||||
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
|
||||
byte[] buffer = list.ToArray();
|
||||
return ArchiveEncoding.Decode(buffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,5 @@
namespace SharpCompress.Common

namespace SharpCompress.Common
{
public class OptionsBase
{

@@ -6,5 +7,7 @@
/// SharpCompress will keep the supplied streams open. Default is true.
/// </summary>
public bool LeaveStreamOpen { get; set; } = true;

public ArchiveEncoding ArchiveEncoding { get; set; } = new ArchiveEncoding();
}
}
@@ -1,6 +1,6 @@
|
||||
using SharpCompress.IO;
|
||||
using System;
|
||||
using System.IO;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
@@ -52,50 +52,50 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
switch (HeaderType)
|
||||
{
|
||||
case HeaderType.FileHeader:
|
||||
{
|
||||
if (FileFlags.HasFlag(FileFlags.UNICODE))
|
||||
{
|
||||
int length = 0;
|
||||
while (length < fileNameBytes.Length
|
||||
&& fileNameBytes[length] != 0)
|
||||
if (FileFlags.HasFlag(FileFlags.UNICODE))
|
||||
{
|
||||
length++;
|
||||
}
|
||||
if (length != nameSize)
|
||||
{
|
||||
length++;
|
||||
FileName = FileNameDecoder.Decode(fileNameBytes, length);
|
||||
int length = 0;
|
||||
while (length < fileNameBytes.Length
|
||||
&& fileNameBytes[length] != 0)
|
||||
{
|
||||
length++;
|
||||
}
|
||||
if (length != nameSize)
|
||||
{
|
||||
length++;
|
||||
FileName = FileNameDecoder.Decode(fileNameBytes, length);
|
||||
}
|
||||
else
|
||||
{
|
||||
FileName = ArchiveEncoding.Decode(fileNameBytes);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
FileName = DecodeDefault(fileNameBytes);
|
||||
FileName = ArchiveEncoding.Decode(fileNameBytes);
|
||||
}
|
||||
FileName = ConvertPath(FileName, HostOS);
|
||||
}
|
||||
else
|
||||
{
|
||||
FileName = DecodeDefault(fileNameBytes);
|
||||
}
|
||||
FileName = ConvertPath(FileName, HostOS);
|
||||
}
|
||||
break;
|
||||
case HeaderType.NewSubHeader:
|
||||
{
|
||||
int datasize = HeaderSize - NEWLHD_SIZE - nameSize;
|
||||
if (FileFlags.HasFlag(FileFlags.SALT))
|
||||
{
|
||||
datasize -= SALT_SIZE;
|
||||
}
|
||||
if (datasize > 0)
|
||||
{
|
||||
SubData = reader.ReadBytes(datasize);
|
||||
}
|
||||
int datasize = HeaderSize - NEWLHD_SIZE - nameSize;
|
||||
if (FileFlags.HasFlag(FileFlags.SALT))
|
||||
{
|
||||
datasize -= SALT_SIZE;
|
||||
}
|
||||
if (datasize > 0)
|
||||
{
|
||||
SubData = reader.ReadBytes(datasize);
|
||||
}
|
||||
|
||||
if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes))
|
||||
{
|
||||
RecoverySectors = SubData[8] + (SubData[9] << 8)
|
||||
+ (SubData[10] << 16) + (SubData[11] << 24);
|
||||
if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes))
|
||||
{
|
||||
RecoverySectors = SubData[8] + (SubData[9] << 8)
|
||||
+ (SubData[10] << 16) + (SubData[11] << 24);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -118,12 +118,6 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
}
|
||||
}
|
||||
|
||||
//only the full .net framework will do other code pages than unicode/utf8
|
||||
private string DecodeDefault(byte[] bytes)
|
||||
{
|
||||
return ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length);
|
||||
}
|
||||
|
||||
private long UInt32To64(uint x, uint y)
|
||||
{
|
||||
long l = x;
|
||||
@@ -178,6 +172,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
}
|
||||
|
||||
internal long DataStartPosition { get; set; }
|
||||
|
||||
internal HostOS HostOS { get; private set; }
|
||||
|
||||
internal uint FileCRC { get; private set; }
|
||||
@@ -199,6 +194,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
internal FileFlags FileFlags => (FileFlags)Flags;
|
||||
|
||||
internal long CompressedSize { get; private set; }
|
||||
|
||||
internal long UncompressedSize { get; private set; }
|
||||
|
||||
internal string FileName { get; private set; }
|
||||
|
||||
@@ -18,9 +18,9 @@ namespace SharpCompress.Common.Rar.Headers
                     Flags == 0x1A21 &&
                     HeaderSize == 0x07;

            // Rar5 signature: 52 61 72 21 1A 07 10 00 (not supported yet)
            // Rar5 signature: 52 61 72 21 1A 07 01 00 (not supported yet)
        }

        internal bool OldFormat { get; private set; }
    }
}
}

@@ -1,6 +1,7 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
@@ -17,14 +18,16 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
HeaderSize = baseHeader.HeaderSize;
|
||||
AdditionalSize = baseHeader.AdditionalSize;
|
||||
ReadBytes = baseHeader.ReadBytes;
|
||||
ArchiveEncoding = baseHeader.ArchiveEncoding;
|
||||
}
|
||||
|
||||
internal static RarHeader Create(RarCrcBinaryReader reader)
|
||||
internal static RarHeader Create(RarCrcBinaryReader reader, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
try
|
||||
{
|
||||
RarHeader header = new RarHeader();
|
||||
|
||||
header.ArchiveEncoding = archiveEncoding;
|
||||
reader.Mark();
|
||||
header.ReadStartFromReader(reader);
|
||||
header.ReadBytes += reader.CurrentReadByteCount;
|
||||
@@ -50,7 +53,8 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
}
|
||||
}
|
||||
|
||||
protected virtual void ReadFromReader(MarkingBinaryReader reader) {
|
||||
protected virtual void ReadFromReader(MarkingBinaryReader reader)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
@@ -76,10 +80,11 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
return header;
|
||||
}
|
||||
|
||||
private void VerifyHeaderCrc(ushort crc) {
|
||||
if (HeaderType != HeaderType.MarkHeader)
|
||||
private void VerifyHeaderCrc(ushort crc)
|
||||
{
|
||||
if (HeaderType != HeaderType.MarkHeader)
|
||||
{
|
||||
if (crc != HeadCRC)
|
||||
if (crc != HeadCRC)
|
||||
{
|
||||
throw new InvalidFormatException("rar header crc mismatch");
|
||||
}
|
||||
@@ -106,6 +111,8 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
|
||||
protected short HeaderSize { get; private set; }
|
||||
|
||||
internal ArchiveEncoding ArchiveEncoding { get; private set; }
|
||||
|
||||
/// <summary>
|
||||
/// This additional size of the header could be file data
|
||||
/// </summary>
|
||||
|
||||
@@ -117,7 +117,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
#if !NO_CRYPTO
|
||||
var reader = new RarCryptoBinaryReader(stream, Options.Password);
|
||||
|
||||
|
||||
if (IsEncrypted)
|
||||
{
|
||||
if (Options.Password == null)
|
||||
@@ -133,7 +133,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
|
||||
#endif
|
||||
|
||||
RarHeader header = RarHeader.Create(reader);
|
||||
RarHeader header = RarHeader.Create(reader, Options.ArchiveEncoding);
|
||||
if (header == null)
|
||||
{
|
||||
return null;
|
||||
@@ -141,110 +141,110 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
switch (header.HeaderType)
|
||||
{
|
||||
case HeaderType.ArchiveHeader:
|
||||
{
|
||||
var ah = header.PromoteHeader<ArchiveHeader>(reader);
|
||||
IsEncrypted = ah.HasPassword;
|
||||
return ah;
|
||||
}
|
||||
{
|
||||
var ah = header.PromoteHeader<ArchiveHeader>(reader);
|
||||
IsEncrypted = ah.HasPassword;
|
||||
return ah;
|
||||
}
|
||||
case HeaderType.MarkHeader:
|
||||
{
|
||||
return header.PromoteHeader<MarkHeader>(reader);
|
||||
}
|
||||
{
|
||||
return header.PromoteHeader<MarkHeader>(reader);
|
||||
}
|
||||
|
||||
case HeaderType.ProtectHeader:
|
||||
{
|
||||
ProtectHeader ph = header.PromoteHeader<ProtectHeader>(reader);
|
||||
|
||||
// skip the recovery record data, we do not use it.
|
||||
switch (StreamingMode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
reader.BaseStream.Position += ph.DataSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
reader.BaseStream.Skip(ph.DataSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
ProtectHeader ph = header.PromoteHeader<ProtectHeader>(reader);
|
||||
|
||||
return ph;
|
||||
}
|
||||
// skip the recovery record data, we do not use it.
|
||||
switch (StreamingMode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
reader.BaseStream.Position += ph.DataSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
reader.BaseStream.Skip(ph.DataSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
|
||||
return ph;
|
||||
}
|
||||
|
||||
case HeaderType.NewSubHeader:
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
//skip the data because it's useless?
|
||||
reader.BaseStream.Skip(fh.CompressedSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
//skip the data because it's useless?
|
||||
reader.BaseStream.Skip(fh.CompressedSize);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
case HeaderType.FileHeader:
|
||||
{
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
|
||||
switch (StreamingMode)
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize);
|
||||
if (fh.Salt == null)
|
||||
{
|
||||
fh.PackedStream = ms;
|
||||
}
|
||||
else
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
fh.DataStartPosition = reader.BaseStream.Position;
|
||||
reader.BaseStream.Position += fh.CompressedSize;
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize);
|
||||
if (fh.Salt == null)
|
||||
{
|
||||
fh.PackedStream = ms;
|
||||
}
|
||||
else
|
||||
{
|
||||
#if !NO_CRYPTO
|
||||
fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.Salt);
|
||||
fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.Salt);
|
||||
#else
|
||||
throw new NotSupportedException("RarCrypto not supported");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
return fh;
|
||||
}
|
||||
case HeaderType.EndArchiveHeader:
|
||||
{
|
||||
return header.PromoteHeader<EndArchiveHeader>(reader);
|
||||
}
|
||||
{
|
||||
return header.PromoteHeader<EndArchiveHeader>(reader);
|
||||
}
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid Rar Header: " + header.HeaderType);
|
||||
}
|
||||
{
|
||||
throw new InvalidFormatException("Invalid Rar Header: " + header.HeaderType);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -9,6 +9,7 @@ namespace SharpCompress.Common.Rar
|
||||
internal abstract class RarFilePart : FilePart
|
||||
{
|
||||
internal RarFilePart(MarkHeader mh, FileHeader fh)
|
||||
: base(fh.ArchiveEncoding)
|
||||
{
|
||||
MarkHeader = mh;
|
||||
FileHeader = fh;
|
||||
|
||||
@@ -22,6 +22,13 @@ namespace SharpCompress.Common.SevenZip
|
||||
internal List<long> PackStreamStartPositions = new List<long>();
|
||||
internal List<int> FolderStartFileIndex = new List<int>();
|
||||
internal List<int> FileIndexToFolderIndexMap = new List<int>();
|
||||
|
||||
internal IPasswordProvider PasswordProvider { get; }
|
||||
|
||||
public ArchiveDatabase(IPasswordProvider passwordProvider)
|
||||
{
|
||||
PasswordProvider = passwordProvider;
|
||||
}
|
||||
|
||||
internal void Clear()
|
||||
{
|
||||
|
||||
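Storing the IPasswordProvider on ArchiveDatabase (above) lets the 7Zip folder decoders resolve the password lazily instead of threading it through every call. A minimal caller-side sketch, assuming the existing SevenZipArchive/ReaderOptions surface:

    using System;
    using SharpCompress.Archives.SevenZip;
    using SharpCompress.Readers;

    // The password is supplied once; encrypted folders are decoded on demand.
    using (var archive = SevenZipArchive.Open("secret.7z", new ReaderOptions { Password = "password" }))
    {
        foreach (var entry in archive.Entries)
        {
            Console.WriteLine($"{entry.Key} ({entry.Size} bytes)");
        }
    }
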
@@ -182,7 +182,7 @@ namespace SharpCompress.Common.SevenZip

        private DateTime? TranslateTime(long? time)
        {
            if (time.HasValue)
            if (time.HasValue && time.Value >= 0 && time.Value <= 2650467743999999999) //maximum Windows file time 31.12.9999
            {
                return TranslateTime(time.Value);
            }
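The new upper bound is exactly the number of 100-nanosecond ticks between 1601-01-01 (the FILETIME epoch) and the end of year 9999, so any larger stored value cannot be represented as a DateTime. A quick check of the constant:

    using System;

    // Verifies the bound used in TranslateTime above: FILETIME counts 100 ns ticks
    // from 1601-01-01 (UTC), and DateTime tops out at 9999-12-31.
    long maxFileTime = (DateTime.MaxValue - new DateTime(1601, 1, 1, 0, 0, 0, DateTimeKind.Utc)).Ticks;
    Console.WriteLine(maxFileTime); // prints 2650467743999999999
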
@@ -1211,7 +1211,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
|
||||
public ArchiveDatabase ReadDatabase(IPasswordProvider pass)
|
||||
{
|
||||
var db = new ArchiveDatabase();
|
||||
var db = new ArchiveDatabase(pass);
|
||||
db.Clear();
|
||||
|
||||
db.MajorVersion = _header[6];
|
||||
@@ -1279,7 +1279,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
throw new InvalidOperationException();
|
||||
}
|
||||
|
||||
var dataVector = ReadAndDecodePackedStreams(db.StartPositionAfterHeader, pass);
|
||||
var dataVector = ReadAndDecodePackedStreams(db.StartPositionAfterHeader, db.PasswordProvider);
|
||||
|
||||
// compressed header without content is odd but ok
|
||||
if (dataVector.Count == 0)
|
||||
@@ -1301,7 +1301,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
}
|
||||
}
|
||||
|
||||
ReadHeader(db, pass);
|
||||
ReadHeader(db, db.PasswordProvider);
|
||||
}
|
||||
db.Fill();
|
||||
return db;
|
||||
@@ -1441,7 +1441,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
#endregion
|
||||
}
|
||||
|
||||
private Stream GetCachedDecoderStream(ArchiveDatabase _db, int folderIndex, IPasswordProvider pw)
|
||||
private Stream GetCachedDecoderStream(ArchiveDatabase _db, int folderIndex)
|
||||
{
|
||||
Stream s;
|
||||
if (!_cachedStreams.TryGetValue(folderIndex, out s))
|
||||
@@ -1456,13 +1456,13 @@ namespace SharpCompress.Common.SevenZip
|
||||
}
|
||||
|
||||
s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes.ToArray(), folderInfo,
|
||||
pw);
|
||||
_db.PasswordProvider);
|
||||
_cachedStreams.Add(folderIndex, s);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
public Stream OpenStream(ArchiveDatabase _db, int fileIndex, IPasswordProvider pw)
|
||||
public Stream OpenStream(ArchiveDatabase _db, int fileIndex)
|
||||
{
|
||||
int folderIndex = _db.FileIndexToFolderIndexMap[fileIndex];
|
||||
int numFilesInFolder = _db.NumUnpackStreamsVector[folderIndex];
|
||||
@@ -1479,12 +1479,12 @@ namespace SharpCompress.Common.SevenZip
|
||||
skipSize += _db.Files[firstFileIndex + i].Size;
|
||||
}
|
||||
|
||||
Stream s = GetCachedDecoderStream(_db, folderIndex, pw);
|
||||
Stream s = GetCachedDecoderStream(_db, folderIndex);
|
||||
s.Position = skipSize;
|
||||
return new ReadOnlySubStream(s, _db.Files[fileIndex].Size);
|
||||
}
|
||||
|
||||
public void Extract(ArchiveDatabase _db, int[] indices, IPasswordProvider pw)
|
||||
public void Extract(ArchiveDatabase _db, int[] indices)
|
||||
{
|
||||
int numItems;
|
||||
bool allFilesMode = (indices == null);
|
||||
@@ -1562,7 +1562,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
// TODO: If the decoding fails the last file may be extracted incompletely. Delete it?
|
||||
|
||||
Stream s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes.ToArray(),
|
||||
folderInfo, pw);
|
||||
folderInfo, _db.PasswordProvider);
|
||||
byte[] buffer = new byte[4 << 10];
|
||||
for (;;)
|
||||
{
|
||||
@@ -1588,4 +1588,4 @@ namespace SharpCompress.Common.SevenZip
|
||||
|
||||
#endregion
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,14 +7,15 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
internal class SevenZipFilePart : FilePart
|
||||
{
|
||||
private CompressionType? type;
|
||||
private readonly Stream stream;
|
||||
private readonly ArchiveDatabase database;
|
||||
private CompressionType? _type;
|
||||
private readonly Stream _stream;
|
||||
private readonly ArchiveDatabase _database;
|
||||
|
||||
internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry)
|
||||
internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry, ArchiveEncoding archiveEncoding)
|
||||
: base(archiveEncoding)
|
||||
{
|
||||
this.stream = stream;
|
||||
this.database = database;
|
||||
this._stream = stream;
|
||||
this._database = database;
|
||||
Index = index;
|
||||
Header = fileEntry;
|
||||
if (Header.HasStream)
|
||||
@@ -41,14 +42,14 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
return null;
|
||||
}
|
||||
var folderStream = database.GetFolderStream(stream, Folder, null);
|
||||
var folderStream = _database.GetFolderStream(_stream, Folder, _database.PasswordProvider);
|
||||
|
||||
int firstFileIndex = database.FolderStartFileIndex[database.Folders.IndexOf(Folder)];
|
||||
int firstFileIndex = _database.FolderStartFileIndex[_database.Folders.IndexOf(Folder)];
|
||||
int skipCount = Index - firstFileIndex;
|
||||
long skipSize = 0;
|
||||
for (int i = 0; i < skipCount; i++)
|
||||
{
|
||||
skipSize += database.Files[firstFileIndex + i].Size;
|
||||
skipSize += _database.Files[firstFileIndex + i].Size;
|
||||
}
|
||||
if (skipSize > 0)
|
||||
{
|
||||
@@ -61,11 +62,11 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
get
|
||||
{
|
||||
if (type == null)
|
||||
if (_type == null)
|
||||
{
|
||||
type = GetCompression();
|
||||
_type = GetCompression();
|
||||
}
|
||||
return type.Value;
|
||||
return _type.Value;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,7 +85,7 @@ namespace SharpCompress.Common.SevenZip
|
||||
{
|
||||
var coder = Folder.Coders.First();
|
||||
switch (coder.MethodId.Id)
|
||||
{
|
||||
{
|
||||
case k_LZMA:
|
||||
case k_LZMA2:
|
||||
{
|
||||
|
||||
@@ -9,6 +9,11 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
{
|
||||
internal static readonly DateTime Epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
|
||||
public TarHeader(ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
internal string Name { get; set; }
|
||||
|
||||
//internal int Mode { get; set; }
|
||||
@@ -20,6 +25,7 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
internal DateTime LastModifiedTime { get; set; }
|
||||
internal EntryType EntryType { get; set; }
|
||||
internal Stream PackedStream { get; set; }
|
||||
internal ArchiveEncoding ArchiveEncoding { get; }
|
||||
|
||||
internal const int BlockSize = 512;
|
||||
|
||||
@@ -31,7 +37,7 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
WriteOctalBytes(0, buffer, 108, 8); // owner ID
|
||||
WriteOctalBytes(0, buffer, 116, 8); // group ID
|
||||
|
||||
//Encoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
|
||||
//ArchiveEncoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
|
||||
if (Name.Length > 100)
|
||||
{
|
||||
// Set mock filename and filetype to indicate the next block is the actual name of the file
|
||||
@@ -72,7 +78,7 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
|
||||
private void WriteLongFilenameHeader(Stream output)
|
||||
{
|
||||
byte[] nameBytes = ArchiveEncoding.Default.GetBytes(Name);
|
||||
byte[] nameBytes = ArchiveEncoding.Encode(Name);
|
||||
output.Write(nameBytes, 0, nameBytes.Length);
|
||||
|
||||
// pad to multiple of BlockSize bytes, and make sure a terminating null is added
|
||||
@@ -99,7 +105,7 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
}
|
||||
else
|
||||
{
|
||||
Name = ArchiveEncoding.Default.GetString(buffer, 0, 100).TrimNulls();
|
||||
Name = ArchiveEncoding.Decode(buffer, 0, 100).TrimNulls();
|
||||
}
|
||||
|
||||
EntryType = ReadEntryType(buffer);
|
||||
@@ -111,12 +117,12 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
long unixTimeStamp = ReadASCIIInt64Base8(buffer, 136, 11);
|
||||
LastModifiedTime = Epoch.AddSeconds(unixTimeStamp).ToLocalTime();
|
||||
|
||||
Magic = ArchiveEncoding.Default.GetString(buffer, 257, 6).TrimNulls();
|
||||
Magic = ArchiveEncoding.Decode(buffer, 257, 6).TrimNulls();
|
||||
|
||||
if (!string.IsNullOrEmpty(Magic)
|
||||
&& "ustar".Equals(Magic))
|
||||
{
|
||||
string namePrefix = ArchiveEncoding.Default.GetString(buffer, 345, 157);
|
||||
string namePrefix = ArchiveEncoding.Decode(buffer, 345, 157);
|
||||
namePrefix = namePrefix.TrimNulls();
|
||||
if (!string.IsNullOrEmpty(namePrefix))
|
||||
{
|
||||
@@ -143,7 +149,7 @@ namespace SharpCompress.Common.Tar.Headers
|
||||
{
|
||||
reader.ReadBytes(remainingBytesToRead);
|
||||
}
|
||||
return ArchiveEncoding.Default.GetString(nameBytes, 0, nameBytes.Length).TrimNulls();
|
||||
return ArchiveEncoding.Decode(nameBytes, 0, nameBytes.Length).TrimNulls();
|
||||
}
|
||||
|
||||
private static EntryType ReadEntryType(byte[] buffer)
|
||||
|
||||
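The tar header above now decodes entry names through the ArchiveEncoding instance it was constructed with instead of a static default. A hedged caller-side sketch of supplying that encoding through ReaderOptions (property names are assumptions, mirrored from the Options.ArchiveEncoding usage in the Rar factory earlier in this diff):

    using System;
    using System.IO;
    using System.Text;
    using SharpCompress.Common;
    using SharpCompress.Readers;

    // Assumed API: ReaderOptions.ArchiveEncoding with a settable Default encoding.
    // Legacy code pages may additionally need the System.Text.Encoding.CodePages package.
    var options = new ReaderOptions
    {
        ArchiveEncoding = new ArchiveEncoding { Default = Encoding.GetEncoding("IBM866") }
    };

    using (Stream stream = File.OpenRead("legacy-names.tar"))
    using (var reader = ReaderFactory.Open(stream, options))
    {
        while (reader.MoveToNextEntry())
        {
            Console.WriteLine(reader.Entry.Key); // decoded with the supplied encoding
        }
    }
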
@@ -3,6 +3,7 @@ using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
@@ -43,9 +44,9 @@ namespace SharpCompress.Common.Tar
|
||||
internal override IEnumerable<FilePart> Parts => filePart.AsEnumerable<FilePart>();
|
||||
|
||||
internal static IEnumerable<TarEntry> GetEntries(StreamingMode mode, Stream stream,
|
||||
CompressionType compressionType)
|
||||
CompressionType compressionType, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
foreach (TarHeader h in TarHeaderFactory.ReadHeader(mode, stream))
|
||||
foreach (TarHeader h in TarHeaderFactory.ReadHeader(mode, stream, archiveEncoding))
|
||||
{
|
||||
if (h != null)
|
||||
{
|
||||
|
||||
@@ -6,11 +6,12 @@ namespace SharpCompress.Common.Tar
|
||||
{
|
||||
internal class TarFilePart : FilePart
|
||||
{
|
||||
private readonly Stream seekableStream;
|
||||
private readonly Stream _seekableStream;
|
||||
|
||||
internal TarFilePart(TarHeader header, Stream seekableStream)
|
||||
: base(header.ArchiveEncoding)
|
||||
{
|
||||
this.seekableStream = seekableStream;
|
||||
this._seekableStream = seekableStream;
|
||||
Header = header;
|
||||
}
|
||||
|
||||
@@ -20,10 +21,10 @@ namespace SharpCompress.Common.Tar
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (seekableStream != null)
|
||||
if (_seekableStream != null)
|
||||
{
|
||||
seekableStream.Position = Header.DataStartPosition.Value;
|
||||
return new ReadOnlySubStream(seekableStream, Header.Size);
|
||||
_seekableStream.Position = Header.DataStartPosition.Value;
|
||||
return new ReadOnlySubStream(_seekableStream, Header.Size);
|
||||
}
|
||||
return Header.PackedStream;
|
||||
}
|
||||
|
||||
@@ -2,12 +2,13 @@
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Tar
|
||||
{
|
||||
internal static class TarHeaderFactory
|
||||
{
|
||||
internal static IEnumerable<TarHeader> ReadHeader(StreamingMode mode, Stream stream)
|
||||
internal static IEnumerable<TarHeader> ReadHeader(StreamingMode mode, Stream stream, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
@@ -15,7 +16,8 @@ namespace SharpCompress.Common.Tar
|
||||
try
|
||||
{
|
||||
BinaryReader reader = new BinaryReader(stream);
|
||||
header = new TarHeader();
|
||||
header = new TarHeader(archiveEncoding);
|
||||
|
||||
if (!header.Read(reader))
|
||||
{
|
||||
yield break;
|
||||
@@ -23,22 +25,22 @@ namespace SharpCompress.Common.Tar
|
||||
switch (mode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
header.DataStartPosition = reader.BaseStream.Position;
|
||||
{
|
||||
header.DataStartPosition = reader.BaseStream.Position;
|
||||
|
||||
//skip to nearest 512
|
||||
reader.BaseStream.Position += PadTo512(header.Size);
|
||||
}
|
||||
//skip to nearest 512
|
||||
reader.BaseStream.Position += PadTo512(header.Size);
|
||||
}
|
||||
break;
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
|
||||
}
|
||||
{
|
||||
header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
}
|
||||
catch
|
||||
|
||||
@@ -6,8 +6,8 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal class DirectoryEntryHeader : ZipFileEntry
|
||||
{
|
||||
public DirectoryEntryHeader()
|
||||
: base(ZipHeaderType.DirectoryEntry)
|
||||
public DirectoryEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
: base(ZipHeaderType.DirectoryEntry, archiveEncoding)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -31,10 +31,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
RelativeOffsetOfEntryHeader = reader.ReadUInt32();
|
||||
|
||||
byte[] name = reader.ReadBytes(nameLength);
|
||||
Name = DecodeString(name);
|
||||
Name = ArchiveEncoding.Decode(name);
|
||||
byte[] extra = reader.ReadBytes(extraLength);
|
||||
byte[] comment = reader.ReadBytes(commentLength);
|
||||
Comment = DecodeString(comment);
|
||||
Comment = ArchiveEncoding.Decode(comment);
|
||||
LoadExtra(extra);
|
||||
|
||||
var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);
|
||||
|
||||
@@ -5,6 +5,7 @@ namespace SharpCompress.Common.Zip.Headers
    [Flags]
    internal enum HeaderFlags : ushort
    {
        None = 0,
        Encrypted = 1, // http://www.pkware.com/documents/casestudies/APPNOTE.TXT
        Bit1 = 2,
        Bit2 = 4,

@@ -1,12 +1,13 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal class LocalEntryHeader : ZipFileEntry
|
||||
{
|
||||
public LocalEntryHeader()
|
||||
: base(ZipHeaderType.LocalEntry)
|
||||
public LocalEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
: base(ZipHeaderType.LocalEntry, archiveEncoding)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -24,7 +25,7 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
ushort extraLength = reader.ReadUInt16();
|
||||
byte[] name = reader.ReadBytes(nameLength);
|
||||
byte[] extra = reader.ReadBytes(extraLength);
|
||||
Name = DecodeString(name);
|
||||
Name = ArchiveEncoding.Decode(name);
|
||||
LoadExtra(extra);
|
||||
|
||||
var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);
|
||||
|
||||
@@ -8,10 +8,11 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
{
|
||||
internal abstract class ZipFileEntry : ZipHeader
|
||||
{
|
||||
protected ZipFileEntry(ZipHeaderType type)
|
||||
protected ZipFileEntry(ZipHeaderType type, ArchiveEncoding archiveEncoding)
|
||||
: base(type)
|
||||
{
|
||||
Extra = new List<ExtraData>();
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
internal bool IsDirectory
|
||||
@@ -29,28 +30,11 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
&& Name.EndsWith("\\");
|
||||
}
|
||||
}
|
||||
|
||||
protected string DecodeString(byte[] str)
|
||||
{
|
||||
if (FlagUtility.HasFlag(Flags, HeaderFlags.UTF8))
|
||||
{
|
||||
return Encoding.UTF8.GetString(str, 0, str.Length);
|
||||
}
|
||||
|
||||
return ArchiveEncoding.Default.GetString(str, 0, str.Length);
|
||||
}
|
||||
|
||||
protected byte[] EncodeString(string str)
|
||||
{
|
||||
if (FlagUtility.HasFlag(Flags, HeaderFlags.UTF8))
|
||||
{
|
||||
return Encoding.UTF8.GetBytes(str);
|
||||
}
|
||||
return ArchiveEncoding.Default.GetBytes(str);
|
||||
}
|
||||
|
||||
|
||||
internal Stream PackedStream { get; set; }
|
||||
|
||||
internal ArchiveEncoding ArchiveEncoding { get; }
|
||||
|
||||
internal string Name { get; set; }
|
||||
|
||||
internal HeaderFlags Flags { get; set; }
|
||||
@@ -64,7 +48,7 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
internal long UncompressedSize { get; set; }
|
||||
|
||||
internal List<ExtraData> Extra { get; set; }
|
||||
|
||||
|
||||
public string Password { get; set; }
|
||||
|
||||
internal PkwareTraditionalEncryptionData ComposeEncryptionData(Stream archiveStream)
|
||||
@@ -75,10 +59,10 @@ namespace SharpCompress.Common.Zip.Headers
|
||||
}
|
||||
|
||||
var buffer = new byte[12];
|
||||
archiveStream.Read(buffer, 0, 12);
|
||||
archiveStream.ReadFully(buffer);
|
||||
|
||||
PkwareTraditionalEncryptionData encryptionData = PkwareTraditionalEncryptionData.ForRead(Password, this, buffer);
|
||||
|
||||
|
||||
return encryptionData;
|
||||
}
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException("buffer");
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
|
||||
byte[] temp = new byte[count];
|
||||
|
||||
@@ -9,9 +9,11 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
private static readonly CRC32 crc32 = new CRC32();
|
||||
private readonly UInt32[] _Keys = {0x12345678, 0x23456789, 0x34567890};
|
||||
private readonly ArchiveEncoding _archiveEncoding;
|
||||
|
||||
private PkwareTraditionalEncryptionData(string password)
|
||||
private PkwareTraditionalEncryptionData(string password, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
_archiveEncoding = archiveEncoding;
|
||||
Initialize(password);
|
||||
}
|
||||
|
||||
@@ -27,7 +29,7 @@ namespace SharpCompress.Common.Zip
|
||||
public static PkwareTraditionalEncryptionData ForRead(string password, ZipFileEntry header,
|
||||
byte[] encryptionHeader)
|
||||
{
|
||||
var encryptor = new PkwareTraditionalEncryptionData(password);
|
||||
var encryptor = new PkwareTraditionalEncryptionData(password, header.ArchiveEncoding);
|
||||
byte[] plainTextHeader = encryptor.Decrypt(encryptionHeader, encryptionHeader.Length);
|
||||
if (plainTextHeader[11] != (byte)((header.Crc >> 24) & 0xff))
|
||||
{
|
||||
@@ -47,7 +49,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
if (length > cipherText.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("length",
|
||||
throw new ArgumentOutOfRangeException(nameof(length),
|
||||
"Bad length during Decryption: the length parameter must be smaller than or equal to the size of the destination array.");
|
||||
}
|
||||
|
||||
@@ -70,7 +72,7 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (length > plainText.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("length",
|
||||
throw new ArgumentOutOfRangeException(nameof(length),
|
||||
"Bad length during Encryption: The length parameter must be smaller than or equal to the size of the destination array.");
|
||||
}
|
||||
|
||||
@@ -93,17 +95,12 @@ namespace SharpCompress.Common.Zip
|
||||
}
|
||||
}
|
||||
|
||||
internal static byte[] StringToByteArray(string value, Encoding encoding)
|
||||
internal byte[] StringToByteArray(string value)
|
||||
{
|
||||
byte[] a = encoding.GetBytes(value);
|
||||
byte[] a = _archiveEncoding.Password.GetBytes(value);
|
||||
return a;
|
||||
}
|
||||
|
||||
internal static byte[] StringToByteArray(string value)
|
||||
{
|
||||
return StringToByteArray(value, ArchiveEncoding.Password);
|
||||
}
|
||||
|
||||
private void UpdateKeys(byte byteValue)
|
||||
{
|
||||
_Keys[0] = (UInt32)crc32.ComputeCrc32((int)_Keys[0], byteValue);
|
||||
|
||||
@@ -5,21 +5,21 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class SeekableZipFilePart : ZipFilePart
|
||||
{
|
||||
private bool isLocalHeaderLoaded;
|
||||
private readonly SeekableZipHeaderFactory headerFactory;
|
||||
private bool _isLocalHeaderLoaded;
|
||||
private readonly SeekableZipHeaderFactory _headerFactory;
|
||||
|
||||
internal SeekableZipFilePart(SeekableZipHeaderFactory headerFactory, DirectoryEntryHeader header, Stream stream)
|
||||
: base(header, stream)
|
||||
{
|
||||
this.headerFactory = headerFactory;
|
||||
this._headerFactory = headerFactory;
|
||||
}
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (!isLocalHeaderLoaded)
|
||||
if (!_isLocalHeaderLoaded)
|
||||
{
|
||||
LoadLocalHeader();
|
||||
isLocalHeaderLoaded = true;
|
||||
_isLocalHeaderLoaded = true;
|
||||
}
|
||||
return base.GetCompressedStream();
|
||||
}
|
||||
@@ -29,7 +29,7 @@ namespace SharpCompress.Common.Zip
|
||||
private void LoadLocalHeader()
|
||||
{
|
||||
bool hasData = Header.HasData;
|
||||
Header = headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader);
|
||||
Header = _headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader);
|
||||
Header.HasData = hasData;
|
||||
}
|
||||
|
||||
|
||||
@@ -3,16 +3,17 @@ using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class SeekableZipHeaderFactory : ZipHeaderFactory
|
||||
{
|
||||
private const int MAX_ITERATIONS_FOR_DIRECTORY_HEADER = 4096;
|
||||
private bool zip64;
|
||||
private bool _zip64;
|
||||
|
||||
internal SeekableZipHeaderFactory(string password)
|
||||
: base(StreamingMode.Seekable, password)
|
||||
internal SeekableZipHeaderFactory(string password, ArchiveEncoding archiveEncoding)
|
||||
: base(StreamingMode.Seekable, password, archiveEncoding)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -26,14 +27,14 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
if (entry.IsZip64)
|
||||
{
|
||||
zip64 = true;
|
||||
_zip64 = true;
|
||||
SeekBackToHeader(stream, reader, ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR);
|
||||
var zip64Locator = new Zip64DirectoryEndLocatorHeader();
|
||||
zip64Locator.Read(reader);
|
||||
|
||||
stream.Seek(zip64Locator.RelativeOffsetOfTheEndOfDirectoryRecord, SeekOrigin.Begin);
|
||||
uint zip64Signature = reader.ReadUInt32();
|
||||
if(zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
|
||||
if (zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
|
||||
throw new ArchiveException("Failed to locate the Zip64 Header");
|
||||
|
||||
var zip64Entry = new Zip64DirectoryEndHeader();
|
||||
@@ -50,7 +51,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
stream.Position = position;
|
||||
uint signature = reader.ReadUInt32();
|
||||
var directoryEntryHeader = ReadHeader(signature, reader, zip64) as DirectoryEntryHeader;
|
||||
var directoryEntryHeader = ReadHeader(signature, reader, _zip64) as DirectoryEntryHeader;
|
||||
position = stream.Position;
|
||||
if (directoryEntryHeader == null)
|
||||
{
|
||||
@@ -91,7 +92,7 @@ namespace SharpCompress.Common.Zip
|
||||
stream.Seek(directoryEntryHeader.RelativeOffsetOfEntryHeader, SeekOrigin.Begin);
|
||||
BinaryReader reader = new BinaryReader(stream);
|
||||
uint signature = reader.ReadUInt32();
|
||||
var localEntryHeader = ReadHeader(signature, reader, zip64) as LocalEntryHeader;
|
||||
var localEntryHeader = ReadHeader(signature, reader, _zip64) as LocalEntryHeader;
|
||||
if (localEntryHeader == null)
|
||||
{
|
||||
throw new InvalidOperationException();
|
||||
|
||||
@@ -39,19 +39,20 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
return new BinaryReader(rewindableStream);
|
||||
}
|
||||
if (Header.HasData)
|
||||
if (Header.HasData && !Skipped)
|
||||
{
|
||||
if (decompressionStream == null)
|
||||
{
|
||||
decompressionStream = GetCompressedStream();
|
||||
}
|
||||
decompressionStream.SkipAll();
|
||||
decompressionStream.Skip();
|
||||
|
||||
DeflateStream deflateStream = decompressionStream as DeflateStream;
|
||||
if (deflateStream != null)
|
||||
{
|
||||
rewindableStream.Rewind(deflateStream.InputBuffer);
|
||||
}
|
||||
Skipped = true;
|
||||
}
|
||||
var reader = new BinaryReader(rewindableStream);
|
||||
decompressionStream = null;
|
||||
|
||||
@@ -2,13 +2,14 @@
|
||||
using System.IO;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
internal class StreamingZipHeaderFactory : ZipHeaderFactory
|
||||
{
|
||||
internal StreamingZipHeaderFactory(string password)
|
||||
: base(StreamingMode.Streaming, password)
|
||||
internal StreamingZipHeaderFactory(string password, ArchiveEncoding archiveEncoding)
|
||||
: base(StreamingMode.Streaming, password, archiveEncoding)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -78,7 +78,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
//read out last 10 auth bytes
|
||||
var ten = new byte[10];
|
||||
stream.Read(ten, 0, 10);
|
||||
stream.ReadFully(ten);
|
||||
stream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,6 +32,10 @@ namespace SharpCompress.Common.Zip
                {
                    return CompressionType.Deflate;
                }
                case ZipCompressionMethod.Deflate64:
                {
                    return CompressionType.Deflate64;
                }
                case ZipCompressionMethod.LZMA:
                {
                    return CompressionType.LZMA;

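Deflate64 entries map to their own CompressionType, and the Deflate64Stream added later in this change only implements decompression, so the method is read-only. A minimal sketch of extracting such an archive through the existing ZipArchive API:

    using System.IO;
    using SharpCompress.Archives.Zip;

    // Reading a zip whose entries were stored with Deflate64; writing Deflate64 is not supported.
    using (var archive = ZipArchive.Open("deflate64.zip"))
    {
        foreach (var entry in archive.Entries)
        {
            if (entry.IsDirectory) continue;
            using (var input = entry.OpenEntryStream())
            using (var output = File.Create(Path.GetFileName(entry.Key))) // flattens paths for brevity
            {
                input.CopyTo(output); // Deflate64 data is inflated transparently
            }
        }
    }
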
@@ -5,6 +5,7 @@ using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.Compressors;
|
||||
using SharpCompress.Compressors.BZip2;
|
||||
using SharpCompress.Compressors.Deflate;
|
||||
using SharpCompress.Compressors.Deflate64;
|
||||
using SharpCompress.Compressors.LZMA;
|
||||
using SharpCompress.Compressors.PPMd;
|
||||
using SharpCompress.Converters;
|
||||
@@ -15,6 +16,7 @@ namespace SharpCompress.Common.Zip
|
||||
internal abstract class ZipFilePart : FilePart
|
||||
{
|
||||
internal ZipFilePart(ZipFileEntry header, Stream stream)
|
||||
: base(header.ArchiveEncoding)
|
||||
{
|
||||
Header = header;
|
||||
header.Part = this;
|
||||
@@ -65,6 +67,10 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
return new DeflateStream(stream, CompressionMode.Decompress);
|
||||
}
|
||||
case ZipCompressionMethod.Deflate64:
|
||||
{
|
||||
return new Deflate64Stream(stream, CompressionMode.Decompress);
|
||||
}
|
||||
case ZipCompressionMethod.BZip2:
|
||||
{
|
||||
return new BZip2Stream(stream, CompressionMode.Decompress);
|
||||
@@ -88,7 +94,7 @@ namespace SharpCompress.Common.Zip
|
||||
case ZipCompressionMethod.PPMd:
|
||||
{
|
||||
var props = new byte[2];
|
||||
stream.Read(props, 0, props.Length);
|
||||
stream.ReadFully(props);
|
||||
return new PpmdStream(new PpmdProperties(props), stream, false);
|
||||
}
|
||||
case ZipCompressionMethod.WinzipAes:
|
||||
@@ -175,7 +181,6 @@ namespace SharpCompress.Common.Zip
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return plainStream;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ using System.Linq;
|
||||
#endif
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Zip
|
||||
{
|
||||
@@ -23,11 +24,13 @@ namespace SharpCompress.Common.Zip
|
||||
protected LocalEntryHeader lastEntryHeader;
|
||||
private readonly string password;
|
||||
private readonly StreamingMode mode;
|
||||
private readonly ArchiveEncoding archiveEncoding;
|
||||
|
||||
protected ZipHeaderFactory(StreamingMode mode, string password)
|
||||
protected ZipHeaderFactory(StreamingMode mode, string password, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
this.mode = mode;
|
||||
this.password = password;
|
||||
this.archiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
protected ZipHeader ReadHeader(uint headerBytes, BinaryReader reader, bool zip64 = false)
|
||||
@@ -36,7 +39,7 @@ namespace SharpCompress.Common.Zip
|
||||
{
|
||||
case ENTRY_HEADER_BYTES:
|
||||
{
|
||||
var entryHeader = new LocalEntryHeader();
|
||||
var entryHeader = new LocalEntryHeader(archiveEncoding);
|
||||
entryHeader.Read(reader);
|
||||
LoadHeader(entryHeader, reader.BaseStream);
|
||||
|
||||
@@ -45,48 +48,48 @@ namespace SharpCompress.Common.Zip
|
||||
}
|
||||
case DIRECTORY_START_HEADER_BYTES:
|
||||
{
|
||||
var entry = new DirectoryEntryHeader();
|
||||
var entry = new DirectoryEntryHeader(archiveEncoding);
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case POST_DATA_DESCRIPTOR:
|
||||
{
|
||||
if (FlagUtility.HasFlag(lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor))
|
||||
{
|
||||
lastEntryHeader.Crc = reader.ReadUInt32();
|
||||
lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
if (FlagUtility.HasFlag(lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor))
|
||||
{
|
||||
lastEntryHeader.Crc = reader.ReadUInt32();
|
||||
lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
|
||||
}
|
||||
else
|
||||
{
|
||||
reader.ReadBytes(zip64 ? 20 : 12);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
else
|
||||
{
|
||||
reader.ReadBytes(zip64 ? 20 : 12);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
case DIGITAL_SIGNATURE:
|
||||
return null;
|
||||
case DIRECTORY_END_HEADER_BYTES:
|
||||
{
|
||||
var entry = new DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case SPLIT_ARCHIVE_HEADER_BYTES:
|
||||
{
|
||||
return new SplitHeader();
|
||||
}
|
||||
{
|
||||
return new SplitHeader();
|
||||
}
|
||||
case ZIP64_END_OF_CENTRAL_DIRECTORY:
|
||||
{
|
||||
var entry = new Zip64DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new Zip64DirectoryEndHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR:
|
||||
{
|
||||
var entry = new Zip64DirectoryEndLocatorHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
{
|
||||
var entry = new Zip64DirectoryEndLocatorHeader();
|
||||
entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
default:
|
||||
throw new NotSupportedException("Unknown header: " + headerBytes);
|
||||
}
|
||||
@@ -165,22 +168,22 @@ namespace SharpCompress.Common.Zip
|
||||
switch (mode)
|
||||
{
|
||||
case StreamingMode.Seekable:
|
||||
{
|
||||
entryHeader.DataStartPosition = stream.Position;
|
||||
stream.Position += entryHeader.CompressedSize;
|
||||
break;
|
||||
}
|
||||
{
|
||||
entryHeader.DataStartPosition = stream.Position;
|
||||
stream.Position += entryHeader.CompressedSize;
|
||||
break;
|
||||
}
|
||||
|
||||
case StreamingMode.Streaming:
|
||||
{
|
||||
entryHeader.PackedStream = stream;
|
||||
break;
|
||||
}
|
||||
{
|
||||
entryHeader.PackedStream = stream;
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
{
|
||||
throw new InvalidFormatException("Invalid StreamingMode");
|
||||
}
|
||||
}
|
||||
|
||||
//}
|
||||
|
||||
@@ -105,19 +105,19 @@ namespace SharpCompress.Compressors.ADC
|
||||
}
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException("buffer");
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
if (count < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
if (offset < buffer.GetLowerBound(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
if ((offset + count) > buffer.GetLength(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
|
||||
int size = -1;
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -36,9 +37,10 @@ namespace SharpCompress.Compressors.Deflate
|
||||
|
||||
public DeflateStream(Stream stream, CompressionMode mode,
|
||||
CompressionLevel level = CompressionLevel.Default,
|
||||
bool leaveOpen = false)
|
||||
bool leaveOpen = false,
|
||||
Encoding forceEncoding = null)
|
||||
{
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen);
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen, forceEncoding);
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
|
||||
@@ -30,41 +30,45 @@ using System;
|
||||
using System.IO;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
public class GZipStream : Stream
|
||||
{
|
||||
internal static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
internal static readonly DateTime UNIX_EPOCH = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
|
||||
public DateTime? LastModified { get; set; }
|
||||
|
||||
private string comment;
|
||||
private string fileName;
|
||||
private string _comment;
|
||||
private string _fileName;
|
||||
|
||||
internal ZlibBaseStream BaseStream;
|
||||
private bool disposed;
|
||||
private bool firstReadDone;
|
||||
private int headerByteCount;
|
||||
private bool _disposed;
|
||||
private bool _firstReadDone;
|
||||
private int _headerByteCount;
|
||||
|
||||
private readonly Encoding _encoding;
|
||||
|
||||
        public GZipStream(Stream stream, CompressionMode mode)
            : this(stream, mode, CompressionLevel.Default, false)
            : this(stream, mode, CompressionLevel.Default, false, Encoding.UTF8)
        {
        }

        public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level)
            : this(stream, mode, level, false)
            : this(stream, mode, level, false, Encoding.UTF8)
        {
        }

        public GZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
            : this(stream, mode, CompressionLevel.Default, leaveOpen)
            : this(stream, mode, CompressionLevel.Default, leaveOpen, Encoding.UTF8)
        {
        }

        public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
        public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen, Encoding encoding)
        {
            BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen);
            BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen, encoding);
            _encoding = encoding;
        }

        #region Zlib properties
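The extra Encoding parameter controls how the gzip header's FileName and Comment fields are written and read; the shorter overloads keep the previous behaviour by defaulting to UTF-8. A small writer-side sketch (namespaces assumed from this file):

    using System.IO;
    using System.Text;
    using SharpCompress.Compressors;
    using SharpCompress.Compressors.Deflate;

    using (var output = File.Create("data.txt.gz"))
    using (var gzip = new GZipStream(output, CompressionMode.Compress, CompressionLevel.Default, false, Encoding.UTF8))
    {
        gzip.FileName = "data.txt";       // stored in the gzip header using the chosen encoding
        gzip.Comment = "example payload";
        byte[] payload = Encoding.UTF8.GetBytes("hello gzip");
        gzip.Write(payload, 0, payload.Length);
    }
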
@@ -74,7 +78,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
get => (BaseStream._flushMode);
|
||||
set
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -87,7 +91,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
get => BaseStream._bufferSize;
|
||||
set
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -123,7 +127,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
get
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -149,7 +153,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
get
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -179,7 +183,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Writer)
|
||||
{
|
||||
return BaseStream._z.TotalBytesOut + headerByteCount;
|
||||
return BaseStream._z.TotalBytesOut + _headerByteCount;
|
||||
}
|
||||
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Reader)
|
||||
{
|
||||
@@ -202,14 +206,14 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
try
|
||||
{
|
||||
if (!disposed)
|
||||
if (!_disposed)
|
||||
{
|
||||
if (disposing && (BaseStream != null))
|
||||
{
|
||||
BaseStream.Dispose();
|
||||
Crc32 = BaseStream.Crc32;
|
||||
}
|
||||
disposed = true;
|
||||
_disposed = true;
|
||||
}
|
||||
}
|
||||
finally
|
||||
@@ -223,7 +227,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// </summary>
|
||||
public override void Flush()
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -263,7 +267,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <returns>the number of bytes actually read</returns>
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -272,9 +276,9 @@ namespace SharpCompress.Compressors.Deflate
|
||||
// Console.WriteLine("GZipStream::Read(buffer, off({0}), c({1}) = {2}", offset, count, n);
|
||||
// Console.WriteLine( Util.FormatByteArray(buffer, offset, n) );
|
||||
|
||||
if (!firstReadDone)
|
||||
if (!_firstReadDone)
|
||||
{
|
||||
firstReadDone = true;
|
||||
_firstReadDone = true;
|
||||
FileName = BaseStream._GzipFileName;
|
||||
Comment = BaseStream._GzipComment;
|
||||
}
|
||||
@@ -325,7 +329,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
/// <param name="count">the number of bytes to write.</param>
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
@@ -335,7 +339,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
if (BaseStream._wantCompress)
|
||||
{
|
||||
// first write in compression, therefore, emit the GZIP header
|
||||
headerByteCount = EmitHeader();
|
||||
_headerByteCount = EmitHeader();
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -346,56 +350,56 @@ namespace SharpCompress.Compressors.Deflate
|
||||
BaseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
#endregion
|
||||
#endregion Stream methods
|
||||
|
||||
public String Comment
|
||||
{
|
||||
get => comment;
|
||||
get => _comment;
|
||||
set
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
comment = value;
|
||||
_comment = value;
|
||||
}
|
||||
}
|
||||
|
||||
public string FileName
|
||||
{
|
||||
get => fileName;
|
||||
get => _fileName;
|
||||
set
|
||||
{
|
||||
if (disposed)
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
fileName = value;
|
||||
if (fileName == null)
|
||||
_fileName = value;
|
||||
if (_fileName == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (fileName.IndexOf("/") != -1)
|
||||
if (_fileName.IndexOf("/") != -1)
|
||||
{
|
||||
fileName = fileName.Replace("/", "\\");
|
||||
_fileName = _fileName.Replace("/", "\\");
|
||||
}
|
||||
if (fileName.EndsWith("\\"))
|
||||
if (_fileName.EndsWith("\\"))
|
||||
{
|
||||
throw new InvalidOperationException("Illegal filename");
|
||||
}
|
||||
|
||||
var index = fileName.IndexOf("\\");
|
||||
var index = _fileName.IndexOf("\\");
|
||||
if (index != -1)
|
||||
{
|
||||
// trim any leading path
|
||||
int length = fileName.Length;
|
||||
int length = _fileName.Length;
|
||||
int num = length;
|
||||
while (--num >= 0)
|
||||
{
|
||||
char c = fileName[num];
|
||||
char c = _fileName[num];
|
||||
if (c == '\\')
|
||||
{
|
||||
fileName = fileName.Substring(num + 1, length - num - 1);
|
||||
_fileName = _fileName.Substring(num + 1, length - num - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -406,8 +410,10 @@ namespace SharpCompress.Compressors.Deflate
|
||||
|
||||
private int EmitHeader()
|
||||
{
|
||||
byte[] commentBytes = (Comment == null) ? null : ArchiveEncoding.Default.GetBytes(Comment);
|
||||
byte[] filenameBytes = (FileName == null) ? null : ArchiveEncoding.Default.GetBytes(FileName);
|
||||
byte[] commentBytes = (Comment == null) ? null
|
||||
: _encoding.GetBytes(Comment);
|
||||
byte[] filenameBytes = (FileName == null) ? null
|
||||
: _encoding.GetBytes(FileName);
|
||||
|
||||
int cbLength = (Comment == null) ? 0 : commentBytes.Length + 1;
|
||||
int fnLength = (FileName == null) ? 0 : filenameBytes.Length + 1;
|
||||
@@ -440,7 +446,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
LastModified = DateTime.Now;
|
||||
}
|
||||
TimeSpan delta = LastModified.Value - UnixEpoch;
|
||||
TimeSpan delta = LastModified.Value - UNIX_EPOCH;
|
||||
var timet = (Int32)delta.TotalSeconds;
|
||||
DataConverter.LittleEndian.PutBytes(header, i, timet);
|
||||
i += 4;
|
||||
|
||||
@@ -418,7 +418,7 @@ namespace SharpCompress.Compressors.Deflate
    internal sealed class Adler
    {
        // largest prime smaller than 65536
        private static readonly int BASE = 65521;
        private static readonly uint BASE = 65521U;

        // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
        private static readonly int NMAX = 5552;
@@ -430,8 +430,8 @@ namespace SharpCompress.Compressors.Deflate
                return 1;
            }

            int s1 = (int)(adler & 0xffff);
            int s2 = (int)((adler >> 16) & 0xffff);
            uint s1 = adler & 0xffffU;
            uint s2 = (adler >> 16) & 0xffffU;

            while (len > 0)
            {
@@ -486,7 +486,7 @@ namespace SharpCompress.Compressors.Deflate
                s1 %= BASE;
                s2 %= BASE;
            }
            return (uint)((s2 << 16) | s1);
            return (s2 << 16) | s1;
        }
    }
}
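Switching BASE, s1 and s2 to uint keeps the arithmetic unsigned end to end, so no cast is needed when recombining the two halves. For reference, the core Adler-32 update without the NMAX batching used above is simply:

    // Naive Adler-32 sketch for illustration; the real implementation batches NMAX bytes
    // between modulo reductions for speed, but computes the same value.
    static uint Adler32(byte[] data)
    {
        const uint BASE = 65521; // largest prime below 65536
        uint s1 = 1, s2 = 0;
        foreach (byte b in data)
        {
            s1 = (s1 + b) % BASE;
            s2 = (s2 + s1) % BASE;
        }
        return (s2 << 16) | s1;
    }
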
@@ -1,20 +1,20 @@
|
||||
// ZlibBaseStream.cs
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
|
||||
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
|
||||
// All rights reserved.
|
||||
//
|
||||
// This code module is part of DotNetZip, a zipfile class library.
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// This code is licensed under the Microsoft Public License.
|
||||
// This code is licensed under the Microsoft Public License.
|
||||
// See the file License.txt for the license details.
|
||||
// More info on: http://dotnetzip.codeplex.com
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
// last saved (in emacs):
|
||||
// last saved (in emacs):
|
||||
// Time-stamp: <2009-October-28 15:45:15>
|
||||
//
|
||||
// ------------------------------------------------------------------
|
||||
@@ -30,6 +30,7 @@ using System.IO;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.Converters;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -64,6 +65,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
protected internal DateTime _GzipMtime;
|
||||
protected internal int _gzipHeaderByteCount;
|
||||
|
||||
private readonly Encoding _encoding;
|
||||
|
||||
internal int Crc32
|
||||
{
|
||||
get
|
||||
@@ -80,7 +83,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
CompressionMode compressionMode,
|
||||
CompressionLevel level,
|
||||
ZlibStreamFlavor flavor,
|
||||
bool leaveOpen)
|
||||
bool leaveOpen,
|
||||
Encoding encoding)
|
||||
{
|
||||
_flushMode = FlushType.None;
|
||||
|
||||
@@ -91,6 +95,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
_flavor = flavor;
|
||||
_level = level;
|
||||
|
||||
_encoding = encoding;
|
||||
|
||||
// workitem 7159
|
||||
if (flavor == ZlibStreamFlavor.GZIP)
|
||||
{
|
||||
@@ -418,8 +424,8 @@ namespace SharpCompress.Compressors.Deflate
|
||||
}
|
||||
}
|
||||
while (!done);
|
||||
byte[] a = list.ToArray();
|
||||
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
|
||||
byte[] buffer = list.ToArray();
|
||||
return _encoding.GetString(buffer, 0, buffer.Length);
|
||||
}
|
||||
|
||||
private int _ReadAndValidateGzipHeader()
|
||||
@@ -528,19 +534,19 @@ namespace SharpCompress.Compressors.Deflate
|
||||
}
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException("buffer");
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
if (count < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
if (offset < buffer.GetLowerBound(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
if ((offset + count) > buffer.GetLength(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("count");
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
|
||||
int rc = 0;
|
||||
@@ -593,7 +599,7 @@ namespace SharpCompress.Compressors.Deflate
|
||||
while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK);
|
||||
|
||||
// workitem 8557
|
||||
// is there more room in output?
|
||||
// is there more room in output?
|
||||
if (_z.AvailableBytesOut > 0)
|
||||
{
|
||||
if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0)
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate
|
||||
{
|
||||
@@ -36,23 +37,23 @@ namespace SharpCompress.Compressors.Deflate
|
||||
private bool _disposed;
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode)
|
||||
: this(stream, mode, CompressionLevel.Default, false)
|
||||
: this(stream, mode, CompressionLevel.Default, false, Encoding.UTF8)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level)
|
||||
: this(stream, mode, level, false)
|
||||
: this(stream, mode, level, false, Encoding.UTF8)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, bool leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen)
|
||||
: this(stream, mode, CompressionLevel.Default, leaveOpen, Encoding.UTF8)
|
||||
{
|
||||
}
|
||||
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
|
||||
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen, Encoding encoding)
|
||||
{
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen);
|
||||
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen, encoding);
|
||||
}
|
||||
|
||||
#region Zlib properties
|
||||
@@ -326,6 +327,6 @@ namespace SharpCompress.Compressors.Deflate
|
||||
_baseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
#endregion
|
||||
#endregion System.IO.Stream methods
|
||||
}
|
||||
}
|
||||
src/SharpCompress/Compressors/Deflate64/BlockType.cs (new file, 13 lines)
@@ -0,0 +1,13 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

namespace SharpCompress.Compressors.Deflate64
{
    internal enum BlockType
    {
        Uncompressed = 0,
        Static = 1,
        Dynamic = 2
    }
}
src/SharpCompress/Compressors/Deflate64/Deflate64Stream.cs (new file, 257 lines)
@@ -0,0 +1,257 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using SharpCompress.Common.Zip;
|
||||
using SharpCompress.Compressors.Deflate;
|
||||
using System;
|
||||
using System.Diagnostics;
|
||||
using System.IO;
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
public sealed partial class Deflate64Stream : Stream
|
||||
{
|
||||
internal const int DefaultBufferSize = 8192;
|
||||
|
||||
private Stream _stream;
|
||||
private CompressionMode _mode;
|
||||
private bool _leaveOpen;
|
||||
private InflaterManaged _inflater;
|
||||
private byte[] _buffer;
|
||||
|
||||
public Deflate64Stream(Stream stream, CompressionMode mode,
|
||||
CompressionLevel level = CompressionLevel.Default,
|
||||
bool leaveOpen = false)
|
||||
{
|
||||
if (stream == null)
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
if (mode != CompressionMode.Decompress)
|
||||
throw new NotImplementedException("Deflate64: this implementation only supports decompression");
|
||||
if (!stream.CanRead)
|
||||
throw new ArgumentException("Deflate64: input stream is not readable", nameof(stream));
|
||||
|
||||
InitializeInflater(stream, leaveOpen, ZipCompressionMethod.Deflate64);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets up this Deflate64Stream to be used for inflation/decompression
|
||||
/// </summary>
|
||||
internal void InitializeInflater(Stream stream, bool leaveOpen, ZipCompressionMethod method = ZipCompressionMethod.Deflate)
|
||||
{
|
||||
Debug.Assert(stream != null);
|
||||
Debug.Assert(method == ZipCompressionMethod.Deflate || method == ZipCompressionMethod.Deflate64);
|
||||
if (!stream.CanRead)
|
||||
throw new ArgumentException("Deflate64: input stream is not readable", nameof(stream));
|
||||
|
||||
_inflater = new InflaterManaged(method == ZipCompressionMethod.Deflate64);
|
||||
|
||||
_stream = stream;
|
||||
_mode = CompressionMode.Decompress;
|
||||
_leaveOpen = leaveOpen;
|
||||
_buffer = new byte[DefaultBufferSize];
|
||||
}
|
||||
|
||||
public override bool CanRead
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_stream == null)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return (_mode == CompressionMode.Decompress && _stream.CanRead);
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanWrite
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_stream == null)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return (_mode == CompressionMode.Compress && _stream.CanWrite);
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override long Length
|
||||
{
|
||||
get { throw new NotSupportedException("Deflate64: not supported"); }
|
||||
}
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get { throw new NotSupportedException("Deflate64: not supported"); }
|
||||
set { throw new NotSupportedException("Deflate64: not supported"); }
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
throw new NotSupportedException("Deflate64: not supported");
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotSupportedException("Deflate64: not supported");
|
||||
}
|
||||
|
||||
public override int Read(byte[] array, int offset, int count)
|
||||
{
|
||||
EnsureDecompressionMode();
|
||||
ValidateParameters(array, offset, count);
|
||||
EnsureNotDisposed();
|
||||
|
||||
int bytesRead;
|
||||
int currentOffset = offset;
|
||||
int remainingCount = count;
|
||||
|
||||
while (true)
|
||||
{
|
||||
bytesRead = _inflater.Inflate(array, currentOffset, remainingCount);
|
||||
currentOffset += bytesRead;
|
||||
remainingCount -= bytesRead;
|
||||
|
||||
if (remainingCount == 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
if (_inflater.Finished())
|
||||
{
|
||||
// If we finished decompressing, we can't have anything left in the output window.
|
||||
Debug.Assert(_inflater.AvailableOutput == 0, "We should have copied all stuff out!");
|
||||
break;
|
||||
}
|
||||
|
||||
int bytes = _stream.Read(_buffer, 0, _buffer.Length);
|
||||
if (bytes <= 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
else if (bytes > _buffer.Length)
|
||||
{
|
||||
// The stream is either malicious or poorly implemented and returned a number of
|
||||
// bytes larger than the buffer supplied to it.
|
||||
throw new InvalidDataException("Deflate64: invalid data");
|
||||
}
|
||||
|
||||
_inflater.SetInput(_buffer, 0, bytes);
|
||||
}
|
||||
|
||||
return count - remainingCount;
|
||||
}
|
||||
|
||||
private void ValidateParameters(byte[] array, int offset, int count)
|
||||
{
|
||||
if (array == null)
|
||||
throw new ArgumentNullException(nameof(array));
|
||||
|
||||
if (offset < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
|
||||
if (count < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
|
||||
if (array.Length - offset < count)
|
||||
throw new ArgumentException("Deflate64: invalid offset/count combination");
|
||||
}
|
||||
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (_stream == null)
|
||||
ThrowStreamClosedException();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.NoInlining)]
|
||||
private static void ThrowStreamClosedException()
|
||||
{
|
||||
throw new ObjectDisposedException(null, "Deflate64: stream has been disposed");
|
||||
}
|
||||
|
||||
private void EnsureDecompressionMode()
|
||||
{
|
||||
if (_mode != CompressionMode.Decompress)
|
||||
ThrowCannotReadFromDeflateManagedStreamException();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.NoInlining)]
|
||||
private static void ThrowCannotReadFromDeflateManagedStreamException()
|
||||
{
|
||||
throw new InvalidOperationException("Deflate64: cannot read from this stream");
|
||||
}
|
||||
|
||||
private void EnsureCompressionMode()
|
||||
{
|
||||
if (_mode != CompressionMode.Compress)
|
||||
ThrowCannotWriteToDeflateManagedStreamException();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.NoInlining)]
|
||||
private static void ThrowCannotWriteToDeflateManagedStreamException()
|
||||
{
|
||||
throw new InvalidOperationException("Deflate64: cannot write to this stream");
|
||||
}
|
||||
|
||||
public override void Write(byte[] array, int offset, int count)
|
||||
{
|
||||
ThrowCannotWriteToDeflateManagedStreamException();
|
||||
}
|
||||
|
||||
// This is called by Dispose:
|
||||
private void PurgeBuffers(bool disposing)
|
||||
{
|
||||
if (!disposing)
|
||||
return;
|
||||
|
||||
if (_stream == null)
|
||||
return;
|
||||
|
||||
Flush();
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
try
|
||||
{
|
||||
PurgeBuffers(disposing);
|
||||
}
|
||||
finally
|
||||
{
|
||||
// Close the underlying stream even if PurgeBuffers threw.
|
||||
// Stream.Close() may throw here (may or may not be due to the same error).
|
||||
// In this case, we still need to clean up internal resources, hence the inner finally blocks.
|
||||
try
|
||||
{
|
||||
if (disposing && !_leaveOpen && _stream != null)
|
||||
_stream.Dispose();
|
||||
}
|
||||
finally
|
||||
{
|
||||
_stream = null;
|
||||
|
||||
try
|
||||
{
|
||||
_inflater?.Dispose();
|
||||
}
|
||||
finally
|
||||
{
|
||||
_inflater = null;
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
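As a sketch of how the new stream is meant to be consumed (it is decompress-only), assuming a source stream that already contains raw Deflate64 data, e.g. a zip entry stored with method 9; the buffer size is illustrative and CompressionMode is assumed to come from SharpCompress.Compressors.

using System.IO;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate64;

internal static class Deflate64Example   // hypothetical helper, not part of the diff
{
    public static void Inflate(Stream compressed, Stream destination)
    {
        // Wrap the raw Deflate64 data; the constructor rejects anything other
        // than CompressionMode.Decompress, matching the read-only support.
        using (var deflate64 = new Deflate64Stream(compressed, CompressionMode.Decompress))
        {
            var buffer = new byte[8192];
            int read;
            while ((read = deflate64.Read(buffer, 0, buffer.Length)) > 0)
            {
                destination.Write(buffer, 0, read);
            }
        }
    }
}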
43
src/SharpCompress/Compressors/Deflate64/DeflateInput.cs
Normal file
@@ -0,0 +1,43 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using System.Diagnostics;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
internal sealed class DeflateInput
|
||||
{
|
||||
internal byte[] Buffer { get; set; }
|
||||
internal int Count { get; set; }
|
||||
internal int StartIndex { get; set; }
|
||||
|
||||
internal void ConsumeBytes(int n)
|
||||
{
|
||||
Debug.Assert(n <= Count, "Should not consume more bytes than are available in the buffer");
|
||||
StartIndex += n;
|
||||
Count -= n;
|
||||
Debug.Assert(StartIndex + Count <= Buffer.Length, "Input buffer is in invalid state!");
|
||||
}
|
||||
|
||||
internal InputState DumpState() => new InputState(Count, StartIndex);
|
||||
|
||||
internal void RestoreState(InputState state)
|
||||
{
|
||||
Count = state._count;
|
||||
StartIndex = state._startIndex;
|
||||
}
|
||||
|
||||
internal /*readonly */struct InputState
|
||||
{
|
||||
internal readonly int _count;
|
||||
internal readonly int _startIndex;
|
||||
|
||||
internal InputState(int count, int startIndex)
|
||||
{
|
||||
_count = count;
|
||||
_startIndex = startIndex;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
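The DumpState/RestoreState pair above exists so a caller can checkpoint the read position before an attempt that may not complete; a minimal sketch of that pattern follows. The class and its members are internal, so this only illustrates use from inside the library, and GetPendingBytes and the "could not finish" test are hypothetical.

// Inside the SharpCompress.Compressors.Deflate64 assembly:
byte[] data = GetPendingBytes();                         // hypothetical source of input
var input = new DeflateInput { Buffer = data, StartIndex = 0, Count = data.Length };

DeflateInput.InputState checkpoint = input.DumpState();  // snapshot Count/StartIndex
input.ConsumeBytes(4);                                   // e.g. try to consume a 4-byte header
bool needMoreInput = input.Count == 0;                   // hypothetical "could not finish" test
if (needMoreInput)
{
    input.RestoreState(checkpoint);                      // roll back to the snapshot
}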
245
src/SharpCompress/Compressors/Deflate64/FastEncoderStatus.cs
Normal file
@@ -0,0 +1,245 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using System.Diagnostics;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
internal static class FastEncoderStatics
|
||||
{
|
||||
// static information for encoding, DO NOT MODIFY
|
||||
|
||||
internal static readonly byte[] FastEncoderTreeStructureData =
|
||||
{
|
||||
0xec,0xbd,0x07,0x60,0x1c,0x49,0x96,0x25,0x26,0x2f,0x6d,0xca,
|
||||
0x7b,0x7f,0x4a,0xf5,0x4a,0xd7,0xe0,0x74,0xa1,0x08,0x80,0x60,
|
||||
0x13,0x24,0xd8,0x90,0x40,0x10,0xec,0xc1,0x88,0xcd,0xe6,0x92,
|
||||
0xec,0x1d,0x69,0x47,0x23,0x29,0xab,0x2a,0x81,0xca,0x65,0x56,
|
||||
0x65,0x5d,0x66,0x16,0x40,0xcc,0xed,0x9d,0xbc,0xf7,0xde,0x7b,
|
||||
0xef,0xbd,0xf7,0xde,0x7b,0xef,0xbd,0xf7,0xba,0x3b,0x9d,0x4e,
|
||||
0x27,0xf7,0xdf,0xff,0x3f,0x5c,0x66,0x64,0x01,0x6c,0xf6,0xce,
|
||||
0x4a,0xda,0xc9,0x9e,0x21,0x80,0xaa,0xc8,0x1f,0x3f,0x7e,0x7c,
|
||||
0x1f,0x3f
|
||||
};
|
||||
|
||||
internal static readonly byte[] BFinalFastEncoderTreeStructureData =
|
||||
{
|
||||
0xed,0xbd,0x07,0x60,0x1c,0x49,0x96,0x25,0x26,0x2f,0x6d,0xca,
|
||||
0x7b,0x7f,0x4a,0xf5,0x4a,0xd7,0xe0,0x74,0xa1,0x08,0x80,0x60,
|
||||
0x13,0x24,0xd8,0x90,0x40,0x10,0xec,0xc1,0x88,0xcd,0xe6,0x92,
|
||||
0xec,0x1d,0x69,0x47,0x23,0x29,0xab,0x2a,0x81,0xca,0x65,0x56,
|
||||
0x65,0x5d,0x66,0x16,0x40,0xcc,0xed,0x9d,0xbc,0xf7,0xde,0x7b,
|
||||
0xef,0xbd,0xf7,0xde,0x7b,0xef,0xbd,0xf7,0xba,0x3b,0x9d,0x4e,
|
||||
0x27,0xf7,0xdf,0xff,0x3f,0x5c,0x66,0x64,0x01,0x6c,0xf6,0xce,
|
||||
0x4a,0xda,0xc9,0x9e,0x21,0x80,0xaa,0xc8,0x1f,0x3f,0x7e,0x7c,
|
||||
0x1f,0x3f
|
||||
};
|
||||
|
||||
// Output a currentMatch with length matchLen (>= MIN_MATCH) and displacement matchPos
|
||||
//
|
||||
// Optimisation: unlike the other encoders, here we have an array of codes for each currentMatch
|
||||
// length (not just each currentMatch length slot), complete with all the extra bits filled in, in
|
||||
// a single array element.
|
||||
//
|
||||
// There are many advantages to doing this:
|
||||
//
|
||||
// 1. A single array lookup on g_FastEncoderLiteralCodeInfo, instead of separate array lookups
|
||||
// on g_LengthLookup (to get the length slot), g_FastEncoderLiteralTreeLength,
|
||||
// g_FastEncoderLiteralTreeCode, g_ExtraLengthBits, and g_BitMask
|
||||
//
|
||||
// 2. The array is an array of ULONGs, so no access penalty, unlike for accessing those USHORT
|
||||
// code arrays in the other encoders (although they could be made into ULONGs with some
|
||||
// modifications to the source).
|
||||
//
|
||||
// Note, if we could guarantee that codeLen <= 16 always, then we could skip an if statement here.
|
||||
//
|
||||
// A completely different optimisation is used for the distance codes since, obviously, a table for
|
||||
// all 8192 distances combining their extra bits is not feasible. The distance codeinfo table is
|
||||
// made up of code[], len[] and # extraBits for this code.
|
||||
//
|
||||
// The advantages are similar to the above; a ULONG array instead of a USHORT and BYTE array, better
|
||||
// cache locality, fewer memory operations.
|
||||
//
|
||||
|
||||
|
||||
// Encoding information for literal and Length.
|
||||
// The 5 least significant bits are the code length
// and the remaining bits are the code itself.
|
||||
|
||||
internal static readonly uint[] FastEncoderLiteralCodeInfo =
|
||||
{
|
||||
0x0000d7ee,0x0004d7ee,0x0002d7ee,0x0006d7ee,0x0001d7ee,0x0005d7ee,0x0003d7ee,
|
||||
0x0007d7ee,0x000037ee,0x0000c7ec,0x00000126,0x000437ee,0x000237ee,0x000637ee,
|
||||
0x000137ee,0x000537ee,0x000337ee,0x000737ee,0x0000b7ee,0x0004b7ee,0x0002b7ee,
|
||||
0x0006b7ee,0x0001b7ee,0x0005b7ee,0x0003b7ee,0x0007b7ee,0x000077ee,0x000477ee,
|
||||
0x000277ee,0x000677ee,0x000017ed,0x000177ee,0x00000526,0x000577ee,0x000023ea,
|
||||
0x0001c7ec,0x000377ee,0x000777ee,0x000217ed,0x000063ea,0x00000b68,0x00000ee9,
|
||||
0x00005beb,0x000013ea,0x00000467,0x00001b68,0x00000c67,0x00002ee9,0x00000768,
|
||||
0x00001768,0x00000f68,0x00001ee9,0x00001f68,0x00003ee9,0x000053ea,0x000001e9,
|
||||
0x000000e8,0x000021e9,0x000011e9,0x000010e8,0x000031e9,0x000033ea,0x000008e8,
|
||||
0x0000f7ee,0x0004f7ee,0x000018e8,0x000009e9,0x000004e8,0x000029e9,0x000014e8,
|
||||
0x000019e9,0x000073ea,0x0000dbeb,0x00000ce8,0x00003beb,0x0002f7ee,0x000039e9,
|
||||
0x00000bea,0x000005e9,0x00004bea,0x000025e9,0x000027ec,0x000015e9,0x000035e9,
|
||||
0x00000de9,0x00002bea,0x000127ec,0x0000bbeb,0x0006f7ee,0x0001f7ee,0x0000a7ec,
|
||||
0x00007beb,0x0005f7ee,0x0000fbeb,0x0003f7ee,0x0007f7ee,0x00000fee,0x00000326,
|
||||
0x00000267,0x00000a67,0x00000667,0x00000726,0x00001ce8,0x000002e8,0x00000e67,
|
||||
0x000000a6,0x0001a7ec,0x00002de9,0x000004a6,0x00000167,0x00000967,0x000002a6,
|
||||
0x00000567,0x000117ed,0x000006a6,0x000001a6,0x000005a6,0x00000d67,0x000012e8,
|
||||
0x00000ae8,0x00001de9,0x00001ae8,0x000007eb,0x000317ed,0x000067ec,0x000097ed,
|
||||
0x000297ed,0x00040fee,0x00020fee,0x00060fee,0x00010fee,0x00050fee,0x00030fee,
|
||||
0x00070fee,0x00008fee,0x00048fee,0x00028fee,0x00068fee,0x00018fee,0x00058fee,
|
||||
0x00038fee,0x00078fee,0x00004fee,0x00044fee,0x00024fee,0x00064fee,0x00014fee,
|
||||
0x00054fee,0x00034fee,0x00074fee,0x0000cfee,0x0004cfee,0x0002cfee,0x0006cfee,
|
||||
0x0001cfee,0x0005cfee,0x0003cfee,0x0007cfee,0x00002fee,0x00042fee,0x00022fee,
|
||||
0x00062fee,0x00012fee,0x00052fee,0x00032fee,0x00072fee,0x0000afee,0x0004afee,
|
||||
0x0002afee,0x0006afee,0x0001afee,0x0005afee,0x0003afee,0x0007afee,0x00006fee,
|
||||
0x00046fee,0x00026fee,0x00066fee,0x00016fee,0x00056fee,0x00036fee,0x00076fee,
|
||||
0x0000efee,0x0004efee,0x0002efee,0x0006efee,0x0001efee,0x0005efee,0x0003efee,
|
||||
0x0007efee,0x00001fee,0x00041fee,0x00021fee,0x00061fee,0x00011fee,0x00051fee,
|
||||
0x00031fee,0x00071fee,0x00009fee,0x00049fee,0x00029fee,0x00069fee,0x00019fee,
|
||||
0x00059fee,0x00039fee,0x00079fee,0x00005fee,0x00045fee,0x00025fee,0x00065fee,
|
||||
0x00015fee,0x00055fee,0x00035fee,0x00075fee,0x0000dfee,0x0004dfee,0x0002dfee,
|
||||
0x0006dfee,0x0001dfee,0x0005dfee,0x0003dfee,0x0007dfee,0x00003fee,0x00043fee,
|
||||
0x00023fee,0x00063fee,0x00013fee,0x00053fee,0x00033fee,0x00073fee,0x0000bfee,
|
||||
0x0004bfee,0x0002bfee,0x0006bfee,0x0001bfee,0x0005bfee,0x0003bfee,0x0007bfee,
|
||||
0x00007fee,0x00047fee,0x00027fee,0x00067fee,0x00017fee,0x000197ed,0x000397ed,
|
||||
0x000057ed,0x00057fee,0x000257ed,0x00037fee,0x000157ed,0x00077fee,0x000357ed,
|
||||
0x0000ffee,0x0004ffee,0x0002ffee,0x0006ffee,0x0001ffee,0x00000084,0x00000003,
|
||||
0x00000184,0x00000044,0x00000144,0x000000c5,0x000002c5,0x000001c5,0x000003c6,
|
||||
0x000007c6,0x00000026,0x00000426,0x000003a7,0x00000ba7,0x000007a7,0x00000fa7,
|
||||
0x00000227,0x00000627,0x00000a27,0x00000e27,0x00000068,0x00000868,0x00001068,
|
||||
0x00001868,0x00000369,0x00001369,0x00002369,0x00003369,0x000006ea,0x000026ea,
|
||||
0x000046ea,0x000066ea,0x000016eb,0x000036eb,0x000056eb,0x000076eb,0x000096eb,
|
||||
0x0000b6eb,0x0000d6eb,0x0000f6eb,0x00003dec,0x00007dec,0x0000bdec,0x0000fdec,
|
||||
0x00013dec,0x00017dec,0x0001bdec,0x0001fdec,0x00006bed,0x0000ebed,0x00016bed,
|
||||
0x0001ebed,0x00026bed,0x0002ebed,0x00036bed,0x0003ebed,0x000003ec,0x000043ec,
|
||||
0x000083ec,0x0000c3ec,0x000103ec,0x000143ec,0x000183ec,0x0001c3ec,0x00001bee,
|
||||
0x00009bee,0x00011bee,0x00019bee,0x00021bee,0x00029bee,0x00031bee,0x00039bee,
|
||||
0x00041bee,0x00049bee,0x00051bee,0x00059bee,0x00061bee,0x00069bee,0x00071bee,
|
||||
0x00079bee,0x000167f0,0x000367f0,0x000567f0,0x000767f0,0x000967f0,0x000b67f0,
|
||||
0x000d67f0,0x000f67f0,0x001167f0,0x001367f0,0x001567f0,0x001767f0,0x001967f0,
|
||||
0x001b67f0,0x001d67f0,0x001f67f0,0x000087ef,0x000187ef,0x000287ef,0x000387ef,
|
||||
0x000487ef,0x000587ef,0x000687ef,0x000787ef,0x000887ef,0x000987ef,0x000a87ef,
|
||||
0x000b87ef,0x000c87ef,0x000d87ef,0x000e87ef,0x000f87ef,0x0000e7f0,0x0002e7f0,
|
||||
0x0004e7f0,0x0006e7f0,0x0008e7f0,0x000ae7f0,0x000ce7f0,0x000ee7f0,0x0010e7f0,
|
||||
0x0012e7f0,0x0014e7f0,0x0016e7f0,0x0018e7f0,0x001ae7f0,0x001ce7f0,0x001ee7f0,
|
||||
0x0005fff3,0x000dfff3,0x0015fff3,0x001dfff3,0x0025fff3,0x002dfff3,0x0035fff3,
|
||||
0x003dfff3,0x0045fff3,0x004dfff3,0x0055fff3,0x005dfff3,0x0065fff3,0x006dfff3,
|
||||
0x0075fff3,0x007dfff3,0x0085fff3,0x008dfff3,0x0095fff3,0x009dfff3,0x00a5fff3,
|
||||
0x00adfff3,0x00b5fff3,0x00bdfff3,0x00c5fff3,0x00cdfff3,0x00d5fff3,0x00ddfff3,
|
||||
0x00e5fff3,0x00edfff3,0x00f5fff3,0x00fdfff3,0x0003fff3,0x000bfff3,0x0013fff3,
|
||||
0x001bfff3,0x0023fff3,0x002bfff3,0x0033fff3,0x003bfff3,0x0043fff3,0x004bfff3,
|
||||
0x0053fff3,0x005bfff3,0x0063fff3,0x006bfff3,0x0073fff3,0x007bfff3,0x0083fff3,
|
||||
0x008bfff3,0x0093fff3,0x009bfff3,0x00a3fff3,0x00abfff3,0x00b3fff3,0x00bbfff3,
|
||||
0x00c3fff3,0x00cbfff3,0x00d3fff3,0x00dbfff3,0x00e3fff3,0x00ebfff3,0x00f3fff3,
|
||||
0x00fbfff3,0x0007fff3,0x000ffff3,0x0017fff3,0x001ffff3,0x0027fff3,0x002ffff3,
|
||||
0x0037fff3,0x003ffff3,0x0047fff3,0x004ffff3,0x0057fff3,0x005ffff3,0x0067fff3,
|
||||
0x006ffff3,0x0077fff3,0x007ffff3,0x0087fff3,0x008ffff3,0x0097fff3,0x009ffff3,
|
||||
0x00a7fff3,0x00affff3,0x00b7fff3,0x00bffff3,0x00c7fff3,0x00cffff3,0x00d7fff3,
|
||||
0x00dffff3,0x00e7fff3,0x00effff3,0x00f7fff3,0x00fffff3,0x0001e7f1,0x0003e7f1,
|
||||
0x0005e7f1,0x0007e7f1,0x0009e7f1,0x000be7f1,0x000de7f1,0x000fe7f1,0x0011e7f1,
|
||||
0x0013e7f1,0x0015e7f1,0x0017e7f1,0x0019e7f1,0x001be7f1,0x001de7f1,0x001fe7f1,
|
||||
0x0021e7f1,0x0023e7f1,0x0025e7f1,0x0027e7f1,0x0029e7f1,0x002be7f1,0x002de7f1,
|
||||
0x002fe7f1,0x0031e7f1,0x0033e7f1,0x0035e7f1,0x0037e7f1,0x0039e7f1,0x003be7f1,
|
||||
0x003de7f1,0x000047eb
|
||||
};
|
||||
|
||||
internal static readonly uint[] FastEncoderDistanceCodeInfo =
|
||||
{
|
||||
0x00000f06,0x0001ff0a,0x0003ff0b,0x0007ff0b,0x0000ff19,0x00003f18,0x0000bf28,
|
||||
0x00007f28,0x00001f37,0x00005f37,0x00000d45,0x00002f46,0x00000054,0x00001d55,
|
||||
0x00000864,0x00000365,0x00000474,0x00001375,0x00000c84,0x00000284,0x00000a94,
|
||||
0x00000694,0x00000ea4,0x000001a4,0x000009b4,0x00000bb5,0x000005c4,0x00001bc5,
|
||||
0x000007d5,0x000017d5,0x00000000,0x00000100
|
||||
};
|
||||
|
||||
internal static readonly uint[] BitMask = { 0, 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767 };
|
||||
internal static readonly byte[] ExtraLengthBits = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 };
|
||||
internal static readonly byte[] ExtraDistanceBits = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 0, 0 };
|
||||
internal const int NumChars = 256;
|
||||
internal const int NumLengthBaseCodes = 29;
|
||||
internal const int NumDistBaseCodes = 30;
|
||||
|
||||
internal const uint FastEncoderPostTreeBitBuf = 0x0022;
|
||||
internal const int FastEncoderPostTreeBitCount = 9;
|
||||
|
||||
internal const uint NoCompressionHeader = 0x0;
|
||||
internal const int NoCompressionHeaderBitCount = 3;
|
||||
internal const uint BFinalNoCompressionHeader = 0x1;
|
||||
internal const int BFinalNoCompressionHeaderBitCount = 3;
|
||||
internal const int MaxCodeLen = 16;
|
||||
|
||||
private static readonly byte[] s_distLookup = CreateDistanceLookup();
|
||||
|
||||
private static byte[] CreateDistanceLookup()
|
||||
{
|
||||
byte[] result = new byte[512];
|
||||
|
||||
// Generate the global slot tables which allow us to convert a distance
|
||||
// (0..32K) to a distance slot (0..29)
|
||||
//
|
||||
// Distance table
|
||||
// Extra Extra Extra
|
||||
// Code Bits Dist Code Bits Dist Code Bits Distance
|
||||
// ---- ---- ---- ---- ---- ------ ---- ---- --------
|
||||
// 0 0 1 10 4 33-48 20 9 1025-1536
|
||||
// 1 0 2 11 4 49-64 21 9 1537-2048
|
||||
// 2 0 3 12 5 65-96 22 10 2049-3072
|
||||
// 3 0 4 13 5 97-128 23 10 3073-4096
|
||||
// 4 1 5,6 14 6 129-192 24 11 4097-6144
|
||||
// 5 1 7,8 15 6 193-256 25 11 6145-8192
|
||||
// 6 2 9-12 16 7 257-384 26 12 8193-12288
|
||||
// 7 2 13-16 17 7 385-512 27 12 12289-16384
|
||||
// 8 3 17-24 18 8 513-768 28 13 16385-24576
|
||||
// 9 3 25-32 19 8 769-1024 29 13 24577-32768
|
||||
|
||||
// Initialize the mapping length (0..255) -> length code (0..28)
|
||||
//int length = 0;
|
||||
//for (code = 0; code < FastEncoderStatics.NumLengthBaseCodes-1; code++) {
|
||||
// for (int n = 0; n < (1 << FastEncoderStatics.ExtraLengthBits[code]); n++)
|
||||
// lengthLookup[length++] = (byte) code;
|
||||
//}
|
||||
//lengthLookup[length-1] = (byte) code;
|
||||
|
||||
// Initialize the mapping dist (0..32K) -> dist code (0..29)
|
||||
int dist = 0;
|
||||
int code;
|
||||
for (code = 0; code < 16; code++)
|
||||
{
|
||||
for (int n = 0; n < (1 << ExtraDistanceBits[code]); n++)
|
||||
result[dist++] = (byte)code;
|
||||
}
|
||||
|
||||
dist >>= 7; // from now on, all distances are divided by 128
|
||||
|
||||
for (; code < NumDistBaseCodes; code++)
|
||||
{
|
||||
for (int n = 0; n < (1 << (ExtraDistanceBits[code] - 7)); n++)
|
||||
result[256 + dist++] = (byte)code;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// Return the position slot (0...29) of a match offset (0...32767)
|
||||
internal static int GetSlot(int pos) =>
|
||||
s_distLookup[((pos) < 256) ? (pos) : (256 + ((pos) >> 7))];
|
||||
|
||||
// Reverse 'length' of the bits in code
|
||||
public static uint BitReverse(uint code, int length)
|
||||
{
|
||||
uint new_code = 0;
|
||||
|
||||
Debug.Assert(length > 0 && length <= 16, "Invalid len");
|
||||
do
|
||||
{
|
||||
new_code |= (code & 1);
|
||||
new_code <<= 1;
|
||||
code >>= 1;
|
||||
} while (--length > 0);
|
||||
|
||||
return new_code >> 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
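A small worked check of the two helpers above, following the distance table in the comments; the values are computed by hand, and the members are internal, so this is only illustrative as used from within the library.

namespace SharpCompress.Compressors.Deflate64
{
    internal static class FastEncoderStaticsExample   // hypothetical, not part of the diff
    {
        public static void Check()
        {
            // GetSlot takes a zero-based match offset, so offset 0 is distance 1.
            int slotForDistance1 = FastEncoderStatics.GetSlot(0);     // 0  (code 0 covers distance 1)
            int slotForDistance300 = FastEncoderStatics.GetSlot(299); // 16 (code 16 covers 257-384)

            // BitReverse mirrors the low 'length' bits of a code, as required by
            // LSB-first table lookups: 0b110 reversed over 3 bits is 0b011.
            uint reversed = FastEncoderStatics.BitReverse(0b110, 3);  // 0b011
        }
    }
}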
311
src/SharpCompress/Compressors/Deflate64/HuffmanTree.cs
Normal file
@@ -0,0 +1,311 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using System.Diagnostics;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
// Strictly speaking this class is not a HuffmanTree, this class is
|
||||
// a lookup table combined with a HuffmanTree. The idea is to speed up
|
||||
// the lookup for short symbols (they should appear more frequently ideally.)
|
||||
// However we don't want to create a huge table since it might take longer to
|
||||
// build the table than decoding (Deflate usually generates new tables frequently.)
|
||||
//
|
||||
// Jean-loup Gailly and Mark Adler gave a very good explanation about this.
|
||||
// The full text (algorithm.txt) can be found inside
|
||||
// ftp://ftp.uu.net/pub/archiving/zip/zlib/zlib.zip.
|
||||
//
|
||||
// Following paper explains decoding in details:
|
||||
// Hirschberg and Lelewer, "Efficient decoding of prefix codes,"
|
||||
// Comm. ACM, 33,4, April 1990, pp. 449-459.
|
||||
//
|
||||
|
||||
internal sealed class HuffmanTree
|
||||
{
|
||||
internal const int MaxLiteralTreeElements = 288;
|
||||
internal const int MaxDistTreeElements = 32;
|
||||
internal const int EndOfBlockCode = 256;
|
||||
internal const int NumberOfCodeLengthTreeElements = 19;
|
||||
|
||||
private readonly int _tableBits;
|
||||
private readonly short[] _table;
|
||||
private readonly short[] _left;
|
||||
private readonly short[] _right;
|
||||
private readonly byte[] _codeLengthArray;
|
||||
#if DEBUG
|
||||
private uint[] _codeArrayDebug;
|
||||
#endif
|
||||
|
||||
private readonly int _tableMask;
|
||||
|
||||
// huffman tree for static block
|
||||
public static HuffmanTree StaticLiteralLengthTree { get; } = new HuffmanTree(GetStaticLiteralTreeLength());
|
||||
|
||||
public static HuffmanTree StaticDistanceTree { get; } = new HuffmanTree(GetStaticDistanceTreeLength());
|
||||
|
||||
public HuffmanTree(byte[] codeLengths)
|
||||
{
|
||||
Debug.Assert(
|
||||
codeLengths.Length == MaxLiteralTreeElements ||
|
||||
codeLengths.Length == MaxDistTreeElements ||
|
||||
codeLengths.Length == NumberOfCodeLengthTreeElements,
|
||||
"we only expect three kinds of Length here");
|
||||
_codeLengthArray = codeLengths;
|
||||
|
||||
if (_codeLengthArray.Length == MaxLiteralTreeElements)
|
||||
{
|
||||
// bits for Literal/Length tree table
|
||||
_tableBits = 9;
|
||||
}
|
||||
else
|
||||
{
|
||||
// bits for distance tree table and code length tree table
|
||||
_tableBits = 7;
|
||||
}
|
||||
_tableMask = (1 << _tableBits) - 1;
|
||||
|
||||
_table = new short[1 << _tableBits];
|
||||
|
||||
// I need to find proof that left and right array will always be
|
||||
// enough. I think they are.
|
||||
_left = new short[2 * _codeLengthArray.Length];
|
||||
_right = new short[2 * _codeLengthArray.Length];
|
||||
|
||||
CreateTable();
|
||||
}
|
||||
|
||||
// Generate the array contains huffman codes lengths for static huffman tree.
|
||||
// The data is in RFC 1951.
|
||||
private static byte[] GetStaticLiteralTreeLength()
|
||||
{
|
||||
byte[] literalTreeLength = new byte[MaxLiteralTreeElements];
|
||||
for (int i = 0; i <= 143; i++)
|
||||
literalTreeLength[i] = 8;
|
||||
|
||||
for (int i = 144; i <= 255; i++)
|
||||
literalTreeLength[i] = 9;
|
||||
|
||||
for (int i = 256; i <= 279; i++)
|
||||
literalTreeLength[i] = 7;
|
||||
|
||||
for (int i = 280; i <= 287; i++)
|
||||
literalTreeLength[i] = 8;
|
||||
|
||||
return literalTreeLength;
|
||||
}
|
||||
|
||||
private static byte[] GetStaticDistanceTreeLength()
|
||||
{
|
||||
byte[] staticDistanceTreeLength = new byte[MaxDistTreeElements];
|
||||
for (int i = 0; i < MaxDistTreeElements; i++)
|
||||
{
|
||||
staticDistanceTreeLength[i] = 5;
|
||||
}
|
||||
return staticDistanceTreeLength;
|
||||
}
|
||||
|
||||
// Calculate the huffman code for each character based on the code length for each character.
|
||||
// This algorithm is described in standard RFC 1951
|
||||
private uint[] CalculateHuffmanCode()
|
||||
{
|
||||
uint[] bitLengthCount = new uint[17];
|
||||
foreach (int codeLength in _codeLengthArray)
|
||||
{
|
||||
bitLengthCount[codeLength]++;
|
||||
}
|
||||
bitLengthCount[0] = 0; // clear count for length 0
|
||||
|
||||
uint[] nextCode = new uint[17];
|
||||
uint tempCode = 0;
|
||||
for (int bits = 1; bits <= 16; bits++)
|
||||
{
|
||||
tempCode = (tempCode + bitLengthCount[bits - 1]) << 1;
|
||||
nextCode[bits] = tempCode;
|
||||
}
|
||||
|
||||
uint[] code = new uint[MaxLiteralTreeElements];
|
||||
for (int i = 0; i < _codeLengthArray.Length; i++)
|
||||
{
|
||||
int len = _codeLengthArray[i];
|
||||
|
||||
if (len > 0)
|
||||
{
|
||||
code[i] = FastEncoderStatics.BitReverse(nextCode[len], len);
|
||||
nextCode[len]++;
|
||||
}
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
private void CreateTable()
|
||||
{
|
||||
uint[] codeArray = CalculateHuffmanCode();
|
||||
#if DEBUG
|
||||
_codeArrayDebug = codeArray;
|
||||
#endif
|
||||
|
||||
short avail = (short)_codeLengthArray.Length;
|
||||
|
||||
for (int ch = 0; ch < _codeLengthArray.Length; ch++)
|
||||
{
|
||||
// length of this code
|
||||
int len = _codeLengthArray[ch];
|
||||
if (len > 0)
|
||||
{
|
||||
// start value (bit reversed)
|
||||
int start = (int)codeArray[ch];
|
||||
|
||||
if (len <= _tableBits)
|
||||
{
|
||||
// If a particular symbol is shorter than nine bits,
|
||||
// then that symbol's translation is duplicated
|
||||
// in all those entries that start with that symbol's bits.
|
||||
// For example, if the symbol is four bits, then it's duplicated
|
||||
// 32 times in a nine-bit table. If a symbol is nine bits long,
|
||||
// it appears in the table once.
|
||||
//
|
||||
// Make sure that in the loop below, code is always
|
||||
// less than table_size.
|
||||
//
|
||||
// On last iteration we store at array index:
|
||||
// initial_start_at + (locs-1)*increment
|
||||
// = initial_start_at + locs*increment - increment
|
||||
// = initial_start_at + (1 << tableBits) - increment
|
||||
// = initial_start_at + table_size - increment
|
||||
//
|
||||
// Therefore we must ensure:
|
||||
// initial_start_at + table_size - increment < table_size
|
||||
// or: initial_start_at < increment
|
||||
//
|
||||
int increment = 1 << len;
|
||||
if (start >= increment)
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: invalid Huffman data");
|
||||
}
|
||||
|
||||
// Note the bits in the table are reversed.
|
||||
int locs = 1 << (_tableBits - len);
|
||||
for (int j = 0; j < locs; j++)
|
||||
{
|
||||
_table[start] = (short)ch;
|
||||
start += increment;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// For any code which has length longer than num_elements,
|
||||
// build a binary tree.
|
||||
|
||||
int overflowBits = len - _tableBits; // the extra bits we need to represent the data.
|
||||
int codeBitMask = 1 << _tableBits; // mask to get current bit (the bits can't fit in the table)
|
||||
|
||||
// The left/right tables are used to represent the
// remaining bits. Once we have the first part (tableBits bits) and look at
// the table, we may need to follow the tree to find the real character.
|
||||
// This is in place to avoid bloating the table if there are
|
||||
// a few ones with long code.
|
||||
int index = start & ((1 << _tableBits) - 1);
|
||||
short[] array = _table;
|
||||
|
||||
do
|
||||
{
|
||||
short value = array[index];
|
||||
|
||||
if (value == 0)
|
||||
{
|
||||
// set up next pointer if this node is not used before.
|
||||
array[index] = (short)-avail; // use next available slot.
|
||||
value = (short)-avail;
|
||||
avail++;
|
||||
}
|
||||
|
||||
if (value > 0)
|
||||
{
|
||||
// prevent an IndexOutOfRangeException from array[index]
|
||||
throw new InvalidDataException("Deflate64: invalid Huffman data");
|
||||
}
|
||||
|
||||
Debug.Assert(value < 0, "CreateTable: Only negative numbers are used for tree pointers!");
|
||||
|
||||
if ((start & codeBitMask) == 0)
|
||||
{
|
||||
// if current bit is 0, go change the left array
|
||||
array = _left;
|
||||
}
|
||||
else
|
||||
{
|
||||
// if current bit is 1, set value in the right array
|
||||
array = _right;
|
||||
}
|
||||
index = -value; // go to next node
|
||||
|
||||
codeBitMask <<= 1;
|
||||
overflowBits--;
|
||||
} while (overflowBits != 0);
|
||||
|
||||
array[index] = (short)ch;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// This function will try to get enough bits from input and
|
||||
// try to decode the bits.
|
||||
// If there are not enough bits in the input, this function will return -1.
|
||||
//
|
||||
public int GetNextSymbol(InputBuffer input)
|
||||
{
|
||||
// Try to load 16 bits into input buffer if possible and get the bitBuffer value.
|
||||
// If there aren't 16 bits available we will return all we have in the
|
||||
// input buffer.
|
||||
uint bitBuffer = input.TryLoad16Bits();
|
||||
if (input.AvailableBits == 0)
|
||||
{ // running out of input.
|
||||
return -1;
|
||||
}
|
||||
|
||||
// decode an element
|
||||
int symbol = _table[bitBuffer & _tableMask];
|
||||
if (symbol < 0)
|
||||
{ // this will be the start of the binary tree
|
||||
// navigate the tree
|
||||
uint mask = (uint)1 << _tableBits;
|
||||
do
|
||||
{
|
||||
symbol = -symbol;
|
||||
if ((bitBuffer & mask) == 0)
|
||||
symbol = _left[symbol];
|
||||
else
|
||||
symbol = _right[symbol];
|
||||
mask <<= 1;
|
||||
} while (symbol < 0);
|
||||
}
|
||||
|
||||
int codeLength = _codeLengthArray[symbol];
|
||||
|
||||
// huffman code lengths must be at least 1 bit long
|
||||
if (codeLength <= 0)
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: invalid Huffman data");
|
||||
}
|
||||
|
||||
//
|
||||
// If this code is longer than the # bits we had in the bit buffer (i.e.
|
||||
// we read only part of the code), we can hit the entry in the table or the tree
|
||||
// for another symbol. However the length of another symbol will not match the
|
||||
// available bits count.
|
||||
if (codeLength > input.AvailableBits)
|
||||
{
|
||||
// We already tried to load 16 bits and maximum length is 15,
|
||||
// so this means we are running out of input.
|
||||
return -1;
|
||||
}
|
||||
|
||||
input.SkipBits(codeLength);
|
||||
return symbol;
|
||||
}
|
||||
}
|
||||
}
|
||||
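To make CalculateHuffmanCode concrete, here is the RFC 1951 canonical-code assignment worked by hand for four symbols; the numbers follow the bitLengthCount/nextCode loop above. Code lengths {A:2, B:1, C:3, D:3} give bitLengthCount = {1:1, 2:1, 3:2}, so nextCode[1] = 0, nextCode[2] = (0 + 1) << 1 = 2, nextCode[3] = (2 + 1) << 1 = 6. Assigning in index order: A -> 10, B -> 0, C -> 110, D -> 111. BitReverse is then applied per symbol because GetNextSymbol indexes the lookup table with the least significant bit first.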
738
src/SharpCompress/Compressors/Deflate64/InflaterManaged.cs
Normal file
@@ -0,0 +1,738 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
//
|
||||
// zlib.h -- interface of the 'zlib' general purpose compression library
|
||||
// version 1.2.1, November 17th, 2003
|
||||
//
|
||||
// Copyright (C) 1995-2003 Jean-loup Gailly and Mark Adler
|
||||
//
|
||||
// This software is provided 'as-is', without any express or implied
|
||||
// warranty. In no event will the authors be held liable for any damages
|
||||
// arising from the use of this software.
|
||||
//
|
||||
// Permission is granted to anyone to use this software for any purpose,
|
||||
// including commercial applications, and to alter it and redistribute it
|
||||
// freely, subject to the following restrictions:
|
||||
//
|
||||
// 1. The origin of this software must not be misrepresented; you must not
|
||||
// claim that you wrote the original software. If you use this software
|
||||
// in a product, an acknowledgment in the product documentation would be
|
||||
// appreciated but is not required.
|
||||
// 2. Altered source versions must be plainly marked as such, and must not be
|
||||
// misrepresented as being the original software.
|
||||
// 3. This notice may not be removed or altered from any source distribution.
|
||||
//
|
||||
//
|
||||
|
||||
using System;
|
||||
using System.Diagnostics;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
internal sealed class InflaterManaged
|
||||
{
|
||||
// const tables used in decoding:
|
||||
|
||||
// Extra bits for length code 257 - 285.
|
||||
private static readonly byte[] s_extraLengthBits =
|
||||
{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,16 };
|
||||
|
||||
// The base length for length code 257 - 285.
|
||||
// The formula to get the real length for a length code is lengthBase[code - 257] + (value stored in extraBits)
|
||||
private static readonly int[] s_lengthBase =
|
||||
{ 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,3};
|
||||
|
||||
// The base distance for distance code 0 - 31
|
||||
// The real distance for a distance code is distanceBasePosition[code] + (value stored in extraBits)
|
||||
private static readonly int[] s_distanceBasePosition =
|
||||
{ 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,32769,49153 };
|
||||
|
||||
// Code lengths for the code length alphabet are stored in the following order
|
||||
private static readonly byte[] s_codeOrder = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
|
||||
|
||||
private static readonly byte[] s_staticDistanceTreeTable =
|
||||
{
|
||||
0x00,0x10,0x08,0x18,0x04,0x14,0x0c,0x1c,0x02,0x12,0x0a,0x1a,
|
||||
0x06,0x16,0x0e,0x1e,0x01,0x11,0x09,0x19,0x05,0x15,0x0d,0x1d,
|
||||
0x03,0x13,0x0b,0x1b,0x07,0x17,0x0f,0x1f
|
||||
};
|
||||
|
||||
private readonly OutputWindow _output;
|
||||
private readonly InputBuffer _input;
|
||||
private HuffmanTree _literalLengthTree;
|
||||
private HuffmanTree _distanceTree;
|
||||
|
||||
private InflaterState _state;
|
||||
//private bool _hasFormatReader;
|
||||
private int _bfinal;
|
||||
private BlockType _blockType;
|
||||
|
||||
// uncompressed block
|
||||
private readonly byte[] _blockLengthBuffer = new byte[4];
|
||||
private int _blockLength;
|
||||
|
||||
// compressed block
|
||||
private int _length;
|
||||
private int _distanceCode;
|
||||
private int _extraBits;
|
||||
|
||||
private int _loopCounter;
|
||||
private int _literalLengthCodeCount;
|
||||
private int _distanceCodeCount;
|
||||
private int _codeLengthCodeCount;
|
||||
private int _codeArraySize;
|
||||
private int _lengthCode;
|
||||
|
||||
private readonly byte[] _codeList; // temporary array to store the code length for literal/Length and distance
|
||||
private readonly byte[] _codeLengthTreeCodeLength;
|
||||
private readonly bool _deflate64;
|
||||
private HuffmanTree _codeLengthTree;
|
||||
|
||||
//private IFileFormatReader _formatReader; // class to decode header and footer (e.g. gzip)
|
||||
|
||||
internal InflaterManaged(/*IFileFormatReader reader, */bool deflate64)
|
||||
{
|
||||
_output = new OutputWindow();
|
||||
_input = new InputBuffer();
|
||||
|
||||
_codeList = new byte[HuffmanTree.MaxLiteralTreeElements + HuffmanTree.MaxDistTreeElements];
|
||||
_codeLengthTreeCodeLength = new byte[HuffmanTree.NumberOfCodeLengthTreeElements];
|
||||
_deflate64 = deflate64;
|
||||
//if (reader != null)
|
||||
//{
|
||||
// _formatReader = reader;
|
||||
// _hasFormatReader = true;
|
||||
//}
|
||||
Reset();
|
||||
}
|
||||
|
||||
private void Reset()
|
||||
{
|
||||
_state = //_hasFormatReader ?
|
||||
//InflaterState.ReadingHeader : // start by reading Header info
|
||||
InflaterState.ReadingBFinal; // start by reading BFinal bit
|
||||
}
|
||||
|
||||
public void SetInput(byte[] inputBytes, int offset, int length) =>
|
||||
_input.SetInput(inputBytes, offset, length); // append the bytes
|
||||
|
||||
public bool Finished() => _state == InflaterState.Done || _state == InflaterState.VerifyingFooter;
|
||||
|
||||
public int AvailableOutput => _output.AvailableBytes;
|
||||
|
||||
public int Inflate(byte[] bytes, int offset, int length)
|
||||
{
|
||||
// Copy bytes from the output window into 'bytes' if any are available.
// If the buffer is not yet full, keep decoding until no more input is available.
// If DecodeBlock returns false, throw an exception.
|
||||
int count = 0;
|
||||
do
|
||||
{
|
||||
int copied = _output.CopyTo(bytes, offset, length);
|
||||
if (copied > 0)
|
||||
{
|
||||
//if (_hasFormatReader)
|
||||
//{
|
||||
// _formatReader.UpdateWithBytesRead(bytes, offset, copied);
|
||||
//}
|
||||
|
||||
offset += copied;
|
||||
count += copied;
|
||||
length -= copied;
|
||||
}
|
||||
|
||||
if (length == 0)
|
||||
{ // filled in the bytes array
|
||||
break;
|
||||
}
|
||||
// Decode will return false when more input is needed
|
||||
} while (!Finished() && Decode());
|
||||
|
||||
if (_state == InflaterState.VerifyingFooter)
|
||||
{ // finished reading CRC
|
||||
// In this case finished is true and output window has all the data.
|
||||
// But some data in output window might not be copied out.
|
||||
if (_output.AvailableBytes == 0)
|
||||
{
|
||||
//_formatReader.Validate();
|
||||
}
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
//Each block of compressed data begins with 3 header bits
|
||||
// containing the following data:
|
||||
// first bit BFINAL
|
||||
// next 2 bits BTYPE
|
||||
// Note that the header bits do not necessarily begin on a byte
|
||||
// boundary, since a block does not necessarily occupy an integral
|
||||
// number of bytes.
|
||||
// BFINAL is set if and only if this is the last block of the data
|
||||
// set.
|
||||
// BTYPE specifies how the data are compressed, as follows:
|
||||
// 00 - no compression
|
||||
// 01 - compressed with fixed Huffman codes
|
||||
// 10 - compressed with dynamic Huffman codes
|
||||
// 11 - reserved (error)
|
||||
// The only difference between the two compressed cases is how the
|
||||
// Huffman codes for the literal/length and distance alphabets are
|
||||
// defined.
|
||||
//
|
||||
// This function returns true for success (end of block or output window is full,)
|
||||
// false if we are short of input
|
||||
//
|
||||
private bool Decode()
|
||||
{
|
||||
bool eob = false;
|
||||
bool result = false;
|
||||
|
||||
if (Finished())
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
//if (_hasFormatReader)
|
||||
//{
|
||||
// if (_state == InflaterState.ReadingHeader)
|
||||
// {
|
||||
// if (!_formatReader.ReadHeader(_input))
|
||||
// {
|
||||
// return false;
|
||||
// }
|
||||
// _state = InflaterState.ReadingBFinal;
|
||||
// }
|
||||
// else if (_state == InflaterState.StartReadingFooter || _state == InflaterState.ReadingFooter)
|
||||
// {
|
||||
// if (!_formatReader.ReadFooter(_input))
|
||||
// return false;
|
||||
|
||||
// _state = InflaterState.VerifyingFooter;
|
||||
// return true;
|
||||
// }
|
||||
//}
|
||||
|
||||
if (_state == InflaterState.ReadingBFinal)
|
||||
{
|
||||
// reading bfinal bit
|
||||
// Need 1 bit
|
||||
if (!_input.EnsureBitsAvailable(1))
|
||||
return false;
|
||||
|
||||
_bfinal = _input.GetBits(1);
|
||||
_state = InflaterState.ReadingBType;
|
||||
}
|
||||
|
||||
if (_state == InflaterState.ReadingBType)
|
||||
{
|
||||
// Need 2 bits
|
||||
if (!_input.EnsureBitsAvailable(2))
|
||||
{
|
||||
_state = InflaterState.ReadingBType;
|
||||
return false;
|
||||
}
|
||||
|
||||
_blockType = (BlockType)_input.GetBits(2);
|
||||
if (_blockType == BlockType.Dynamic)
|
||||
{
|
||||
_state = InflaterState.ReadingNumLitCodes;
|
||||
}
|
||||
else if (_blockType == BlockType.Static)
|
||||
{
|
||||
_literalLengthTree = HuffmanTree.StaticLiteralLengthTree;
|
||||
_distanceTree = HuffmanTree.StaticDistanceTree;
|
||||
_state = InflaterState.DecodeTop;
|
||||
}
|
||||
else if (_blockType == BlockType.Uncompressed)
|
||||
{
|
||||
_state = InflaterState.UncompressedAligning;
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: unknown block type");
|
||||
}
|
||||
}
|
||||
|
||||
if (_blockType == BlockType.Dynamic)
|
||||
{
|
||||
if (_state < InflaterState.DecodeTop)
|
||||
{
|
||||
// we are reading the header
|
||||
result = DecodeDynamicBlockHeader();
|
||||
}
|
||||
else
|
||||
{
|
||||
result = DecodeBlock(out eob); // this can return true when the output is full
|
||||
}
|
||||
}
|
||||
else if (_blockType == BlockType.Static)
|
||||
{
|
||||
result = DecodeBlock(out eob);
|
||||
}
|
||||
else if (_blockType == BlockType.Uncompressed)
|
||||
{
|
||||
result = DecodeUncompressedBlock(out eob);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: unknown block type");
|
||||
}
|
||||
|
||||
//
|
||||
// If we reached the end of the block and the block we were decoding had
|
||||
// bfinal=1 (final block)
|
||||
//
|
||||
if (eob && (_bfinal != 0))
|
||||
{
|
||||
//if (_hasFormatReader)
|
||||
// _state = InflaterState.StartReadingFooter;
|
||||
//else
|
||||
_state = InflaterState.Done;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
// Format of Non-compressed blocks (BTYPE=00):
|
||||
//
|
||||
// Any bits of input up to the next byte boundary are ignored.
|
||||
// The rest of the block consists of the following information:
|
||||
//
|
||||
// 0 1 2 3 4...
|
||||
// +---+---+---+---+================================+
|
||||
// | LEN | NLEN |... LEN bytes of literal data...|
|
||||
// +---+---+---+---+================================+
|
||||
//
|
||||
// LEN is the number of data bytes in the block. NLEN is the
|
||||
// one's complement of LEN.
|
||||
private bool DecodeUncompressedBlock(out bool end_of_block)
|
||||
{
|
||||
end_of_block = false;
|
||||
while (true)
|
||||
{
|
||||
switch (_state)
|
||||
{
|
||||
case InflaterState.UncompressedAligning: // initial state when calling this function
|
||||
// we must skip to a byte boundary
|
||||
_input.SkipToByteBoundary();
|
||||
_state = InflaterState.UncompressedByte1;
|
||||
goto case InflaterState.UncompressedByte1;
|
||||
|
||||
case InflaterState.UncompressedByte1: // decoding block length
|
||||
case InflaterState.UncompressedByte2:
|
||||
case InflaterState.UncompressedByte3:
|
||||
case InflaterState.UncompressedByte4:
|
||||
int bits = _input.GetBits(8);
|
||||
if (bits < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
_blockLengthBuffer[_state - InflaterState.UncompressedByte1] = (byte)bits;
|
||||
if (_state == InflaterState.UncompressedByte4)
|
||||
{
|
||||
_blockLength = _blockLengthBuffer[0] + ((int)_blockLengthBuffer[1]) * 256;
|
||||
int blockLengthComplement = _blockLengthBuffer[2] + ((int)_blockLengthBuffer[3]) * 256;
|
||||
|
||||
// make sure complement matches
|
||||
if ((ushort)_blockLength != (ushort)(~blockLengthComplement))
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: invalid block length");
|
||||
}
|
||||
}
|
||||
|
||||
_state += 1;
|
||||
break;
|
||||
|
||||
case InflaterState.DecodingUncompressed: // copying block data
|
||||
|
||||
// Directly copy bytes from input to output.
|
||||
int bytesCopied = _output.CopyFrom(_input, _blockLength);
|
||||
_blockLength -= bytesCopied;
|
||||
|
||||
if (_blockLength == 0)
|
||||
{
|
||||
// Done with this block, need to re-init bit buffer for next block
|
||||
_state = InflaterState.ReadingBFinal;
|
||||
end_of_block = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
// We can fail to copy all bytes for two reasons:
|
||||
// Running out of Input
|
||||
// running out of free space in output window
|
||||
if (_output.FreeBytes == 0)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
|
||||
default:
|
||||
Debug./*Fail*/Assert(false, "check why we are here!");
|
||||
throw new InvalidDataException("Deflate64: unknown state");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private bool DecodeBlock(out bool end_of_block_code_seen)
|
||||
{
|
||||
end_of_block_code_seen = false;
|
||||
|
||||
int freeBytes = _output.FreeBytes; // it is a little bit faster than frequently accessing the property
|
||||
while (freeBytes > 65536)
|
||||
{
|
||||
// With Deflate64 we can have up to a 64kb length, so we ensure at least that much space is available
|
||||
// in the OutputWindow to avoid overwriting previous unflushed output data.
|
||||
|
||||
int symbol;
|
||||
switch (_state)
|
||||
{
|
||||
case InflaterState.DecodeTop:
|
||||
// decode an element from the literal tree
|
||||
|
||||
// TODO: optimize this!!!
|
||||
symbol = _literalLengthTree.GetNextSymbol(_input);
|
||||
if (symbol < 0)
|
||||
{
|
||||
// running out of input
|
||||
return false;
|
||||
}
|
||||
|
||||
if (symbol < 256)
|
||||
{
|
||||
// literal
|
||||
_output.Write((byte)symbol);
|
||||
--freeBytes;
|
||||
}
|
||||
else if (symbol == 256)
|
||||
{
|
||||
// end of block
|
||||
end_of_block_code_seen = true;
|
||||
// Reset state
|
||||
_state = InflaterState.ReadingBFinal;
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
// length/distance pair
|
||||
symbol -= 257; // length code started at 257
|
||||
if (symbol < 8)
|
||||
{
|
||||
symbol += 3; // match length = 3,4,5,6,7,8,9,10
|
||||
_extraBits = 0;
|
||||
}
|
||||
else if (!_deflate64 && symbol == 28)
|
||||
{
|
||||
// extra bits for code 285 is 0
|
||||
symbol = 258; // code 285 means length 258
|
||||
_extraBits = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (symbol < 0 || symbol >= s_extraLengthBits.Length)
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: invalid data");
|
||||
}
|
||||
_extraBits = s_extraLengthBits[symbol];
|
||||
Debug.Assert(_extraBits != 0, "We handle other cases separately!");
|
||||
}
|
||||
_length = symbol;
|
||||
goto case InflaterState.HaveInitialLength;
|
||||
}
|
||||
break;
|
||||
|
||||
case InflaterState.HaveInitialLength:
|
||||
if (_extraBits > 0)
|
||||
{
|
||||
_state = InflaterState.HaveInitialLength;
|
||||
int bits = _input.GetBits(_extraBits);
|
||||
if (bits < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_length < 0 || _length >= s_lengthBase.Length)
|
||||
{
|
||||
throw new InvalidDataException("Deflate64: invalid data");
|
||||
}
|
||||
_length = s_lengthBase[_length] + bits;
|
||||
}
|
||||
_state = InflaterState.HaveFullLength;
|
||||
goto case InflaterState.HaveFullLength;
|
||||
|
||||
case InflaterState.HaveFullLength:
|
||||
if (_blockType == BlockType.Dynamic)
|
||||
{
|
||||
_distanceCode = _distanceTree.GetNextSymbol(_input);
|
||||
}
|
||||
else
|
||||
{
|
||||
// get distance code directly for static block
|
||||
_distanceCode = _input.GetBits(5);
|
||||
if (_distanceCode >= 0)
|
||||
{
|
||||
_distanceCode = s_staticDistanceTreeTable[_distanceCode];
|
||||
}
|
||||
}
|
||||
|
||||
if (_distanceCode < 0)
|
||||
{
|
||||
// running out of input
|
||||
return false;
|
||||
}
|
||||
|
||||
_state = InflaterState.HaveDistCode;
|
||||
goto case InflaterState.HaveDistCode;
|
||||
|
||||
case InflaterState.HaveDistCode:
|
||||
// To avoid a table lookup we note that for distanceCode > 3,
|
||||
// extra_bits = (distanceCode-2) >> 1
|
||||
int offset;
|
||||
if (_distanceCode > 3)
|
||||
{
|
||||
_extraBits = (_distanceCode - 2) >> 1;
|
||||
int bits = _input.GetBits(_extraBits);
|
||||
if (bits < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
offset = s_distanceBasePosition[_distanceCode] + bits;
|
||||
}
|
||||
else
|
||||
{
|
||||
offset = _distanceCode + 1;
|
||||
}
|
||||
|
||||
_output.WriteLengthDistance(_length, offset);
|
||||
freeBytes -= _length;
|
||||
_state = InflaterState.DecodeTop;
|
||||
break;
|
||||
|
||||
default:
|
||||
Debug./*Fail*/Assert(false, "check why we are here!");
|
||||
throw new InvalidDataException("Deflate64: unknown state");
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
// Format of the dynamic block header:
|
||||
// 5 Bits: HLIT, # of Literal/Length codes - 257 (257 - 286)
|
||||
// 5 Bits: HDIST, # of Distance codes - 1 (1 - 32)
|
||||
// 4 Bits: HCLEN, # of Code Length codes - 4 (4 - 19)
|
||||
//
|
||||
// (HCLEN + 4) x 3 bits: code lengths for the code length
|
||||
// alphabet given just above, in the order: 16, 17, 18,
|
||||
// 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
|
||||
//
|
||||
// These code lengths are interpreted as 3-bit integers
|
||||
// (0-7); as above, a code length of 0 means the
|
||||
// corresponding symbol (literal/length or distance code
|
||||
// length) is not used.
|
||||
//
|
||||
// HLIT + 257 code lengths for the literal/length alphabet,
|
||||
// encoded using the code length Huffman code
|
||||
//
|
||||
// HDIST + 1 code lengths for the distance alphabet,
|
||||
// encoded using the code length Huffman code
|
||||
//
|
||||
// The code length repeat codes can cross from HLIT + 257 to the
|
||||
// HDIST + 1 code lengths. In other words, all code lengths form
|
||||
// a single sequence of HLIT + HDIST + 258 values.
|
||||
private bool DecodeDynamicBlockHeader()
|
||||
{
|
||||
switch (_state)
|
||||
{
|
||||
case InflaterState.ReadingNumLitCodes:
|
||||
_literalLengthCodeCount = _input.GetBits(5);
|
||||
if (_literalLengthCodeCount < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
_literalLengthCodeCount += 257;
|
||||
_state = InflaterState.ReadingNumDistCodes;
|
||||
goto case InflaterState.ReadingNumDistCodes;
|
||||
|
||||
case InflaterState.ReadingNumDistCodes:
|
||||
_distanceCodeCount = _input.GetBits(5);
|
||||
if (_distanceCodeCount < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
_distanceCodeCount += 1;
|
||||
_state = InflaterState.ReadingNumCodeLengthCodes;
|
||||
goto case InflaterState.ReadingNumCodeLengthCodes;
|
||||
|
||||
case InflaterState.ReadingNumCodeLengthCodes:
|
||||
_codeLengthCodeCount = _input.GetBits(4);
|
||||
if (_codeLengthCodeCount < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
_codeLengthCodeCount += 4;
|
||||
_loopCounter = 0;
|
||||
_state = InflaterState.ReadingCodeLengthCodes;
|
||||
goto case InflaterState.ReadingCodeLengthCodes;
|
||||
|
||||
case InflaterState.ReadingCodeLengthCodes:
|
||||
while (_loopCounter < _codeLengthCodeCount)
|
||||
{
|
||||
int bits = _input.GetBits(3);
|
||||
if (bits < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
_codeLengthTreeCodeLength[s_codeOrder[_loopCounter]] = (byte)bits;
|
||||
++_loopCounter;
|
||||
}
|
||||
|
||||
for (int i = _codeLengthCodeCount; i < s_codeOrder.Length; i++)
|
||||
{
|
||||
_codeLengthTreeCodeLength[s_codeOrder[i]] = 0;
|
||||
}
|
||||
|
||||
// create huffman tree for code length
|
||||
_codeLengthTree = new HuffmanTree(_codeLengthTreeCodeLength);
|
||||
_codeArraySize = _literalLengthCodeCount + _distanceCodeCount;
|
||||
_loopCounter = 0; // reset loop count
|
||||
|
||||
_state = InflaterState.ReadingTreeCodesBefore;
|
||||
goto case InflaterState.ReadingTreeCodesBefore;
|
||||
|
||||
case InflaterState.ReadingTreeCodesBefore:
|
||||
case InflaterState.ReadingTreeCodesAfter:
|
||||
while (_loopCounter < _codeArraySize)
|
||||
{
|
||||
if (_state == InflaterState.ReadingTreeCodesBefore)
|
||||
{
|
||||
if ((_lengthCode = _codeLengthTree.GetNextSymbol(_input)) < 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// The alphabet for code lengths is as follows:
|
||||
// 0 - 15: Represent code lengths of 0 - 15
|
||||
// 16: Copy the previous code length 3 - 6 times.
|
||||
// The next 2 bits indicate repeat length
|
||||
// (0 = 3, ... , 3 = 6)
|
||||
// Example: Codes 8, 16 (+2 bits 11),
|
||||
// 16 (+2 bits 10) will expand to
|
||||
// 12 code lengths of 8 (1 + 6 + 5)
|
||||
// 17: Repeat a code length of 0 for 3 - 10 times.
|
||||
// (3 bits of length)
|
||||
// 18: Repeat a code length of 0 for 11 - 138 times
|
||||
// (7 bits of length)
|
||||
if (_lengthCode <= 15)
|
||||
{
|
||||
_codeList[_loopCounter++] = (byte)_lengthCode;
|
||||
}
|
||||
else
|
||||
{
|
||||
int repeatCount;
|
||||
if (_lengthCode == 16)
|
||||
{
|
||||
if (!_input.EnsureBitsAvailable(2))
|
||||
{
|
||||
_state = InflaterState.ReadingTreeCodesAfter;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_loopCounter == 0)
|
||||
{
|
||||
// can't have "prev code" on first code
|
||||
throw new InvalidDataException();
|
||||
}
|
||||
|
||||
byte previousCode = _codeList[_loopCounter - 1];
|
||||
repeatCount = _input.GetBits(2) + 3;
|
||||
|
||||
if (_loopCounter + repeatCount > _codeArraySize)
|
||||
{
|
||||
throw new InvalidDataException();
|
||||
}
|
||||
|
||||
for (int j = 0; j < repeatCount; j++)
|
||||
{
|
||||
_codeList[_loopCounter++] = previousCode;
|
||||
}
|
||||
}
|
||||
else if (_lengthCode == 17)
|
||||
{
|
||||
if (!_input.EnsureBitsAvailable(3))
|
||||
{
|
||||
_state = InflaterState.ReadingTreeCodesAfter;
|
||||
return false;
|
||||
}
|
||||
|
||||
repeatCount = _input.GetBits(3) + 3;
|
||||
|
||||
if (_loopCounter + repeatCount > _codeArraySize)
|
||||
{
|
||||
throw new InvalidDataException();
|
||||
}
|
||||
|
||||
for (int j = 0; j < repeatCount; j++)
|
||||
{
|
||||
_codeList[_loopCounter++] = 0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// code == 18
|
||||
if (!_input.EnsureBitsAvailable(7))
|
||||
{
|
||||
_state = InflaterState.ReadingTreeCodesAfter;
|
||||
return false;
|
||||
}
|
||||
|
||||
repeatCount = _input.GetBits(7) + 11;
|
||||
|
||||
if (_loopCounter + repeatCount > _codeArraySize)
|
||||
{
|
||||
throw new InvalidDataException();
|
||||
}
|
||||
|
||||
for (int j = 0; j < repeatCount; j++)
|
||||
{
|
||||
_codeList[_loopCounter++] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
_state = InflaterState.ReadingTreeCodesBefore; // we want to read the next code.
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
Debug./*Fail*/Assert(false, "check why we are here!");
|
||||
throw new InvalidDataException("Deflate64: unknown state");
|
||||
}
|
||||
|
||||
byte[] literalTreeCodeLength = new byte[HuffmanTree.MaxLiteralTreeElements];
|
||||
byte[] distanceTreeCodeLength = new byte[HuffmanTree.MaxDistTreeElements];
|
||||
|
||||
// Create literal and distance tables
|
||||
Array.Copy(_codeList, 0, literalTreeCodeLength, 0, _literalLengthCodeCount);
|
||||
Array.Copy(_codeList, _literalLengthCodeCount, distanceTreeCodeLength, 0, _distanceCodeCount);
|
||||
|
||||
// Make sure there is an end-of-block code, otherwise how could we ever end?
|
||||
if (literalTreeCodeLength[HuffmanTree.EndOfBlockCode] == 0)
|
||||
{
|
||||
throw new InvalidDataException();
|
||||
}
|
||||
|
||||
_literalLengthTree = new HuffmanTree(literalTreeCodeLength);
|
||||
_distanceTree = new HuffmanTree(distanceTreeCodeLength);
|
||||
_state = InflaterState.DecodeTop;
|
||||
return true;
|
||||
}
|
||||
|
||||
public void Dispose() { }
|
||||
}
|
||||
}
|
||||
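A concrete instance of the stored-block length check in DecodeUncompressedBlock, using the LEN/NLEN layout described in the comments above; the values are chosen only for illustration.

internal static class StoredBlockCheckExample   // hypothetical helper, not part of the diff
{
    public static bool IsValid()
    {
        // A stored block of 1000 bytes carries LEN = 0x03E8 and NLEN = 0xFC17
        // (the one's complement of LEN); the inflater rejects the block unless
        // (ushort)LEN == (ushort)~NLEN.
        ushort len = 0x03E8;           // 1000
        ushort nlen = 0xFC17;          // ~1000, truncated to 16 bits
        return len == (ushort)~nlen;   // true
    }
}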
42
src/SharpCompress/Compressors/Deflate64/InflaterState.cs
Normal file
@@ -0,0 +1,42 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
// Do not rearrange the enum values.
|
||||
internal enum InflaterState
|
||||
{
|
||||
ReadingHeader = 0, // Only applies to GZIP
|
||||
|
||||
ReadingBFinal = 2, // About to read bfinal bit
|
||||
ReadingBType = 3, // About to read blockType bits
|
||||
|
||||
ReadingNumLitCodes = 4, // About to read # literal codes
|
||||
ReadingNumDistCodes = 5, // About to read # dist codes
|
||||
ReadingNumCodeLengthCodes = 6, // About to read # code length codes
|
||||
ReadingCodeLengthCodes = 7, // In the middle of reading the code length codes
|
||||
ReadingTreeCodesBefore = 8, // In the middle of reading tree codes (loop top)
|
||||
ReadingTreeCodesAfter = 9, // In the middle of reading tree codes (extension; code > 15)
|
||||
|
||||
DecodeTop = 10, // About to decode a literal (char/match) in a compressed block
|
||||
HaveInitialLength = 11, // Decoding a match, have the literal code (base length)
|
||||
HaveFullLength = 12, // Ditto, now have the full match length (incl. extra length bits)
|
||||
HaveDistCode = 13, // Ditto, now have the distance code also, need extra dist bits
|
||||
|
||||
/* uncompressed blocks */
|
||||
UncompressedAligning = 15,
|
||||
UncompressedByte1 = 16,
|
||||
UncompressedByte2 = 17,
|
||||
UncompressedByte3 = 18,
|
||||
UncompressedByte4 = 19,
|
||||
DecodingUncompressed = 20,
|
||||
|
||||
// These three apply only to GZIP
|
||||
StartReadingFooter = 21, // (Initialisation for reading footer)
|
||||
ReadingFooter = 22,
|
||||
VerifyingFooter = 23,
|
||||
|
||||
Done = 24 // Finished
|
||||
}
|
||||
}
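The values above matter because the Deflate64 inflater decodes incrementally: when input runs out part-way through a step, it records where it was in InflaterState and returns, then resumes from that state on the next call. A minimal sketch of that resume pattern follows; ResumableDecoder and TryDecodeHeader are hypothetical names used only for illustration, and a real decoder branches to different states per block type.

internal sealed class ResumableDecoder
{
private InflaterState _state = InflaterState.ReadingBFinal;

// Returns false to mean "need more input"; the caller supplies more bytes and calls again.
public bool TryDecodeHeader(InputBuffer input)
{
if (_state == InflaterState.ReadingBFinal)
{
if (!input.EnsureBitsAvailable(1)) { return false; }
input.GetBits(1); // bfinal bit
_state = InflaterState.ReadingBType;
}
if (_state == InflaterState.ReadingBType)
{
if (!input.EnsureBitsAvailable(2)) { return false; }
input.GetBits(2); // block type
_state = InflaterState.DecodeTop;
}
return true;
}
}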
|
||||
202
src/SharpCompress/Compressors/Deflate64/InputBuffer.cs
Normal file
@@ -0,0 +1,202 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using System;
|
||||
using System.Diagnostics;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
// This class can be used to read bits from a byte array quickly.
// Normally we get bits from the 'bitBuffer' field and 'bitsInBuffer' stores
// the number of bits available in 'bitBuffer'.
|
||||
// When we have used up the bits in bitBuffer, we will try to get a byte from
// the byte array and copy the byte to the appropriate position in bitBuffer.
|
||||
//
|
||||
// The byte array is not reused. We will go from 'start' to 'end'.
|
||||
// When we reach the end, most read operations will return -1,
|
||||
// which means we are running out of input.
|
||||
|
||||
internal sealed class InputBuffer
|
||||
{
|
||||
private byte[] _buffer; // byte array to store input
|
||||
private int _start; // start position of the buffer
|
||||
private int _end; // end position of the buffer
|
||||
private uint _bitBuffer = 0; // store the bits here, we can quickly shift in this buffer
|
||||
private int _bitsInBuffer = 0; // number of bits available in bitBuffer
|
||||
|
||||
/// <summary>Total bits available in the input buffer.</summary>
|
||||
public int AvailableBits => _bitsInBuffer;
|
||||
|
||||
/// <summary>Total bytes available in the input buffer.</summary>
|
||||
public int AvailableBytes => (_end - _start) + (_bitsInBuffer / 8);
|
||||
|
||||
/// <summary>Ensure that count bits are in the bit buffer.</summary>
|
||||
/// <param name="count">Can be up to 16.</param>
|
||||
/// <returns>Returns false if input is not sufficient to make this true.</returns>
|
||||
public bool EnsureBitsAvailable(int count)
|
||||
{
|
||||
Debug.Assert(0 < count && count <= 16, "count is invalid.");
|
||||
|
||||
// manual inlining to improve perf
|
||||
if (_bitsInBuffer < count)
|
||||
{
|
||||
if (NeedsInput())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// insert a byte to bitbuffer
|
||||
_bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
|
||||
_bitsInBuffer += 8;
|
||||
|
||||
if (_bitsInBuffer < count)
|
||||
{
|
||||
if (NeedsInput())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// insert a byte to bitbuffer
|
||||
_bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
|
||||
_bitsInBuffer += 8;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// This function will try to load 16 or more bits into bitBuffer.
|
||||
/// It returns whatever is contained in bitBuffer after loading.
|
||||
/// The main difference between this and GetBits is that this will
|
||||
/// never return -1. So the caller needs to check AvailableBits to
|
||||
/// see how many bits are available.
|
||||
/// </summary>
|
||||
public uint TryLoad16Bits()
|
||||
{
|
||||
if (_bitsInBuffer < 8)
|
||||
{
|
||||
if (_start < _end)
|
||||
{
|
||||
_bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
|
||||
_bitsInBuffer += 8;
|
||||
}
|
||||
|
||||
if (_start < _end)
|
||||
{
|
||||
_bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
|
||||
_bitsInBuffer += 8;
|
||||
}
|
||||
}
|
||||
else if (_bitsInBuffer < 16)
|
||||
{
|
||||
if (_start < _end)
|
||||
{
|
||||
_bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
|
||||
_bitsInBuffer += 8;
|
||||
}
|
||||
}
|
||||
|
||||
return _bitBuffer;
|
||||
}
|
||||
|
||||
private uint GetBitMask(int count) => ((uint)1 << count) - 1;
|
||||
|
||||
/// <summary>Gets count bits from the input buffer. Returns -1 if not enough bits available.</summary>
|
||||
public int GetBits(int count)
|
||||
{
|
||||
Debug.Assert(0 < count && count <= 16, "count is invalid.");
|
||||
|
||||
if (!EnsureBitsAvailable(count))
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
int result = (int)(_bitBuffer & GetBitMask(count));
|
||||
_bitBuffer >>= count;
|
||||
_bitsInBuffer -= count;
|
||||
return result;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Copies length bytes from input buffer to output buffer starting at output[offset].
|
||||
/// You have to make sure that the buffer is byte aligned. If not enough bytes are
|
||||
/// available, copies fewer bytes.
|
||||
/// </summary>
|
||||
/// <returns>Returns the number of bytes copied, 0 if no byte is available.</returns>
|
||||
public int CopyTo(byte[] output, int offset, int length)
|
||||
{
|
||||
Debug.Assert(output != null);
|
||||
Debug.Assert(offset >= 0);
|
||||
Debug.Assert(length >= 0);
|
||||
Debug.Assert(offset <= output.Length - length);
|
||||
Debug.Assert((_bitsInBuffer % 8) == 0);
|
||||
|
||||
// Copy the bytes in bitBuffer first.
|
||||
int bytesFromBitBuffer = 0;
|
||||
while (_bitsInBuffer > 0 && length > 0)
|
||||
{
|
||||
output[offset++] = (byte)_bitBuffer;
|
||||
_bitBuffer >>= 8;
|
||||
_bitsInBuffer -= 8;
|
||||
length--;
|
||||
bytesFromBitBuffer++;
|
||||
}
|
||||
|
||||
if (length == 0)
|
||||
{
|
||||
return bytesFromBitBuffer;
|
||||
}
|
||||
|
||||
int avail = _end - _start;
|
||||
if (length > avail)
|
||||
{
|
||||
length = avail;
|
||||
}
|
||||
|
||||
Array.Copy(_buffer, _start, output, offset, length);
|
||||
_start += length;
|
||||
return bytesFromBitBuffer + length;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns true if all input bytes are used.
|
||||
/// This means the caller can call SetInput to add more input.
|
||||
/// </summary>
|
||||
public bool NeedsInput() => _start == _end;
|
||||
|
||||
/// <summary>
|
||||
/// Set the byte array to be processed.
|
||||
/// All the bits remaining in bitBuffer will be processed before the new bytes.
/// We don't clone the byte array here since it is expensive.
/// The caller should make sure that, once a buffer is passed in,
/// it is not changed before calling this function again.
|
||||
/// </summary>
|
||||
public void SetInput(byte[] buffer, int offset, int length)
|
||||
{
|
||||
Debug.Assert(buffer != null);
|
||||
Debug.Assert(offset >= 0);
|
||||
Debug.Assert(length >= 0);
|
||||
Debug.Assert(offset <= buffer.Length - length);
|
||||
Debug.Assert(_start == _end);
|
||||
|
||||
_buffer = buffer;
|
||||
_start = offset;
|
||||
_end = offset + length;
|
||||
}
|
||||
|
||||
/// <summary>Skip n bits in the buffer.</summary>
|
||||
public void SkipBits(int n)
|
||||
{
|
||||
Debug.Assert(_bitsInBuffer >= n, "No enough bits in the buffer, Did you call EnsureBitsAvailable?");
|
||||
_bitBuffer >>= n;
|
||||
_bitsInBuffer -= n;
|
||||
}
|
||||
|
||||
/// <summary>Skips to the next byte boundary.</summary>
|
||||
public void SkipToByteBoundary()
|
||||
{
|
||||
_bitBuffer >>= (_bitsInBuffer % 8);
|
||||
_bitsInBuffer = _bitsInBuffer - (_bitsInBuffer % 8);
|
||||
}
|
||||
}
|
||||
}
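As a rough usage sketch of the bit reader above (these types are internal to the library, so this only compiles inside it; the byte values are arbitrary), feed it a byte array and pull little-endian bit fields out of it:

var input = new InputBuffer();
input.SetInput(new byte[] { 0xB5, 0x0F }, 0, 2); // 0xB5 = 1011 0101

int threeBits = input.GetBits(3); // low 3 bits of the first byte -> 0b101 = 5
int fiveBits = input.GetBits(5);  // next 5 bits -> 0b10110 = 22
// Once the remaining input cannot satisfy a request, GetBits returns -1.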
|
||||
17
src/SharpCompress/Compressors/Deflate64/Match.cs
Normal file
@@ -0,0 +1,17 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

namespace SharpCompress.Compressors.Deflate64
{
/// <summary>
/// This class represents a match in the history window.
/// </summary>
internal sealed class Match
{
internal MatchState State { get; set; }
internal int Position { get; set; }
internal int Length { get; set; }
internal byte Symbol { get; set; }
}
}
13
src/SharpCompress/Compressors/Deflate64/MatchState.cs
Normal file
@@ -0,0 +1,13 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

namespace SharpCompress.Compressors.Deflate64
{
internal enum MatchState
{
HasSymbol = 1,
HasMatch = 2,
HasSymbolAndMatch = 3
}
}
151
src/SharpCompress/Compressors/Deflate64/OutputWindow.cs
Normal file
@@ -0,0 +1,151 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
using System;
|
||||
using System.Diagnostics;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate64
|
||||
{
|
||||
/// <summary>
|
||||
/// This class maintains a window for decompressed output.
|
||||
/// We need to keep this because the decompressed information can be
|
||||
/// a literal or a length/distance pair. For length/distance pair,
|
||||
/// we need to look back in the output window and copy bytes from there.
|
||||
/// We use a byte array of WindowSize circularly.
|
||||
/// </summary>
|
||||
internal sealed class OutputWindow
|
||||
{
|
||||
// With Deflate64 we can have up to a 65536 length as well as up to a 65538 distance. This means we need a Window that is at
|
||||
// least 131074 bytes long so we have space to retrieve up to a full 64kb in lookback and place it in our buffer without
|
||||
// overwriting existing data. OutputWindow requires that the WindowSize be a power of 2, so we round up to 2^18.
|
||||
private const int WindowSize = 262144;
|
||||
private const int WindowMask = 262143;
|
||||
|
||||
private readonly byte[] _window = new byte[WindowSize]; // The window is 2^18 bytes
|
||||
private int _end; // this is the position to where we should write next byte
|
||||
private int _bytesUsed; // The number of bytes in the output window which is not consumed.
|
||||
|
||||
/// <summary>Add a byte to output window.</summary>
|
||||
public void Write(byte b)
|
||||
{
|
||||
Debug.Assert(_bytesUsed < WindowSize, "Can't add byte when window is full!");
|
||||
_window[_end++] = b;
|
||||
_end &= WindowMask;
|
||||
++_bytesUsed;
|
||||
}
|
||||
|
||||
public void WriteLengthDistance(int length, int distance)
|
||||
{
|
||||
Debug.Assert((_bytesUsed + length) <= WindowSize, "Not enough space");
|
||||
|
||||
// move backwards distance bytes in the output stream,
|
||||
// and copy length bytes from this position to the output stream.
|
||||
_bytesUsed += length;
|
||||
int copyStart = (_end - distance) & WindowMask; // start position for copying.
|
||||
|
||||
int border = WindowSize - length;
|
||||
if (copyStart <= border && _end < border)
|
||||
{
|
||||
if (length <= distance)
|
||||
{
|
||||
Array.Copy(_window, copyStart, _window, _end, length);
|
||||
_end += length;
|
||||
}
|
||||
else
|
||||
{
|
||||
// The referenced string may overlap the current
|
||||
// position; for example, if the last 2 bytes decoded have values
|
||||
// X and Y, a string reference with <length = 5, distance = 2>
|
||||
// adds X,Y,X,Y,X to the output stream.
|
||||
while (length-- > 0)
|
||||
{
|
||||
_window[_end++] = _window[copyStart++];
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// copy byte by byte
|
||||
while (length-- > 0)
|
||||
{
|
||||
_window[_end++] = _window[copyStart++];
|
||||
_end &= WindowMask;
|
||||
copyStart &= WindowMask;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Copy up to length of bytes from input directly.
|
||||
/// This is used for uncompressed block.
|
||||
/// </summary>
|
||||
public int CopyFrom(InputBuffer input, int length)
|
||||
{
|
||||
length = Math.Min(Math.Min(length, WindowSize - _bytesUsed), input.AvailableBytes);
|
||||
int copied;
|
||||
|
||||
// We might need to wrap around to copy all bytes.
|
||||
int tailLen = WindowSize - _end;
|
||||
if (length > tailLen)
|
||||
{
|
||||
// copy the first part
|
||||
copied = input.CopyTo(_window, _end, tailLen);
|
||||
if (copied == tailLen)
|
||||
{
|
||||
// only try to copy the second part if we have enough bytes in input
|
||||
copied += input.CopyTo(_window, 0, length - tailLen);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// only one copy is needed if there is no wrap around.
|
||||
copied = input.CopyTo(_window, _end, length);
|
||||
}
|
||||
|
||||
_end = (_end + copied) & WindowMask;
|
||||
_bytesUsed += copied;
|
||||
return copied;
|
||||
}
|
||||
|
||||
/// <summary>Free space in output window.</summary>
|
||||
public int FreeBytes => WindowSize - _bytesUsed;
|
||||
|
||||
/// <summary>Bytes not consumed in output window.</summary>
|
||||
public int AvailableBytes => _bytesUsed;
|
||||
|
||||
/// <summary>Copy the decompressed bytes to output array.</summary>
|
||||
public int CopyTo(byte[] output, int offset, int length)
|
||||
{
|
||||
int copy_end;
|
||||
|
||||
if (length > _bytesUsed)
|
||||
{
|
||||
// we can copy all the decompressed bytes out
|
||||
copy_end = _end;
|
||||
length = _bytesUsed;
|
||||
}
|
||||
else
|
||||
{
|
||||
copy_end = (_end - _bytesUsed + length) & WindowMask; // copy length of bytes
|
||||
}
|
||||
|
||||
int copied = length;
|
||||
|
||||
int tailLen = length - copy_end;
|
||||
if (tailLen > 0)
|
||||
{
|
||||
// this means we need to copy two parts separately
|
||||
// copy tailLen bytes from the end of output window
|
||||
Array.Copy(_window, WindowSize - tailLen,
|
||||
output, offset, tailLen);
|
||||
offset += tailLen;
|
||||
length = copy_end;
|
||||
}
|
||||
Array.Copy(_window, copy_end - length, output, offset, length);
|
||||
_bytesUsed -= copied;
|
||||
Debug.Assert(_bytesUsed >= 0, "check this function and find why we copied more bytes than we have");
|
||||
return copied;
|
||||
}
|
||||
}
|
||||
}
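The overlapping-copy branch in WriteLengthDistance above implements the usual LZ77 rule: when the distance is smaller than the length, the copy re-reads bytes it has just written. A small illustration using the class's own members (internal types, so this is only a sketch):

var window = new OutputWindow();
window.Write((byte)'X');
window.Write((byte)'Y');
window.WriteLengthDistance(5, 2); // appends X, Y, X, Y, X

var result = new byte[7];
int copied = window.CopyTo(result, 0, 7); // copied == 7; result now holds "XYXYXYX"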
|
||||
@@ -58,7 +58,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
if (index < 0 || index >= Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("index");
|
||||
throw new ArgumentOutOfRangeException(nameof(index));
|
||||
}
|
||||
|
||||
return (mBits[index >> 5] & (1u << (index & 31))) != 0;
|
||||
@@ -69,7 +69,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
if (index < 0 || index >= Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("index");
|
||||
throw new ArgumentOutOfRangeException(nameof(index));
|
||||
}
|
||||
|
||||
mBits[index >> 5] |= 1u << (index & 31);
|
||||
@@ -79,7 +79,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
{
|
||||
if (index < 0 || index >= Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("index");
|
||||
throw new ArgumentOutOfRangeException(nameof(index));
|
||||
}
|
||||
|
||||
uint bits = mBits[index >> 5];
|
||||
|
||||
@@ -58,22 +58,22 @@ namespace SharpCompress.Compressors.LZMA.Utilites
|
||||
{
|
||||
if (stream == null)
|
||||
{
|
||||
throw new ArgumentNullException("stream");
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
}
|
||||
|
||||
if (buffer == null)
|
||||
{
|
||||
throw new ArgumentNullException("buffer");
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
|
||||
if (offset < 0 || offset > buffer.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("offset");
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
|
||||
if (length < 0 || length > buffer.Length - offset)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("length");
|
||||
throw new ArgumentOutOfRangeException(nameof(length));
|
||||
}
|
||||
|
||||
while (length > 0)
|
||||
|
||||
@@ -146,12 +146,12 @@ namespace SharpCompress.Compressors.PPMd.I1
|
||||
{
|
||||
if (target == null)
|
||||
{
|
||||
throw new ArgumentNullException("target");
|
||||
throw new ArgumentNullException(nameof(target));
|
||||
}
|
||||
|
||||
if (source == null)
|
||||
{
|
||||
throw new ArgumentNullException("source");
|
||||
throw new ArgumentNullException(nameof(source));
|
||||
}
|
||||
|
||||
EncodeStart(properties);
|
||||
@@ -235,12 +235,12 @@ namespace SharpCompress.Compressors.PPMd.I1
|
||||
{
|
||||
if (target == null)
|
||||
{
|
||||
throw new ArgumentNullException("target");
|
||||
throw new ArgumentNullException(nameof(target));
|
||||
}
|
||||
|
||||
if (source == null)
|
||||
{
|
||||
throw new ArgumentNullException("source");
|
||||
throw new ArgumentNullException(nameof(source));
|
||||
}
|
||||
|
||||
DecodeStart(source, properties);
|
||||
|
||||
@@ -31,7 +31,7 @@ namespace SharpCompress.Compressors.Rar {
|
||||
{
|
||||
currentCrc = RarCRC.CheckCrc(currentCrc, buffer, offset, result);
|
||||
}
|
||||
else if (GetCrc() != readStream.CurrentCrc)
|
||||
else if (GetCrc() != readStream.CurrentCrc && count != 0)
|
||||
{
|
||||
// NOTE: we use the last FileHeader in a multipart volume to check CRC
|
||||
throw new InvalidFormatException("file crc mismatch");
|
||||
|
||||
@@ -18,9 +18,11 @@ namespace SharpCompress.Compressors.Xz
|
||||
public static int ReadLittleEndianInt32(this Stream stream)
|
||||
{
|
||||
byte[] bytes = new byte[4];
|
||||
var read = stream.Read(bytes, 0, 4);
|
||||
if (read != 4)
|
||||
var read = stream.ReadFully(bytes);
|
||||
if (!read)
|
||||
{
|
||||
throw new EndOfStreamException();
|
||||
}
|
||||
return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
|
||||
}
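The switch from a single Read call to ReadFully matters because Stream.Read may legally return fewer bytes than requested (network streams in particular). The extension is assumed to loop until the buffer is full, roughly like this sketch:

static bool ReadAll(Stream stream, byte[] buffer)
{
int total = 0;
while (total < buffer.Length)
{
int read = stream.Read(buffer, total, buffer.Length - total);
if (read <= 0)
{
return false; // end of stream before the buffer was filled
}
total += read;
}
return true;
}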
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ namespace SharpCompress.Compressors.Xz
|
||||
int i = 0;
|
||||
while ((LastByte & 0x80) != 0)
|
||||
{
|
||||
if (i >= MaxBytes)
|
||||
if (++i >= MaxBytes)
|
||||
throw new InvalidDataException();
|
||||
LastByte = reader.ReadByte();
|
||||
if (LastByte == 0)
|
||||
|
||||
@@ -50,11 +50,11 @@ namespace SharpCompress.Compressors.Xz
|
||||
|
||||
private void SkipPadding()
|
||||
{
|
||||
int padding = (int)(_bytesRead % 4);
|
||||
if (padding > 0)
|
||||
int bytes = (int)(BaseStream.Position % 4);
|
||||
if (bytes > 0)
|
||||
{
|
||||
byte[] paddingBytes = new byte[padding];
|
||||
BaseStream.Read(paddingBytes, 0, padding);
|
||||
byte[] paddingBytes = new byte[4 - bytes];
|
||||
BaseStream.Read(paddingBytes, 0, paddingBytes.Length);
|
||||
if (paddingBytes.Any(b => b != 0))
|
||||
throw new InvalidDataException("Padding bytes were non-null");
|
||||
}
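The change above corrects what gets skipped: the stream position modulo 4 is how far past the last 4-byte boundary the reader is, not the number of padding bytes. At position 13, for example, 13 % 4 == 1, and the XZ format requires skipping 4 - 1 == 3 zero bytes to reach the next boundary, whereas the old code skipped only 1. The second SkipPadding hunk below applies the same correction. In sketch form (position stands in for the current stream position):

int remainder = (int)(position % 4);
int paddingToSkip = remainder > 0 ? 4 - remainder : 0; // position 13 -> skip 3 bytes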
|
||||
|
||||
@@ -55,10 +55,10 @@ namespace SharpCompress.Compressors.Xz
|
||||
|
||||
private void SkipPadding()
|
||||
{
|
||||
int padding = (int)(_reader.BaseStream.Position - StreamStartPosition) % 4;
|
||||
if (padding > 0)
|
||||
int bytes = (int)(_reader.BaseStream.Position - StreamStartPosition) % 4;
|
||||
if (bytes > 0)
|
||||
{
|
||||
byte[] paddingBytes = _reader.ReadBytes(padding);
|
||||
byte[] paddingBytes = _reader.ReadBytes(4 - bytes);
|
||||
if (paddingBytes.Any(b => b != 0))
|
||||
throw new InvalidDataException("Padding bytes were non-null");
|
||||
}
|
||||
|
||||
@@ -156,7 +156,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (dest == null)
|
||||
{
|
||||
throw new ArgumentNullException("dest");
|
||||
throw new ArgumentNullException(nameof(dest));
|
||||
}
|
||||
if (destIdx < 0 || destIdx > dest.Length - size)
|
||||
{
|
||||
@@ -170,7 +170,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 8)
|
||||
{
|
||||
@@ -195,7 +195,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 8)
|
||||
{
|
||||
@@ -221,7 +221,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 8)
|
||||
{
|
||||
@@ -247,7 +247,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 4)
|
||||
{
|
||||
@@ -273,7 +273,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 4)
|
||||
{
|
||||
@@ -299,7 +299,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 4)
|
||||
{
|
||||
@@ -325,7 +325,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 2)
|
||||
{
|
||||
@@ -351,7 +351,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 2)
|
||||
{
|
||||
@@ -468,7 +468,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 8)
|
||||
{
|
||||
@@ -494,7 +494,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 8)
|
||||
{
|
||||
@@ -520,7 +520,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 8)
|
||||
{
|
||||
@@ -546,7 +546,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 4)
|
||||
{
|
||||
@@ -572,7 +572,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 4)
|
||||
{
|
||||
@@ -598,7 +598,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 4)
|
||||
{
|
||||
@@ -624,7 +624,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 2)
|
||||
{
|
||||
@@ -650,7 +650,7 @@ namespace SharpCompress.Converters
|
||||
{
|
||||
if (data == null)
|
||||
{
|
||||
throw new ArgumentNullException("data");
|
||||
throw new ArgumentNullException(nameof(data));
|
||||
}
|
||||
if (data.Length - index < 2)
|
||||
{
|
||||
|
||||
@@ -12,7 +12,7 @@ namespace Org.BouncyCastle.Crypto.Parameters
|
||||
{
|
||||
if (key == null)
|
||||
{
|
||||
throw new ArgumentNullException("key");
|
||||
throw new ArgumentNullException(nameof(key));
|
||||
}
|
||||
|
||||
this.key = (byte[])key.Clone();
|
||||
@@ -25,15 +25,15 @@ namespace Org.BouncyCastle.Crypto.Parameters
|
||||
{
|
||||
if (key == null)
|
||||
{
|
||||
throw new ArgumentNullException("key");
|
||||
throw new ArgumentNullException(nameof(key));
|
||||
}
|
||||
if (keyOff < 0 || keyOff > key.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("keyOff");
|
||||
throw new ArgumentOutOfRangeException(nameof(keyOff));
|
||||
}
|
||||
if (keyLen < 0 || (keyOff + keyLen) > key.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException("keyLen");
|
||||
throw new ArgumentOutOfRangeException(nameof(keyLen));
|
||||
}
|
||||
|
||||
this.key = new byte[keyLen];
|
||||
|
||||
@@ -41,7 +41,7 @@ namespace SharpCompress.IO
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
public override long Length => BytesLeftToRead;
|
||||
|
||||
public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using SharpCompress.Compressors.Filters;
|
||||
|
||||
namespace SharpCompress.IO
|
||||
{
|
||||
@@ -46,8 +47,13 @@ namespace SharpCompress.IO
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
bufferStream.TransferTo(buffer);
|
||||
bufferStream = buffer;
|
||||
//create new memorystream to allow proper resizing as memorystream could be a user provided buffer
|
||||
//https://github.com/adamhathcock/sharpcompress/issues/306
|
||||
bufferStream = new MemoryStream();
|
||||
buffer.Position = 0;
|
||||
buffer.TransferTo(bufferStream);
|
||||
bufferStream.Position = 0;
|
||||
}
|
||||
isRewound = true;
|
||||
@@ -105,6 +111,12 @@ namespace SharpCompress.IO
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
//don't actually read if we don't really want to read anything
|
||||
//currently a network stream bug on Windows for .NET Core
|
||||
if (count == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int read;
|
||||
if (isRewound && bufferStream.Position != bufferStream.Length)
|
||||
{
|
||||
|
||||
@@ -139,8 +139,6 @@ namespace SharpCompress.Readers
|
||||
}
|
||||
}
|
||||
|
||||
private readonly byte[] skipBuffer = new byte[4096];
|
||||
|
||||
private void Skip()
|
||||
{
|
||||
if (ArchiveType != ArchiveType.Rar
|
||||
@@ -148,25 +146,21 @@ namespace SharpCompress.Readers
|
||||
&& Entry.CompressedSize > 0)
|
||||
{
|
||||
//not solid and has a known compressed size then we can skip raw bytes.
|
||||
var rawStream = Entry.Parts.First().GetRawStream();
|
||||
var part = Entry.Parts.First();
|
||||
var rawStream = part.GetRawStream();
|
||||
|
||||
if (rawStream != null)
|
||||
{
|
||||
var bytesToAdvance = Entry.CompressedSize;
|
||||
for (var i = 0; i < bytesToAdvance / skipBuffer.Length; i++)
|
||||
{
|
||||
rawStream.Read(skipBuffer, 0, skipBuffer.Length);
|
||||
}
|
||||
rawStream.Read(skipBuffer, 0, (int)(bytesToAdvance % skipBuffer.Length));
|
||||
rawStream.Skip(bytesToAdvance);
|
||||
part.Skipped = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
//don't know the size so we have to try to decompress to skip
|
||||
using (var s = OpenEntryStream())
|
||||
{
|
||||
while (s.Read(skipBuffer, 0, skipBuffer.Length) > 0)
|
||||
{
|
||||
}
|
||||
s.Skip();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -29,11 +29,11 @@ namespace SharpCompress.Readers.GZip
|
||||
return new GZipReader(stream, options ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
#endregion
|
||||
#endregion Open
|
||||
|
||||
internal override IEnumerable<GZipEntry> GetEntries(Stream stream)
|
||||
{
|
||||
return GZipEntry.GetEntries(stream);
|
||||
return GZipEntry.GetEntries(stream, Options);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -8,6 +8,7 @@ namespace SharpCompress.Readers
|
||||
/// Look for RarArchive (Check for self-extracting archives or cases where RarArchive isn't at the start of the file)
|
||||
/// </summary>
|
||||
public bool LookForHeader { get; set; }
|
||||
|
||||
public string Password { get; set; }
|
||||
}
|
||||
}
|
||||
@@ -114,11 +114,11 @@ namespace SharpCompress.Readers.Tar
|
||||
return new TarReader(rewindableStream, options, CompressionType.None);
|
||||
}
|
||||
|
||||
#endregion
|
||||
#endregion Open
|
||||
|
||||
internal override IEnumerable<TarEntry> GetEntries(Stream stream)
|
||||
{
|
||||
return TarEntry.GetEntries(StreamingMode.Streaming, stream, compressionType);
|
||||
return TarEntry.GetEntries(StreamingMode.Streaming, stream, compressionType, Options.ArchiveEncoding);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -8,13 +8,13 @@ namespace SharpCompress.Readers.Zip
|
||||
{
|
||||
public class ZipReader : AbstractReader<ZipEntry, ZipVolume>
|
||||
{
|
||||
private readonly StreamingZipHeaderFactory headerFactory;
|
||||
private readonly StreamingZipHeaderFactory _headerFactory;
|
||||
|
||||
internal ZipReader(Stream stream, ReaderOptions options)
|
||||
: base(options, ArchiveType.Zip)
|
||||
{
|
||||
Volume = new ZipVolume(stream, options);
|
||||
headerFactory = new StreamingZipHeaderFactory(options.Password);
|
||||
_headerFactory = new StreamingZipHeaderFactory(options.Password, options.ArchiveEncoding);
|
||||
}
|
||||
|
||||
public override ZipVolume Volume { get; }
|
||||
@@ -33,26 +33,26 @@ namespace SharpCompress.Readers.Zip
|
||||
return new ZipReader(stream, options ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
#endregion
|
||||
#endregion Open
|
||||
|
||||
internal override IEnumerable<ZipEntry> GetEntries(Stream stream)
|
||||
{
|
||||
foreach (ZipHeader h in headerFactory.ReadStreamHeader(stream))
|
||||
foreach (ZipHeader h in _headerFactory.ReadStreamHeader(stream))
|
||||
{
|
||||
if (h != null)
|
||||
{
|
||||
switch (h.ZipHeaderType)
|
||||
{
|
||||
case ZipHeaderType.LocalEntry:
|
||||
{
|
||||
yield return new ZipEntry(new StreamingZipFilePart(h as LocalEntryHeader,
|
||||
stream));
|
||||
}
|
||||
{
|
||||
yield return new ZipEntry(new StreamingZipFilePart(h as LocalEntryHeader,
|
||||
stream));
|
||||
}
|
||||
break;
|
||||
case ZipHeaderType.DirectoryEnd:
|
||||
{
|
||||
yield break;
|
||||
}
|
||||
{
|
||||
yield break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,12 +2,11 @@
|
||||
<PropertyGroup>
|
||||
<AssemblyTitle>SharpCompress - Pure C# Decompression/Compression</AssemblyTitle>
|
||||
<NeutralLanguage>en-US</NeutralLanguage>
|
||||
<VersionPrefix>0.17.0</VersionPrefix>
|
||||
<AssemblyVersion>0.17.0.0</AssemblyVersion>
|
||||
<FileVersion>0.17.0.0</FileVersion>
|
||||
<VersionPrefix>0.19.2</VersionPrefix>
|
||||
<AssemblyVersion>0.19.2.0</AssemblyVersion>
|
||||
<FileVersion>0.19.2.0</FileVersion>
|
||||
<Authors>Adam Hathcock</Authors>
|
||||
<TargetFrameworks Condition="'$(LibraryFrameworks)'==''">net45;net35;netstandard1.0;netstandard1.3</TargetFrameworks>
|
||||
<TargetFrameworks Condition="'$(LibraryFrameworks)'!=''">$(LibraryFrameworks)</TargetFrameworks>
|
||||
<TargetFrameworks Condition="'$(LibraryFrameworks)'==''">net45;net35;netstandard1.0;netstandard1.3;netstandard2.0</TargetFrameworks>
|
||||
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
|
||||
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
|
||||
<AssemblyName>SharpCompress</AssemblyName>
|
||||
@@ -15,14 +14,20 @@
|
||||
<SignAssembly>true</SignAssembly>
|
||||
<PublicSign Condition=" '$(OS)' != 'Windows_NT' ">true</PublicSign>
|
||||
<PackageId>SharpCompress</PackageId>
|
||||
<PackageTags>rar;unrar;zip;unzip;bzip2;gzip;tar;7zip</PackageTags>
|
||||
<PackageTags>rar;unrar;zip;unzip;bzip2;gzip;tar;7zip;lzip;xz</PackageTags>
|
||||
<PackageProjectUrl>https://github.com/adamhathcock/sharpcompress</PackageProjectUrl>
|
||||
<PackageLicenseUrl>https://github.com/adamhathcock/sharpcompress/blob/master/LICENSE.txt</PackageLicenseUrl>
|
||||
<GenerateAssemblyTitleAttribute>false</GenerateAssemblyTitleAttribute>
|
||||
<GenerateAssemblyProductAttribute>false</GenerateAssemblyProductAttribute>
|
||||
<Description>SharpCompress is a compression library for NET Standard 1.0 that can unrar, decompress 7zip, zip/unzip, tar/untar bzip2/unbzip2 and gzip/ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.</Description>
|
||||
<Description>SharpCompress is a compression library for NET Standard 1.0 that can unrar, decompress 7zip, decompress xz, zip/unzip, tar/untar lzip/unlzip, bzip2/unbzip2 and gzip/ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.</Description>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition=" '$(TargetFramework)' == 'netstandard1.0' ">
|
||||
<DefineConstants>$(DefineConstants);NO_FILE;NO_CRYPTO;SILVERLIGHT</DefineConstants>
|
||||
</PropertyGroup>
|
||||
</Project>
|
||||
<PropertyGroup Condition=" '$(TargetFramework)' == 'netstandard1.3' ">
|
||||
<DefineConstants>$(DefineConstants);NETCORE</DefineConstants>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition=" '$(TargetFramework)' == 'netstandard2.0' ">
|
||||
<DefineConstants>$(DefineConstants);NETCORE</DefineConstants>
|
||||
</PropertyGroup>
|
||||
</Project>
|
||||
@@ -2,6 +2,9 @@ using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
#if NETCORE
|
||||
using SharpCompress.Buffers;
|
||||
#endif
|
||||
using SharpCompress.Readers;
|
||||
|
||||
namespace SharpCompress
|
||||
@@ -138,37 +141,61 @@ namespace SharpCompress
|
||||
|
||||
public static void Skip(this Stream source, long advanceAmount)
|
||||
{
|
||||
byte[] buffer = new byte[32 * 1024];
|
||||
int read = 0;
|
||||
int readCount = 0;
|
||||
do
|
||||
if (source.CanSeek)
|
||||
{
|
||||
readCount = buffer.Length;
|
||||
if (readCount > advanceAmount)
|
||||
{
|
||||
readCount = (int)advanceAmount;
|
||||
}
|
||||
read = source.Read(buffer, 0, readCount);
|
||||
if (read <= 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
advanceAmount -= read;
|
||||
if (advanceAmount == 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
source.Position += advanceAmount;
|
||||
return;
|
||||
}
|
||||
|
||||
byte[] buffer = GetTransferByteArray();
|
||||
try
|
||||
{
|
||||
int read = 0;
|
||||
int readCount = 0;
|
||||
do
|
||||
{
|
||||
readCount = buffer.Length;
|
||||
if (readCount > advanceAmount)
|
||||
{
|
||||
readCount = (int)advanceAmount;
|
||||
}
|
||||
read = source.Read(buffer, 0, readCount);
|
||||
if (read <= 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
advanceAmount -= read;
|
||||
if (advanceAmount == 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
while (true);
|
||||
}
|
||||
finally
|
||||
{
|
||||
#if NETCORE
|
||||
ArrayPool<byte>.Shared.Return(buffer);
|
||||
#endif
|
||||
}
|
||||
while (true);
|
||||
}
|
||||
|
||||
public static void SkipAll(this Stream source)
|
||||
public static void Skip(this Stream source)
|
||||
{
|
||||
byte[] buffer = new byte[32 * 1024];
|
||||
do
|
||||
byte[] buffer = GetTransferByteArray();
|
||||
try
|
||||
{
|
||||
do
|
||||
{
|
||||
}
|
||||
while (source.Read(buffer, 0, buffer.Length) == buffer.Length);
|
||||
}
|
||||
finally
|
||||
{
|
||||
#if NETCORE
|
||||
ArrayPool<byte>.Shared.Return(buffer);
|
||||
#endif
|
||||
}
|
||||
while (source.Read(buffer, 0, buffer.Length) == buffer.Length);
|
||||
}
|
||||
|
||||
public static DateTime DosDateToDateTime(UInt16 iDate, UInt16 iTime)
|
||||
@@ -233,30 +260,48 @@ namespace SharpCompress
|
||||
public static long TransferTo(this Stream source, Stream destination)
|
||||
{
|
||||
byte[] array = GetTransferByteArray();
|
||||
int count;
|
||||
long total = 0;
|
||||
while (ReadTransferBlock(source, array, out count))
|
||||
try
|
||||
{
|
||||
total += count;
|
||||
destination.Write(array, 0, count);
|
||||
int count;
|
||||
long total = 0;
|
||||
while (ReadTransferBlock(source, array, out count))
|
||||
{
|
||||
total += count;
|
||||
destination.Write(array, 0, count);
|
||||
}
|
||||
return total;
|
||||
}
|
||||
finally
|
||||
{
|
||||
#if NETCORE
|
||||
ArrayPool<byte>.Shared.Return(array);
|
||||
#endif
|
||||
}
|
||||
return total;
|
||||
}
|
||||
|
||||
public static long TransferTo(this Stream source, Stream destination, Common.Entry entry, IReaderExtractionListener readerExtractionListener)
|
||||
{
|
||||
byte[] array = GetTransferByteArray();
|
||||
int count;
|
||||
var iterations = 0;
|
||||
long total = 0;
|
||||
while (ReadTransferBlock(source, array, out count))
|
||||
try
|
||||
{
|
||||
total += count;
|
||||
destination.Write(array, 0, count);
|
||||
iterations++;
|
||||
readerExtractionListener.FireEntryExtractionProgress(entry, total, iterations);
|
||||
int count;
|
||||
var iterations = 0;
|
||||
long total = 0;
|
||||
while (ReadTransferBlock(source, array, out count))
|
||||
{
|
||||
total += count;
|
||||
destination.Write(array, 0, count);
|
||||
iterations++;
|
||||
readerExtractionListener.FireEntryExtractionProgress(entry, total, iterations);
|
||||
}
|
||||
return total;
|
||||
}
|
||||
finally
|
||||
{
|
||||
#if NETCORE
|
||||
ArrayPool<byte>.Shared.Return(array);
|
||||
#endif
|
||||
}
|
||||
return total;
|
||||
}
|
||||
|
||||
private static bool ReadTransferBlock(Stream source, byte[] array, out int count)
|
||||
@@ -266,7 +311,11 @@ namespace SharpCompress
|
||||
|
||||
private static byte[] GetTransferByteArray()
|
||||
{
|
||||
#if NETCORE
|
||||
return ArrayPool<byte>.Shared.Rent(81920);
|
||||
#else
|
||||
return new byte[81920];
|
||||
#endif
|
||||
}
|
||||
|
||||
public static bool ReadFully(this Stream stream, byte[] buffer)
|
||||
|
||||
@@ -6,29 +6,30 @@ namespace SharpCompress.Writers
|
||||
{
|
||||
public abstract class AbstractWriter : IWriter
|
||||
{
|
||||
private bool closeStream;
|
||||
private bool isDisposed;
|
||||
|
||||
protected AbstractWriter(ArchiveType type)
|
||||
protected AbstractWriter(ArchiveType type, WriterOptions writerOptions)
|
||||
{
|
||||
WriterType = type;
|
||||
WriterOptions = writerOptions;
|
||||
}
|
||||
|
||||
protected void InitalizeStream(Stream stream, bool closeStream)
|
||||
protected void InitalizeStream(Stream stream)
|
||||
{
|
||||
OutputStream = stream;
|
||||
this.closeStream = closeStream;
|
||||
}
|
||||
|
||||
protected Stream OutputStream { get; private set; }
|
||||
|
||||
public ArchiveType WriterType { get; }
|
||||
|
||||
protected WriterOptions WriterOptions { get; }
|
||||
|
||||
public abstract void Write(string filename, Stream source, DateTime? modificationTime);
|
||||
|
||||
protected virtual void Dispose(bool isDisposing)
|
||||
{
|
||||
if (isDisposing && closeStream)
|
||||
if (isDisposing && !WriterOptions.LeaveStreamOpen)
|
||||
{
|
||||
OutputStream.Dispose();
|
||||
}
|
||||
|
||||
@@ -8,12 +8,15 @@ namespace SharpCompress.Writers.GZip
|
||||
{
|
||||
public class GZipWriter : AbstractWriter
|
||||
{
|
||||
private bool wroteToStream;
|
||||
private bool _wroteToStream;
|
||||
|
||||
public GZipWriter(Stream destination, bool leaveOpen = false)
|
||||
: base(ArchiveType.GZip)
|
||||
public GZipWriter(Stream destination, GZipWriterOptions options = null)
|
||||
: base(ArchiveType.GZip, options ?? new GZipWriterOptions())
|
||||
{
|
||||
InitalizeStream(new GZipStream(destination, CompressionMode.Compress, leaveOpen), !leaveOpen);
|
||||
InitalizeStream(new GZipStream(destination, CompressionMode.Compress,
|
||||
options?.CompressionLevel ?? CompressionLevel.Default,
|
||||
WriterOptions.LeaveStreamOpen,
|
||||
WriterOptions.ArchiveEncoding.GetEncoding()));
|
||||
}
|
||||
|
||||
protected override void Dispose(bool isDisposing)
|
||||
@@ -28,7 +31,7 @@ namespace SharpCompress.Writers.GZip
|
||||
|
||||
public override void Write(string filename, Stream source, DateTime? modificationTime)
|
||||
{
|
||||
if (wroteToStream)
|
||||
if (_wroteToStream)
|
||||
{
|
||||
throw new ArgumentException("Can only write a single stream to a GZip file.");
|
||||
}
|
||||
@@ -36,7 +39,7 @@ namespace SharpCompress.Writers.GZip
|
||||
stream.FileName = filename;
|
||||
stream.LastModified = modificationTime;
|
||||
source.TransferTo(stream);
|
||||
wroteToStream = true;
|
||||
_wroteToStream = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
28
src/SharpCompress/Writers/GZip/GZipWriterOptions.cs
Normal file
@@ -0,0 +1,28 @@
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Compressors.Deflate;
|
||||
|
||||
namespace SharpCompress.Writers.GZip
|
||||
{
|
||||
public class GZipWriterOptions : WriterOptions
|
||||
{
|
||||
public GZipWriterOptions()
|
||||
: base(CompressionType.GZip)
|
||||
{
|
||||
}
|
||||
|
||||
internal GZipWriterOptions(WriterOptions options)
|
||||
: base(options.CompressionType)
|
||||
{
|
||||
LeaveStreamOpen = options.LeaveStreamOpen;
|
||||
ArchiveEncoding = options.ArchiveEncoding;
|
||||
|
||||
var writerOptions = options as GZipWriterOptions;
|
||||
if (writerOptions != null)
|
||||
{
|
||||
CompressionLevel = writerOptions.CompressionLevel;
|
||||
}
|
||||
}
|
||||
|
||||
public CompressionLevel CompressionLevel { get; set; } = CompressionLevel.Default;
|
||||
}
|
||||
}
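A minimal usage sketch for the new options type, assuming the SharpCompress.Writers.GZip and SharpCompress.Compressors.Deflate namespaces are imported; the file paths are placeholders:

var options = new GZipWriterOptions { CompressionLevel = CompressionLevel.BestCompression };
using (Stream destination = File.Create("archive.gz"))
using (var writer = new GZipWriter(destination, options))
using (Stream source = File.OpenRead("data.txt"))
{
writer.Write("data.txt", source, DateTime.Now); // a GZip stream holds a single entry
}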
|
||||
@@ -11,9 +11,13 @@ namespace SharpCompress.Writers.Tar
|
||||
{
|
||||
public class TarWriter : AbstractWriter
|
||||
{
|
||||
public TarWriter(Stream destination, WriterOptions options)
|
||||
: base(ArchiveType.Tar)
|
||||
private bool finalizeArchiveOnClose;
|
||||
|
||||
public TarWriter(Stream destination, TarWriterOptions options)
|
||||
: base(ArchiveType.Tar, options)
|
||||
{
|
||||
finalizeArchiveOnClose = options.FinalizeArchiveOnClose;
|
||||
|
||||
if (!destination.CanWrite)
|
||||
{
|
||||
throw new ArgumentException("Tars require writable streams.");
|
||||
@@ -42,7 +46,7 @@ namespace SharpCompress.Writers.Tar
|
||||
throw new InvalidFormatException("Tar does not support compression: " + options.CompressionType);
|
||||
}
|
||||
}
|
||||
InitalizeStream(destination, true);
|
||||
InitalizeStream(destination);
|
||||
}
|
||||
|
||||
public override void Write(string filename, Stream source, DateTime? modificationTime)
|
||||
@@ -72,7 +76,8 @@ namespace SharpCompress.Writers.Tar
|
||||
|
||||
long realSize = size ?? source.Length;
|
||||
|
||||
TarHeader header = new TarHeader();
|
||||
TarHeader header = new TarHeader(WriterOptions.ArchiveEncoding);
|
||||
|
||||
header.LastModifiedTime = modificationTime ?? TarHeader.Epoch;
|
||||
header.Name = NormalizeFilename(filename);
|
||||
header.Size = realSize;
|
||||
@@ -96,8 +101,10 @@ namespace SharpCompress.Writers.Tar
|
||||
{
|
||||
if (isDisposing)
|
||||
{
|
||||
PadTo512(0, true);
|
||||
PadTo512(0, true);
|
||||
if (finalizeArchiveOnClose) {
|
||||
PadTo512(0, true);
|
||||
PadTo512(0, true);
|
||||
}
|
||||
switch (OutputStream)
|
||||
{
|
||||
case BZip2Stream b:
|
||||
|
||||
23
src/SharpCompress/Writers/Tar/TarWriterOptions.cs
Executable file
@@ -0,0 +1,23 @@
|
||||
using SharpCompress.Archives;
|
||||
using SharpCompress.Common;
|
||||
|
||||
namespace SharpCompress.Writers.Tar
|
||||
{
|
||||
public class TarWriterOptions : WriterOptions
|
||||
{
|
||||
/// <summary>
|
||||
/// Indicates whether the archive should be finalized (with two empty 512-byte blocks) on close.
|
||||
/// </summary>
|
||||
public bool FinalizeArchiveOnClose { get; }
|
||||
|
||||
public TarWriterOptions(CompressionType compressionType, bool finalizeArchiveOnClose)
|
||||
: base(compressionType)
|
||||
{
|
||||
FinalizeArchiveOnClose = finalizeArchiveOnClose;
|
||||
}
|
||||
|
||||
internal TarWriterOptions(WriterOptions options) : this(options.CompressionType, true)
|
||||
{
|
||||
}
|
||||
}
|
||||
}
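A comparable sketch for the new tar options, using the public constructor shown above (paths are placeholders):

var options = new TarWriterOptions(CompressionType.None, finalizeArchiveOnClose: true);
using (Stream destination = File.Create("archive.tar"))
using (var writer = new TarWriter(destination, options))
using (Stream source = File.OpenRead("data.txt"))
{
writer.Write("data.txt", source, DateTime.Now);
}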
|
||||
@@ -19,7 +19,7 @@ namespace SharpCompress.Writers
|
||||
{
|
||||
throw new InvalidFormatException("GZip archives only support GZip compression type.");
|
||||
}
|
||||
return new GZipWriter(stream, writerOptions.LeaveStreamOpen);
|
||||
return new GZipWriter(stream, new GZipWriterOptions(writerOptions));
|
||||
}
|
||||
case ArchiveType.Zip:
|
||||
{
|
||||
@@ -27,7 +27,7 @@ namespace SharpCompress.Writers
|
||||
}
|
||||
case ArchiveType.Tar:
|
||||
{
|
||||
return new TarWriter(stream, writerOptions);
|
||||
return new TarWriter(stream, new TarWriterOptions(writerOptions));
|
||||
}
|
||||
default:
|
||||
{
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Zip;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.Converters;
|
||||
@@ -11,14 +12,16 @@ namespace SharpCompress.Writers.Zip
|
||||
{
|
||||
private readonly ZipCompressionMethod compression;
|
||||
private readonly string fileName;
|
||||
private readonly ArchiveEncoding archiveEncoding;
|
||||
|
||||
public ZipCentralDirectoryEntry(ZipCompressionMethod compression, string fileName, ulong headerOffset)
|
||||
public ZipCentralDirectoryEntry(ZipCompressionMethod compression, string fileName, ulong headerOffset, ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
this.compression = compression;
|
||||
this.fileName = fileName;
|
||||
HeaderOffset = headerOffset;
|
||||
this.archiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
|
||||
internal DateTime? ModificationTime { get; set; }
|
||||
internal string Comment { get; set; }
|
||||
internal uint Crc { get; set; }
|
||||
@@ -29,11 +32,11 @@ namespace SharpCompress.Writers.Zip
|
||||
|
||||
internal uint Write(Stream outputStream)
|
||||
{
|
||||
byte[] encodedFilename = Encoding.UTF8.GetBytes(fileName);
|
||||
byte[] encodedComment = Encoding.UTF8.GetBytes(Comment);
|
||||
byte[] encodedFilename = archiveEncoding.Encode(fileName);
|
||||
byte[] encodedComment = archiveEncoding.Encode(Comment);
|
||||
|
||||
var zip64_stream = Compressed >= uint.MaxValue || Decompressed >= uint.MaxValue;
|
||||
var zip64 = zip64_stream || HeaderOffset >= uint.MaxValue || Zip64HeaderOffset != 0;
|
||||
var zip64_stream = Compressed >= uint.MaxValue || Decompressed >= uint.MaxValue;
|
||||
var zip64 = zip64_stream || HeaderOffset >= uint.MaxValue || Zip64HeaderOffset != 0;
|
||||
|
||||
var compressedvalue = zip64 ? uint.MaxValue : (uint)Compressed;
|
||||
var decompressedvalue = zip64 ? uint.MaxValue : (uint)Decompressed;
|
||||
@@ -41,18 +44,18 @@ namespace SharpCompress.Writers.Zip
|
||||
var extralength = zip64 ? (2 + 2 + 8 + 8 + 8 + 4) : 0;
|
||||
var version = (byte)(zip64 ? 45 : 20); // Version 20 required for deflate/encryption
|
||||
|
||||
HeaderFlags flags = HeaderFlags.UTF8;
|
||||
HeaderFlags flags = Equals(archiveEncoding.GetEncoding(), Encoding.UTF8) ? HeaderFlags.UTF8 : HeaderFlags.None;
|
||||
if (!outputStream.CanSeek)
|
||||
{
|
||||
// Cannot use data descriptors with zip64:
|
||||
// https://blogs.oracle.com/xuemingshen/entry/is_zipinput_outputstream_handling_of
|
||||
|
||||
// We check that streams are not written too large in the ZipWritingStream,
|
||||
// so this extra guard is not required, but kept to simplify changing the code
|
||||
// once the zip64 post-data issue is resolved
|
||||
// We check that streams are not written too large in the ZipWritingStream,
|
||||
// so this extra guard is not required, but kept to simplify changing the code
|
||||
// once the zip64 post-data issue is resolved
|
||||
if (!zip64_stream)
|
||||
flags |= HeaderFlags.UsePostDataDescriptor;
|
||||
|
||||
|
||||
if (compression == ZipCompressionMethod.LZMA)
|
||||
{
|
||||
flags |= HeaderFlags.Bit1; // eos marker
|
||||
|
||||
@@ -26,7 +26,7 @@ namespace SharpCompress.Writers.Zip
|
||||
private readonly bool isZip64;
|
||||
|
||||
public ZipWriter(Stream destination, ZipWriterOptions zipWriterOptions)
|
||||
: base(ArchiveType.Zip)
|
||||
: base(ArchiveType.Zip, zipWriterOptions)
|
||||
{
|
||||
zipComment = zipWriterOptions.ArchiveComment ?? string.Empty;
|
||||
isZip64 = zipWriterOptions.UseZip64;
|
||||
@@ -37,7 +37,7 @@ namespace SharpCompress.Writers.Zip
|
||||
|
||||
compressionType = zipWriterOptions.CompressionType;
|
||||
compressionLevel = zipWriterOptions.DeflateCompressionLevel;
|
||||
InitalizeStream(destination, !zipWriterOptions.LeaveStreamOpen);
|
||||
InitalizeStream(destination);
|
||||
}
|
||||
|
||||
private PpmdProperties PpmdProperties
|
||||
@@ -65,6 +65,7 @@ namespace SharpCompress.Writers.Zip
|
||||
}
|
||||
base.Dispose(isDisposing);
|
||||
}
|
||||
|
||||
private static ZipCompressionMethod ToZipCompressionMethod(CompressionType compressionType)
|
||||
{
|
||||
switch (compressionType)
|
||||
@@ -97,9 +98,9 @@ namespace SharpCompress.Writers.Zip
|
||||
public override void Write(string entryPath, Stream source, DateTime? modificationTime)
|
||||
{
|
||||
Write(entryPath, source, new ZipWriterEntryOptions()
|
||||
{
|
||||
ModificationDateTime = modificationTime
|
||||
});
|
||||
{
|
||||
ModificationDateTime = modificationTime
|
||||
});
|
||||
}
|
||||
|
||||
public void Write(string entryPath, Stream source, ZipWriterEntryOptions zipWriterEntryOptions)
|
||||
@@ -117,11 +118,11 @@ namespace SharpCompress.Writers.Zip
|
||||
entryPath = NormalizeFilename(entryPath);
|
||||
options.ModificationDateTime = options.ModificationDateTime ?? DateTime.Now;
|
||||
options.EntryComment = options.EntryComment ?? string.Empty;
|
||||
var entry = new ZipCentralDirectoryEntry(compression, entryPath, (ulong)streamPosition)
|
||||
{
|
||||
Comment = options.EntryComment,
|
||||
ModificationTime = options.ModificationDateTime
|
||||
};
|
||||
var entry = new ZipCentralDirectoryEntry(compression, entryPath, (ulong)streamPosition, WriterOptions.ArchiveEncoding)
|
||||
{
|
||||
Comment = options.EntryComment,
|
||||
ModificationTime = options.ModificationDateTime
|
||||
};
|
||||
|
||||
// Use the archive default setting for zip64 and allow overrides
|
||||
var useZip64 = isZip64;
|
||||
@@ -130,7 +131,7 @@ namespace SharpCompress.Writers.Zip
|
||||
|
||||
var headersize = (uint)WriteHeader(entryPath, options, entry, useZip64);
|
||||
streamPosition += headersize;
|
||||
return new ZipWritingStream(this, OutputStream, entry, compression,
|
||||
return new ZipWritingStream(this, OutputStream, entry, compression,
|
||||
options.DeflateCompressionLevel ?? compressionLevel);
|
||||
}
|
||||
|
||||
@@ -149,12 +150,12 @@ namespace SharpCompress.Writers.Zip
|
||||
|
||||
private int WriteHeader(string filename, ZipWriterEntryOptions zipWriterEntryOptions, ZipCentralDirectoryEntry entry, bool useZip64)
|
||||
{
|
||||
// We err on the side of caution until the zip specification clarifies how to support this
|
||||
if (!OutputStream.CanSeek && useZip64)
|
||||
throw new NotSupportedException("Zip64 extensions are not supported on non-seekable streams");
|
||||
// We err on the side of caution until the zip specification clarifies how to support this
|
||||
if (!OutputStream.CanSeek && useZip64)
|
||||
throw new NotSupportedException("Zip64 extensions are not supported on non-seekable streams");
|
||||
|
||||
var explicitZipCompressionInfo = ToZipCompressionMethod(zipWriterEntryOptions.CompressionType ?? compressionType);
|
||||
byte[] encodedFilename = ArchiveEncoding.Default.GetBytes(filename);
|
||||
byte[] encodedFilename = WriterOptions.ArchiveEncoding.Encode(filename);
|
||||
|
||||
OutputStream.Write(DataConverter.LittleEndian.GetBytes(ZipHeaderFactory.ENTRY_HEADER_BYTES), 0, 4);
|
||||
if (explicitZipCompressionInfo == ZipCompressionMethod.Deflate)
|
||||
@@ -162,17 +163,17 @@ namespace SharpCompress.Writers.Zip
|
||||
if (OutputStream.CanSeek && useZip64)
|
||||
OutputStream.Write(new byte[] { 45, 0 }, 0, 2); //smallest allowed version for zip64
|
||||
else
|
||||
OutputStream.Write(new byte[] { 20, 0 }, 0, 2); //older version which is more compatible
|
||||
OutputStream.Write(new byte[] { 20, 0 }, 0, 2); //older version which is more compatible
|
||||
}
|
||||
else
|
||||
{
|
||||
OutputStream.Write(new byte[] { 63, 0 }, 0, 2); //version says we used PPMd or LZMA
|
||||
}
|
||||
HeaderFlags flags = ArchiveEncoding.Default == Encoding.UTF8 ? HeaderFlags.UTF8 : 0;
|
||||
HeaderFlags flags = Equals(WriterOptions.ArchiveEncoding.GetEncoding(), Encoding.UTF8) ? HeaderFlags.UTF8 : 0;
|
||||
if (!OutputStream.CanSeek)
|
||||
{
|
||||
flags |= HeaderFlags.UsePostDataDescriptor;
|
||||
|
||||
|
||||
if (explicitZipCompressionInfo == ZipCompressionMethod.LZMA)
|
||||
{
|
||||
flags |= HeaderFlags.Bit1; // eos marker
|
||||
@@ -213,11 +214,11 @@ namespace SharpCompress.Writers.Zip
|
||||
|
||||
private void WriteEndRecord(ulong size)
|
||||
{
|
||||
byte[] encodedComment = ArchiveEncoding.Default.GetBytes(zipComment);
|
||||
byte[] encodedComment = WriterOptions.ArchiveEncoding.Encode(zipComment);
|
||||
var zip64 = isZip64 || entries.Count > ushort.MaxValue || streamPosition >= uint.MaxValue || size >= uint.MaxValue;
|
||||
|
||||
var sizevalue = size >= uint.MaxValue ? uint.MaxValue : (uint)size;
|
||||
var streampositionvalue = streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)streamPosition;
|
||||
var streampositionvalue = streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)streamPosition;
|
||||
|
||||
if (zip64)
|
||||
{
|
||||
@@ -250,7 +251,7 @@ namespace SharpCompress.Writers.Zip
|
||||
}
|
||||
|
||||
// Write normal end of central directory record
|
||||
OutputStream.Write(new byte[] {80, 75, 5, 6, 0, 0, 0, 0}, 0, 8);
|
||||
OutputStream.Write(new byte[] { 80, 75, 5, 6, 0, 0, 0, 0 }, 0, 8);
|
||||
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)entries.Count), 0, 2);
|
||||
OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)entries.Count), 0, 2);
|
||||
OutputStream.Write(DataConverter.LittleEndian.GetBytes(sizevalue), 0, 4);
|
||||
@@ -273,10 +274,11 @@ namespace SharpCompress.Writers.Zip
|
||||
private CountingWritableSubStream counting;
|
||||
private ulong decompressed;
|
||||
|
||||
// Flag to prevent throwing exceptions on Dispose
|
||||
private bool limitsExceeded;
|
||||
// Flag to prevent throwing exceptions on Dispose
|
||||
private bool limitsExceeded;
|
||||
private bool isDisposed;
|
||||
|
||||
internal ZipWritingStream(ZipWriter writer, Stream originalStream, ZipCentralDirectoryEntry entry,
|
||||
internal ZipWritingStream(ZipWriter writer, Stream originalStream, ZipCentralDirectoryEntry entry,
|
||||
ZipCompressionMethod zipCompressionMethod, CompressionLevel compressionLevel)
|
||||
{
|
||||
this.writer = writer;
|
||||
@@ -305,108 +307,115 @@ namespace SharpCompress.Writers.Zip
                switch (zipCompressionMethod)
                {
                    case ZipCompressionMethod.None:
                    {
                        return output;
                    }
                    case ZipCompressionMethod.Deflate:
                    {
                        return new DeflateStream(counting, CompressionMode.Compress, compressionLevel,
                                                 true);
                    }
                    case ZipCompressionMethod.BZip2:
                    {
                        return new BZip2Stream(counting, CompressionMode.Compress, true);
                    }
                    case ZipCompressionMethod.LZMA:
                    {
                        counting.WriteByte(9);
                        counting.WriteByte(20);
                        counting.WriteByte(5);
                        counting.WriteByte(0);

                        LzmaStream lzmaStream = new LzmaStream(new LzmaEncoderProperties(!originalStream.CanSeek),
                                                               false, counting);
                        counting.Write(lzmaStream.Properties, 0, lzmaStream.Properties.Length);
                        return lzmaStream;
                    }
                    case ZipCompressionMethod.PPMd:
                    {
                        counting.Write(writer.PpmdProperties.Properties, 0, 2);
                        return new PpmdStream(writer.PpmdProperties, counting, true);
                    }
                    default:
                    {
                        throw new NotSupportedException("CompressionMethod: " + zipCompressionMethod);
                    }
                }
            }

            protected override void Dispose(bool disposing)
            {
                if (isDisposed)
                {
                    return;
                }

                isDisposed = true;

                base.Dispose(disposing);
                if (disposing)
                {
                    writeStream.Dispose();

                    if (limitsExceeded)
                    {
                        // We have written invalid data into the archive,
                        // so we destroy it now, instead of allowing the user to continue
                        // with a defunct archive
                        originalStream.Dispose();
                        return;
                    }

                    entry.Crc = (uint)crc.Crc32Result;
                    entry.Compressed = counting.Count;
                    entry.Decompressed = decompressed;

                    var zip64 = entry.Compressed >= uint.MaxValue || entry.Decompressed >= uint.MaxValue;
                    var compressedvalue = zip64 ? uint.MaxValue : (uint)counting.Count;
                    var decompressedvalue = zip64 ? uint.MaxValue : (uint)entry.Decompressed;

                    if (originalStream.CanSeek)
                    {
                        originalStream.Position = (long)(entry.HeaderOffset + 6);
                        originalStream.WriteByte(0);

                        originalStream.Position = (long)(entry.HeaderOffset + 14);

                        writer.WriteFooter(entry.Crc, compressedvalue, decompressedvalue);

                        // Ideally, we should not throw from Dispose()
                        // We should not get here as the Write call checks the limits
                        if (zip64 && entry.Zip64HeaderOffset == 0)
                            throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");

                        // If we have pre-allocated space for zip64 data,
                        // fill it out, even if it is not required
                        if (entry.Zip64HeaderOffset != 0)
                        {
                            originalStream.Position = (long)(entry.HeaderOffset + entry.Zip64HeaderOffset);
                            originalStream.Write(DataConverter.LittleEndian.GetBytes((ushort)0x0001), 0, 2);
                            originalStream.Write(DataConverter.LittleEndian.GetBytes((ushort)(8 + 8)), 0, 2);

                            originalStream.Write(DataConverter.LittleEndian.GetBytes(entry.Decompressed), 0, 8);
                            originalStream.Write(DataConverter.LittleEndian.GetBytes(entry.Compressed), 0, 8);
                        }

                        originalStream.Position = writer.streamPosition + (long)entry.Compressed;
                        writer.streamPosition += (long)entry.Compressed;
                    }
                    else
                    {
                        // We have a streaming archive, so we should add a post-data-descriptor,
                        // but we cannot as it does not hold the zip64 values
                        // Throwing an exception until the zip specification is clarified

                        // Ideally, we should not throw from Dispose()
                        // We should not get here as the Write call checks the limits
                        if (zip64)
                            throw new NotSupportedException("Streams larger than 4GiB are not supported for non-seekable streams");

                        originalStream.Write(DataConverter.LittleEndian.GetBytes(ZipHeaderFactory.POST_DATA_DESCRIPTOR), 0, 4);
                        writer.WriteFooter(entry.Crc,
                                           (uint)compressedvalue,
                                           (uint)decompressedvalue);
                        writer.streamPosition += (long)entry.Compressed + 16;
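The 4 GiB guards in this stream come from the zip64 handling above: with zip64 enabled, a placeholder extra field is reserved in the local header and filled in on Dispose; without it, the writer has to throw. A hedged sketch of opting in from the caller's side (file names illustrative):

    using System.IO;
    using SharpCompress.Common;
    using SharpCompress.Writers.Zip;

    // Sketch: UseZip64 reserves the zip64 extra field so entries over 4GiB
    // do not hit the NotSupportedException thrown above. The destination
    // must be seekable, because the real sizes are patched in afterwards.
    using (Stream destination = File.Create("big.zip"))
    using (var writer = new ZipWriter(destination, new ZipWriterOptions(CompressionType.Deflate) { UseZip64 = true }))
    using (Stream source = File.OpenRead("huge-file.iso"))
    {
        writer.Write("huge-file.iso", source, null);
    }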
@@ -437,36 +446,35 @@ namespace SharpCompress.Writers.Zip

            public override void Write(byte[] buffer, int offset, int count)
            {
                // We check the limits first, because we can keep the archive consistent
                // if we can prevent the writes from happening
                if (entry.Zip64HeaderOffset == 0)
                {
                    // Pre-check, the counting.Count is not exact, as we do not know the size before having actually compressed it
                    if (limitsExceeded || ((decompressed + (uint)count) > uint.MaxValue) || (counting.Count + (uint)count) > uint.MaxValue)
                        throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
                }

                decompressed += (uint)count;
                crc.SlurpBlock(buffer, offset, count);
                writeStream.Write(buffer, offset, count);

                if (entry.Zip64HeaderOffset == 0)
                {
                    // Post-check, this is accurate
                    if ((decompressed > uint.MaxValue) || counting.Count > uint.MaxValue)
                    {
                        // We have written the data, so the archive is now broken
                        // Throwing the exception here, allows us to avoid
                        // throwing an exception in Dispose() which is discouraged
                        // as it can mask other errors
                        limitsExceeded = true;
                        throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
                    }
                }
            }
        }

-       #endregion
+       #endregion Nested type: ZipWritingStream
    }
}
@@ -15,8 +15,15 @@ namespace SharpCompress.Writers.Zip
            : base(options.CompressionType)
        {
            LeaveStreamOpen = options.LeaveStreamOpen;
-           if (options is ZipWriterOptions)
-               UseZip64 = ((ZipWriterOptions)options).UseZip64;
+           ArchiveEncoding = options.ArchiveEncoding;
+
+           var writerOptions = options as ZipWriterOptions;
+           if (writerOptions != null)
+           {
+               UseZip64 = writerOptions.UseZip64;
+               DeflateCompressionLevel = writerOptions.DeflateCompressionLevel;
+               ArchiveComment = writerOptions.ArchiveComment;
+           }
        }

        /// <summary>
        /// When CompressionType.Deflate is used, this property is referenced. Defaults to CompressionLevel.Default.
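With the constructor change above, DeflateCompressionLevel, ArchiveComment and the archive encoding now survive the copy from the caller's options into the writer. A hedged configuration sketch (values are illustrative):

    using System.IO;
    using SharpCompress.Common;
    using SharpCompress.Compressors.Deflate;
    using SharpCompress.Writers.Zip;

    // Sketch: the ZipWriterOptions members that the copy constructor now carries over.
    var options = new ZipWriterOptions(CompressionType.Deflate)
    {
        DeflateCompressionLevel = CompressionLevel.BestCompression,
        ArchiveComment = "built by CI",
        UseZip64 = false
    };

    using (var writer = new ZipWriter(File.Create("out.zip"), options))
    using (Stream source = File.OpenRead("data.bin"))
    {
        writer.Write("data.bin", source, null);
    }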
@@ -40,7 +40,7 @@ namespace SharpCompress.Test
                foreach (var entry in archive.Entries.Where(entry => !entry.IsDirectory))
                {
                    entry.WriteToDirectory(SCRATCH_FILES_PATH,
-                                          new ExtractionOptions()
+                                          new ExtractionOptions
                                           {
                                               ExtractFullPath = true,
                                               Overwrite = true
@@ -51,24 +51,24 @@ namespace SharpCompress.Test
                }
            }

-       protected void ArchiveStreamRead(string testArchive)
+       protected void ArchiveStreamRead(string testArchive, ReaderOptions readerOptions = null)
        {
            testArchive = Path.Combine(TEST_ARCHIVES_PATH, testArchive);
-           ArchiveStreamRead(testArchive.AsEnumerable());
+           ArchiveStreamRead(readerOptions, testArchive.AsEnumerable());
        }

-       protected void ArchiveStreamRead(params string[] testArchives)
+       protected void ArchiveStreamRead(ReaderOptions readerOptions = null, params string[] testArchives)
        {
-           ArchiveStreamRead(testArchives.Select(x => Path.Combine(TEST_ARCHIVES_PATH, x)));
+           ArchiveStreamRead(readerOptions, testArchives.Select(x => Path.Combine(TEST_ARCHIVES_PATH, x)));
        }

-       protected void ArchiveStreamRead(IEnumerable<string> testArchives)
+       protected void ArchiveStreamRead(ReaderOptions readerOptions, IEnumerable<string> testArchives)
        {
            foreach (var path in testArchives)
            {
                ResetScratch();
                using (Stream stream = File.OpenRead(path))
-               using (var archive = ArchiveFactory.Open(stream))
+               using (var archive = ArchiveFactory.Open(stream, readerOptions))
                {
                    foreach (var entry in archive.Entries.Where(entry => !entry.IsDirectory))
                    {
@@ -83,17 +83,17 @@ namespace SharpCompress.Test
            }
        }

-       protected void ArchiveFileRead(string testArchive)
+       protected void ArchiveFileRead(string testArchive, ReaderOptions readerOptions = null)
        {
            testArchive = Path.Combine(TEST_ARCHIVES_PATH, testArchive);
-           ArchiveFileRead(testArchive.AsEnumerable());
+           ArchiveFileRead(testArchive.AsEnumerable(), readerOptions);
        }
-       protected void ArchiveFileRead(IEnumerable<string> testArchives)
+       protected void ArchiveFileRead(IEnumerable<string> testArchives, ReaderOptions readerOptions = null)
        {
            foreach (var path in testArchives)
            {
                ResetScratch();
-               using (var archive = ArchiveFactory.Open(path))
+               using (var archive = ArchiveFactory.Open(path, readerOptions))
                {
                    //archive.EntryExtractionBegin += archive_EntryExtractionBegin;
                    //archive.FilePartExtractionBegin += archive_FilePartExtractionBegin;
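The helpers above now thread a ReaderOptions through to ArchiveFactory.Open, which is how a caller supplies a password or other read settings. A hedged stand-alone equivalent of what the helper does per archive (archive name, password and output directory are illustrative):

    using System.IO;
    using System.Linq;
    using SharpCompress.Archives;
    using SharpCompress.Common;
    using SharpCompress.Readers;

    // Sketch: passing ReaderOptions straight to ArchiveFactory, as the
    // ArchiveStreamRead/ArchiveFileRead helpers above now do.
    using (var archive = ArchiveFactory.Open("Zip.pkware.zip", new ReaderOptions { Password = "test" }))
    {
        foreach (var entry in archive.Entries.Where(e => !e.IsDirectory))
        {
            entry.WriteToDirectory("Scratch", new ExtractionOptions { ExtractFullPath = true, Overwrite = true });
        }
    }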
@@ -35,10 +35,10 @@ namespace SharpCompress.Test.Rar
            ResetScratch();
            using (Stream stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, testArchive)))
            using (var archive = RarArchive.Open(stream, new ReaderOptions()
            {
                Password = password,
                LeaveStreamOpen = true
            }))
            {
                foreach (var entry in archive.Entries)
                {
@@ -66,10 +66,10 @@ namespace SharpCompress.Test.Rar
        {
            ResetScratch();
            using (var archive = RarArchive.Open(Path.Combine(TEST_ARCHIVES_PATH, archiveName), new ReaderOptions()
            {
                Password = password,
                LeaveStreamOpen = true
            }))
            {
                foreach (var entry in archive.Entries.Where(entry => !entry.IsDirectory))
                {
@@ -120,12 +120,12 @@ namespace SharpCompress.Test.Rar
        public void Rar_Jpg_ArchiveStreamRead()
        {
            ResetScratch();
-           using (var stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "RarJpeg.jpg")))
+           using (var stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "Rarjpeg.jpg")))
            {
                using (var archive = RarArchive.Open(stream, new ReaderOptions()
                {
                    LookForHeader = true
                }))
                {
                    foreach (var entry in archive.Entries.Where(entry => !entry.IsDirectory))
                    {
@@ -224,7 +224,7 @@ namespace SharpCompress.Test.Rar
                using (var archive = RarArchive.Open(stream))
                {
                    Assert.False(archive.IsSolid);
-                   Assert.True(archive.Entries.Any(entry => entry.IsDirectory));
+                   Assert.Contains(true, archive.Entries.Select(entry => entry.IsDirectory));
                }
            }
        }
@@ -233,10 +233,10 @@ namespace SharpCompress.Test.Rar
        public void Rar_Jpg_ArchiveFileRead()
        {
            ResetScratch();
-           using (var archive = RarArchive.Open(Path.Combine(TEST_ARCHIVES_PATH, "RarJpeg.jpg"), new ReaderOptions()
+           using (var archive = RarArchive.Open(Path.Combine(TEST_ARCHIVES_PATH, "Rarjpeg.jpg"), new ReaderOptions()
            {
                LookForHeader = true
            }))
            {
                foreach (var entry in archive.Entries.Where(entry => !entry.IsDirectory))
                {
@@ -12,14 +12,14 @@ namespace SharpCompress.Test.Rar
    public class RarHeaderFactoryTest : TestBase
    {
        private readonly RarHeaderFactory rarHeaderFactory;

        public RarHeaderFactoryTest()
        {
            ResetScratch();
            rarHeaderFactory = new RarHeaderFactory(StreamingMode.Seekable, new ReaderOptions()
            {
                LeaveStreamOpen = true
            });
        }

@@ -27,7 +27,7 @@ namespace SharpCompress.Test.Rar
        public void ReadHeaders_RecognizeEncryptedFlag()
        {
-           ReadEncryptedFlag("Rar.Encrypted_filesAndHeader.rar", true);
+           ReadEncryptedFlag("Rar.encrypted_filesAndHeader.rar", true);

@@ -149,9 +149,9 @@ namespace SharpCompress.Test.Rar
            ResetScratch();
            using (Stream stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, testArchive)))
            using (var reader = RarReader.Open(stream, new ReaderOptions()
            {
                Password = password
            }))
            {
                while (reader.MoveToNextEntry())
                {
@@ -209,9 +209,9 @@ namespace SharpCompress.Test.Rar
            ResetScratch();
            using (var stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "Audio_program.rar")))
            using (var reader = RarReader.Open(stream, new ReaderOptions()
            {
                LookForHeader = true
            }))
            {
                while (reader.MoveToNextEntry())
                {
@@ -231,11 +231,11 @@ namespace SharpCompress.Test.Rar
        public void Rar_Jpg_Reader()
        {
            ResetScratch();
-           using (var stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "RarJpeg.jpg")))
+           using (var stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "Rarjpeg.jpg")))
            using (var reader = RarReader.Open(stream, new ReaderOptions()
            {
                LookForHeader = true
            }))
            {
                while (reader.MoveToNextEntry())
                {
@@ -262,9 +262,9 @@ namespace SharpCompress.Test.Rar
            ResetScratch();
            using (var stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "Rar.solid.rar")))
            using (var reader = RarReader.Open(stream, new ReaderOptions()
            {
                LookForHeader = true
            }))
            {
                while (reader.MoveToNextEntry())
                {
@@ -287,9 +287,9 @@ namespace SharpCompress.Test.Rar
            ResetScratch();
            using (var stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "Rar.rar")))
            using (var reader = RarReader.Open(stream, new ReaderOptions()
            {
                LookForHeader = true
            }))
            {
                while (reader.MoveToNextEntry())
                {
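All of the reader tests above share one pattern: open a RarReader over a raw stream, optionally with LookForHeader for archives embedded in another file (such as the Rarjpeg.jpg fixture), then walk entries with MoveToNextEntry. A hedged sketch of the extraction step that typically follows inside that loop (paths illustrative):

    using System.IO;
    using SharpCompress.Common;
    using SharpCompress.Readers;
    using SharpCompress.Readers.Rar;

    // Sketch: forward-only extraction with RarReader, matching the loops above.
    using (var stream = File.OpenRead("Rar.rar"))
    using (var reader = RarReader.Open(stream, new ReaderOptions { LookForHeader = true }))
    {
        while (reader.MoveToNextEntry())
        {
            if (!reader.Entry.IsDirectory)
            {
                reader.WriteEntryToDirectory("Scratch", new ExtractionOptions { ExtractFullPath = true, Overwrite = true });
            }
        }
    }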
Some files were not shown because too many files have changed in this diff.