Compare commits

...

62 Commits

Author SHA1 Message Date
Adam Hathcock
0cab9bd4b4 Mark for 0.20.0 2018-03-24 07:42:20 +00:00
Adam Hathcock
279d305013 Merge pull request #359 from prettierci-commits/prettierci-master-1521104105
PrettierCI master Sync
2018-03-15 08:56:49 +00:00
PrettierCI
750c1fb069 Sync with Prettier 2018-03-15 08:55:06 +00:00
Adam Hathcock
359a6042cd Merge pull request #352 from adamhathcock/cake-026
Cake 0.26
2018-03-01 15:40:30 +00:00
Adam Hathcock
e27d2ec660 Remove netcoreapp1.x testing 2018-03-01 15:35:55 +00:00
Adam Hathcock
da56bfc01f Merge pull request #354 from frabar666/deflate64-decompress
Support Deflate64 decompression
2018-03-01 09:14:06 +00:00
frabar666
6e2c7d2857 support Deflate64 decompression 2018-02-27 23:31:11 +01:00
Adam Hathcock
5481609554 Build with new cake 2018-02-27 08:52:55 +00:00
Frederik Carlier
a62f4df0b1 Implement entry.ToString(), let it return entry.Key (#351) 2018-02-16 13:43:23 +00:00
Adam Hathcock
f893c1272c Merge pull request #337 from 4ybaka/issue-323-tar-archive-finalization
Added ability to leave tar archive open after stream is closed
2018-01-14 19:52:08 +00:00
Dmitry
e701f5277e Merge branch 'master' into issue-323-tar-archive-finalization 2018-01-13 00:47:04 +01:00
Dmitry Nesterov
f85fd1f6a4 Added ability to leave tar archive open after stream is closed 2018-01-13 00:44:42 +01:00
Dmitry Nesterov
8f7ea420b3 Revert "Added ability to leave tar archive open after stream is closed"
This reverts commit 9092ecf331.
2018-01-13 00:41:35 +01:00
Adam Hathcock
d8c8dabb52 Merge pull request #336 from diontools/ImproveStreamSkipping
Utility.Skip uses seek
2018-01-10 11:23:24 +00:00
Dmitry Nesterov
9092ecf331 Added ability to leave tar archive open after stream is closed 2018-01-04 22:57:32 +01:00
diontools
2fd9fe96ad Utility.Skip uses seek 2018-01-03 00:23:34 +09:00
Adam Hathcock
02f68b793c Mark for 0.19.2 2017-12-16 09:08:17 +00:00
Adam Hathcock
57b9133a0f Change namespace and visibility to avoid collisions (#333) 2017-12-16 09:05:21 +00:00
Adam Hathcock
815f5e09e8 Mark for 0.19.1 2017-12-15 14:46:14 +00:00
Adam Hathcock
5bdf01ee59 Absorb arraypool from CoreFX (#331) 2017-12-15 14:45:02 +00:00
Adam Hathcock
bd9417e74c Mark for 0.19 2017-12-12 11:17:57 +00:00
Adam Hathcock
694e869162 Use arraypool for transfer/skip (#326)
* Use arraypool for transfer/skip

* Merge fixes

* Remove redundant constant
2017-12-08 13:58:38 +00:00
Adam Hathcock
45845f8963 Add Circle CI build 2017-12-08 12:03:28 +00:00
Adam Hathcock
a8b6def76a Netcore2 (#302)
* Add netstandard 2.0 target and netcoreapp2.0 tests

* Update xunit

* set tests explicitly to netcore2

* update travis

* Don't say build as netcoreapp1.0

* try adding dotnet 1 too

* Remove .NET Core 1 support

* switch to circle

* update cake

* fix circle build

* try fix file ending test again

* Fix casing on files

* Another casing fix

* Add back netstandard1.0

* Finish adding netstandard 1.0 back

* Add netstandard1.3 back
2017-12-08 12:00:29 +00:00
Sors
a4ebd5fb3d Rar 5 format (#310)
Fix rar 5 format comment
2017-12-04 18:59:49 +00:00
Adam Hathcock
3da3b212fa create new memorystream to allow proper resizing as memorystream could be a user provided buffer. Update xunit (#307) 2017-12-04 18:48:38 +00:00
Martijn Kant
c2528cf93e Mk/add support for extracting password protected LZMA(2) 7z archives (#324)
* Added possibility to decompress a password protected 7z LZMA archive

* Fix tests
2017-12-04 10:55:30 +00:00
coderb
550fecd4d3 bugfix: eliminate spurious rar crc exception when Read() is called with count = 0 (#313) 2017-10-23 11:58:02 +01:00
Adam Hathcock
50b01428b4 Mark for 0.18.2 2017-09-22 09:16:42 +01:00
Thritton
bb59f28b22 Update ArchiveReader.cs (#303)
#227
Added check if argument is in range in method TranslateTime(long? time)
2017-09-19 15:25:10 +01:00
François
7064cda6de Zlib: fix Adler32 implementation (#301) 2017-09-17 22:21:09 +01:00
Adam Hathcock
525c1873e8 Fix merge 2017-09-17 22:16:57 +01:00
François
3d91b4eb5e XZ: fix padding issues (#300)
* XZ: fix variable-length integers decoding

* XZ: fix block and index padding issues

* cleanup in XZStreamTests
2017-09-17 22:14:23 +01:00
François
f20c03180e XZ: fix variable-length integers decoding (#299) 2017-09-17 22:05:20 +01:00
Vladimir Kozlov
08fee76b4e Fixes Double Dispose() of ZipWritingStream #294 https://github.com/adamhathcock/sharpcompress/issues/294 (#295) 2017-09-08 13:25:53 +01:00
Adam Hathcock
0f511c4b2a Mark for 0.18.1 2017-08-17 11:43:34 +01:00
twirpx
42d9dfd117 Fixed bug: Passing default ReaderOptions when creating ZipReader for solid extraction (#287) 2017-08-16 08:19:23 +01:00
Adam Hathcock
3983db08ff Use nameof 2017-07-27 11:05:33 -05:00
Adam Hathcock
72114bceea Add release link 2017-07-17 10:22:58 -05:00
Adam Hathcock
c303f96682 mark for 0.18 2017-07-17 10:11:27 -05:00
Adam Hathcock
0e785968c4 Rework usage of WriterOptions for writers since it was inconsistently used. (#271) 2017-07-17 11:05:42 -04:00
Adam Hathcock
15110e18e2 Don't skip ZipReader data twice. (#272)
* Don't skip ZipReader data twice.

* Add archive for a new test
2017-07-17 11:05:21 -04:00
Adam Hathcock
5465af041b Use Skip and ReadFully extension methods where possible. (#276) 2017-07-17 10:55:22 -04:00
Adam Hathcock
310d56fc16 Made ArchiveEncoding a non-static class that is used with options. (#274)
* Made ArchiveEncoding a non-static class that is used with options.

* Revert some formatting.

* Optional string decoder delegate (#278)
2017-07-17 10:53:20 -04:00
eklann
231258ef69 Force encoding (#266)
* Fixing build

* Fixing build

* Fixing build

* Fixed build (seems working now)

* Added support to force specific encoding when reading or writing an archive

* Minor fixed related to force encoding

* Removed obsolete project file not present in master
2017-07-05 10:15:49 -05:00
Sam Bott
16b7e3ffc8 Add XZ tests (#258)
* tests added and converted to xunit

* reordered two assertions
2017-06-11 13:44:00 +01:00
Adam Hathcock
513e59f830 Mark for 0.17.1 2017-06-09 08:28:35 +01:00
Adam Hathcock
b10a1cf2bd Bug on Windows on .NET Core fix (#257)
* Bug on Windows on .NET Core fix: https://github.com/dotnet/corefx/issues/20676

* Add comment
2017-06-09 08:22:47 +01:00
Adam Hathcock
1656edaa29 Add some more details to nuget package 2017-06-01 12:36:01 +01:00
Adam Hathcock
cff49aacba Added explicit tar skip check. Caught skip issue. 2017-06-01 11:25:32 +01:00
Adam Hathcock
19c32aff6c README fixes 2017-06-01 10:56:11 +01:00
Adam Hathcock
db3ec8337f Mark for 0.17 2017-06-01 10:54:50 +01:00
Adam Hathcock
e7bfc40461 Fix Skipping when compressed size is unknown (fallback to decompressing) 2017-06-01 09:26:08 +01:00
Adam Hathcock
3d3ca254ba Zip64 introduced seekable behavior into ZipWriter. The position may … (#252)
* Zip64 introduced seekable behavior into ZipWriter.  The position may not be zero.

* Remove some dead code

* Update formats for zip64

* Make version created by and version needed to extract the same

* Running tests is faster than skipping
2017-05-31 16:55:49 +01:00
Adam Hathcock
b45bc859a4 XZ Format (#247)
* Started integrated XZ format from https://github.com/sambott/XZ.NET

* Add readme line as it was copy/pasted

* Tar used with XZ

* update formats
2017-05-31 16:55:26 +01:00
Adam Hathcock
912d7a8775 Lzip (#245)
* First pass.  Writing isn't implemented on stream.  Tests are busted.

* LZipReader works...no file name :(

* LZipWriter works

* Writing tests are actually correct now.  LZipStream correctly writes trailer now.  lzip command line tool likes it.

* Add recommendation blurb

* Update notes for formats

* LZip isn't an archive format

* Attempting to fix and implement crc32

* LZip writing test passes

* Had to invert crc to check uncompressed data.
2017-05-31 16:51:24 +01:00
Adam Hathcock
16885da1b5 Mark for 0.16.2 2017-05-31 14:47:51 +01:00
Adam Hathcock
26714052eb Merge pull request #249 from adamhathcock/zip_entry_compression_fix
Per entry compression was being written out incorrectly on the centra…
2017-05-31 12:55:37 +01:00
Adam Hathcock
3df763a783 Merge branch 'master' into zip_entry_compression_fix 2017-05-31 11:15:30 +01:00
Adam Hathcock
3f24a744c0 Merge branch 'master' into zip_entry_compression_fix 2017-05-30 16:10:41 +01:00
Adam Hathcock
9270d7cabf Add cache for dotnet packages 2017-05-30 16:04:55 +01:00
Adam Hathcock
69fc74e376 Per entry compression was being written out incorrectly on the central directory. Fix for that. 2017-05-30 15:37:41 +01:00
144 changed files with 5485 additions and 914 deletions

15
.circleci/config.yml Normal file
View File

@@ -0,0 +1,15 @@
version: 2
jobs:
build:
docker:
- image: microsoft/dotnet:2.0.5-sdk-2.1.4
steps:
- checkout
- run:
name: Install unzip
command: |
apt-get update
apt-get install -y unzip
- run:
name: Build
command: ./build.sh

2
.gitattributes vendored
View File

@@ -2,4 +2,4 @@
* text=auto
# need original files to be windows
test/TestArchives/Original/*.txt eol=crlf
*.txt text eol=crlf

1
.gitignore vendored
View File

@@ -14,3 +14,4 @@ tests/TestArchives/Scratch
.vs
tools
.vscode
.idea/

View File

@@ -1,10 +0,0 @@
dist: trusty
language: csharp
solution: SharpCompress.sln
matrix:
include:
- dotnet: 1.0.4
mono: none
env: DOTNETCORE=1
script:
- ./build.sh

View File

@@ -1,36 +1,60 @@
# Archive Formats
# Formats
## Accessing Archives
Archive classes allow random access to a seekable stream.
Reader classes allow forward-only reading
Writer classes allow forward-only Writing
* Archive classes allow random access to a seekable stream.
* Reader classes allow forward-only reading on a stream.
* Writer classes allow forward-only Writing on a stream.
## Supported Format Table
| Archive Format | Compression Format(s) | Compress/Decompress | Archive API | Reader API | Writer API |
| --- | --- | --- | --- | --- | --- |
| Rar | Rar | Decompress (1) | RarArchive | RarReader | N/A |
| Zip (2) | None, DEFLATE, BZip2, LZMA/LZMA2, PPMd | Both | ZipArchive | ZipReader | ZipWriter |
| Tar | None, BZip2, GZip, LZip | Both | TarArchive | TarReader | TarWriter (3) |
| GZip (single file) | GZip | Both | GZipArchive | GZipReader | GZipWriter |
| 7Zip (4) | LZMA, LZMA2, BZip2, PPMd, BCJ, BCJ2, Deflate | Decompress | SevenZipArchive | N/A | N/A |
| Archive Format | Compression Format(s) | Compress/Decompress | Archive API | Reader API | Writer API |
| ---------------------- | ------------------------------------------------- | ------------------- | --------------- | ---------- | ------------- |
| Rar | Rar | Decompress (1) | RarArchive | RarReader | N/A |
| Zip (2) | None, DEFLATE, Deflate64, BZip2, LZMA/LZMA2, PPMd | Both | ZipArchive | ZipReader | ZipWriter |
| Tar | None | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.GZip | DEFLATE | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.BZip2 | BZip2 | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.LZip | LZMA | Both | TarArchive | TarReader | TarWriter (3) |
| Tar.XZ | LZMA2 | Decompress | TarArchive | TarReader | TarWriter (3) |
| GZip (single file) | DEFLATE | Both | GZipArchive | GZipReader | GZipWriter |
| 7Zip (4) | LZMA, LZMA2, BZip2, PPMd, BCJ, BCJ2, Deflate | Decompress | SevenZipArchive | N/A | N/A |
| LZip (single file) (5) | LZip (LZMA) | Both | LZipArchive | LZipReader | LZipWriter |
1. SOLID Rars are only supported in the RarReader API.
2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading is supported.
3. The Tar format requires a file size in the header. If no size is specified to the TarWriter and the stream is not seekable, then an exception will be thrown.
4. The 7Zip format doesn't allow for reading as a forward-only stream so 7Zip is only supported through the Archive API
1. SOLID Rars are only supported in the RarReader API.
2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported but only with seekable streams as the Zip spec doesn't support Zip64 data in post data descriptors. Deflate64 is only supported for reading.
3. The Tar format requires a file size in the header. If no size is specified to the TarWriter and the stream is not seekable, then an exception will be thrown.
4. The 7Zip format doesn't allow for reading as a forward-only stream so 7Zip is only supported through the Archive API
5. LZip has no support for extra data like the file name or timestamp. There is a default filename used when looking at the entry Key on the archive.
## Compressors
## Compression Streams
For those who want to directly compress/decompress bits
For those who want to directly compress/decompress bits. The single file formats are represented here as well. However, BZip2, LZip and XZ have no metadata (GZip has a little) so using them without something like a Tar file makes little sense.
| Compressor | Compress/Decompress |
| --- | --- |
| BZip2Stream | Both |
| GZipStream | Both |
| DeflateStream | Both |
| LZMAStream | Both |
| PPMdStream | Both |
| ADCStream | Decompress |
| LZipStream | Decompress |
| Compressor | Compress/Decompress |
| --------------- | ------------------- |
| BZip2Stream | Both |
| GZipStream | Both |
| DeflateStream | Both |
| Deflate64Stream | Decompress |
| LZMAStream | Both |
| PPMdStream | Both |
| ADCStream | Decompress |
| LZipStream | Both |
| XZStream | Decompress |
## Archive Formats vs Compression
Sometimes the terminology gets mixed.
### Compression
DEFLATE, LZMA are pure compression algorithms
### Formats
Formats like Zip, 7Zip, Rar are archive formats only. They use other compression methods (e.g. DEFLATE, LZMA, etc.) or proprietary ones (e.g. RAR)
### Overlap
GZip, BZip2 and LZip are single file archival formats. The overlap in the API happens because Tar uses the single file formats as "compression" methods and the API tries to hide this a bit.

View File

@@ -2,24 +2,35 @@
SharpCompress is a compression library in pure C# for .NET 3.5, 4.5, .NET Standard 1.0, 1.3 that can unrar, un7zip, unzip, untar unbzip2 and ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip are implemented.
The major feature is support for non-seekable streams so large files can be processed on the fly (i.e. download stream).
The major feature is support for non-seekable streams so large files can be processed on the fly (i.e. download stream).
AppVeyor Build -
AppVeyor Build -
[![Build status](https://ci.appveyor.com/api/projects/status/voxg971oemmvxh1e/branch/master?svg=true)](https://ci.appveyor.com/project/adamhathcock/sharpcompress/branch/master)
Travis CI Build -
[![Build Status](https://travis-ci.org/adamhathcock/sharpcompress.svg?branch=master)](https://travis-ci.org/adamhathcock/sharpcompress)
Circle CI Build -
[![CircleCI](https://circleci.com/gh/adamhathcock/sharpcompress.svg?style=svg)](https://circleci.com/gh/adamhathcock/sharpcompress)
## Need Help?
Post Issues on Github!
Check the [Supported Formats](FORMATS.md) and [Basic Usage.](USAGE.md)
## Recommended Formats
In general, I recommend GZip (Deflate)/BZip2 (BZip)/LZip (LZMA) as the simplicity of the formats lend to better long term archival as well as the streamability. Tar is often used in conjunction for multiple files in a single archive (e.g. `.tar.gz`)
Zip is okay, but it's a very haphazard format and the variation in headers and implementations makes it hard to get correct. Uses Deflate by default but supports a lot of compression methods.
RAR is not recommended as it's a proprietary format and the compression is closed source. Use Tar/LZip for LZMA
7Zip and XZ both are overly complicated. 7Zip does not support streamable formats. XZ has known holes explained here: (http://www.nongnu.org/lzip/xz_inadequate.html) Use Tar/LZip for LZMA compression instead.
## A Simple Request
Hi everyone. I hope you're using SharpCompress and finding it useful. Please give me feedback on what you'd like to see changed especially as far as usability goes. New feature suggestions are always welcome as well. I would also like to know what projects SharpCompress is being used in. I like seeing how it is used to give me ideas for future versions. Thanks!
Please do not email me directly to ask for help. If you think there is a real issue, please report it here.
Please do not email me directly to ask for help. If you think there is a real issue, please report it here.
## Want to contribute?
@@ -34,6 +45,26 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
## Version Log
### Version 0.18
* [Now on Github releases](https://github.com/adamhathcock/sharpcompress/releases/tag/0.18)
### Version 0.17.1
* Fix - [Bug Fix for .NET Core on Windows](https://github.com/adamhathcock/sharpcompress/pull/257)
### Version 0.17.0
* New - Full LZip support! Can read and write LZip files and Tars inside LZip files. [Make LZip a first class citizen. #241](https://github.com/adamhathcock/sharpcompress/issues/241)
* New - XZ read support! Can read XZ files and Tars inside XZ files. [XZ in SharpCompress #91](https://github.com/adamhathcock/sharpcompress/issues/94)
* Fix - [Regression - zip file writing on seekable streams always assumed stream start was 0. Introduced with Zip64 writing.](https://github.com/adamhathcock/sharpcompress/issues/244)
* Fix - [Zip files with post-data descriptors can be properly skipped via decompression](https://github.com/adamhathcock/sharpcompress/issues/162)
### Version 0.16.2
* Fix [.NET 3.5 should support files and cryptography (was a regression from 0.16.0)](https://github.com/adamhathcock/sharpcompress/pull/251)
* Fix [Zip per entry compression customization wrote the wrong method into the zip archive](https://github.com/adamhathcock/sharpcompress/pull/249)
### Version 0.16.1
* Fix [Preserve compression method when getting a compressed stream](https://github.com/adamhathcock/sharpcompress/pull/235)
@@ -106,7 +137,7 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
### Version 0.11.6
* Bug fix for global header in Tar
* Writers now have a leaveOpen `bool` overload. They won't close streams if not-requested to.
* Writers now have a leaveOpen `bool` overload. They won't close streams if not-requested to.
### Version 0.11.5
@@ -125,7 +156,7 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
### Version 0.11
* Been over a year, contains mainly fixes from contributors!
* Been over a year, contains mainly fixes from contributors!
* Possible breaking change: ArchiveEncoding is UTF8 by default now.
* TAR supports writing long names using longlink
* RAR Protect Header added
@@ -152,6 +183,8 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
* Embedded some BouncyCastle crypto classes to allow RAR Decryption and Winzip AES Decryption in Portable and Windows Store DLLs
* Built in Release (I think)
XZ implementation based on: https://github.com/sambott/XZ.NET by @sambott
7Zip implementation based on: https://code.google.com/p/managed-lzma/
LICENSE

View File

@@ -45,10 +45,14 @@
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/INDENT_ANONYMOUS_METHOD_BLOCK/@EntryValue">True</s:Boolean>
<s:Int64 x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/KEEP_BLANK_LINES_IN_CODE/@EntryValue">1</s:Int64>
<s:Int64 x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/KEEP_BLANK_LINES_IN_DECLARATIONS/@EntryValue">1</s:Int64>
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_ACCESSOR_ATTRIBUTE_ON_SAME_LINE_EX/@EntryValue">NEVER</s:String>
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_ACCESSORHOLDER_ATTRIBUTE_ON_SAME_LINE_EX/@EntryValue">NEVER</s:String>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_CONSTRUCTOR_INITIALIZER_ON_SAME_LINE/@EntryValue">False</s:Boolean>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_FIELD_ATTRIBUTE_ON_SAME_LINE/@EntryValue">False</s:Boolean>
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_FIELD_ATTRIBUTE_ON_SAME_LINE_EX/@EntryValue">NEVER</s:String>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_ACCESSORHOLDER_ON_SINGLE_LINE/@EntryValue">True</s:Boolean>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_ACCESSOR_ATTRIBUTE_ON_SAME_LINE/@EntryValue">False</s:Boolean>
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_EMBEDDED_STATEMENT_ON_SAME_LINE/@EntryValue">NEVER</s:String>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_SIMPLE_INITIALIZER_ON_SINGLE_LINE/@EntryValue">True</s:Boolean>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CSharpFormat/PLACE_WHILE_ON_NEW_LINE/@EntryValue">True</s:Boolean>
@@ -114,6 +118,11 @@
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=NAMESPACE_005FALIAS/@EntryIndexedValue">&lt;Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=XAML_005FFIELD/@EntryIndexedValue">&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/XamlNaming/UserRules/=XAML_005FRESOURCE/@EntryIndexedValue">&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /&gt;</s:String>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpAttributeForSingleLineMethodUpgrade/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpKeepExistingMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpPlaceEmbeddedOnSameLineMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ECSharpRenamePlacementToArrangementMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EAddAccessorOwnerDeclarationBracesMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002ECSharpPlaceAttributeOnSameLineMigration/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EMigrateBlankLinesAroundFieldToBlankLinesAroundProperty/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/Environment/SettingsMigration/IsMigratorApplied/=JetBrains_002EReSharper_002EPsi_002ECSharp_002ECodeStyle_002ESettingsUpgrade_002EMigrateThisQualifierSettings/@EntryIndexedValue">True</s:Boolean></wpf:ResourceDictionary>

View File

@@ -1,7 +1,8 @@
# SharpCompress Usage
## Stream Rules
When dealing with Streams, the rule should be that you don't close a stream you didn't create. This, in effect, should mean you should always put a Stream in a using block to dispose it.
When dealing with Streams, the rule should be that you don't close a stream you didn't create. This, in effect, should mean you should always put a Stream in a using block to dispose it.
However, the .NET Framework often has classes that will dispose streams by default to make things "easy" like the following:
@@ -12,7 +13,7 @@ using (var reader = new StreamReader(File.Open("foo")))
}
```
In this example, reader should get disposed. However, stream rules should say that the `FileStream` created by `File.Open` should remain open. However, the .NET Framework closes it for you by default unless you override the constructor. In general, you should be writing Stream code like this:
In this example, reader should get disposed. However, stream rules should say that the `FileStream` created by `File.Open` should remain open. However, the .NET Framework closes it for you by default unless you override the constructor. In general, you should be writing Stream code like this:
```C#
using (var fileStream = File.Open("foo"))
@@ -25,7 +26,7 @@ using (var reader = new StreamReader(fileStream))
To deal with the "correct" rules as well as the expectations of users, I've decided on this:
* When writing, leave streams open.
* When reading, close streams
* When reading, close streams
To be explicit though, consider always using the overloads that use `ReaderOptions` or `WriterOptions` and explicitly set `LeaveStreamOpen` the way you want.
@@ -43,11 +44,9 @@ using (var archive = ZipArchive.Create())
}
```
### Create Zip Archive from all files in a directory and save in memory
```C#
var memoryStream = new MemoryStream();
using (var archive = ZipArchive.Create())
{

View File

@@ -25,13 +25,17 @@ Task("Build")
var settings = new DotNetCoreBuildSettings
{
Framework = "netstandard1.0",
Configuration = "Release"
Configuration = "Release",
NoRestore = true
};
DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);
settings.Framework = "netcoreapp1.1";
DotNetCoreBuild("./tests/SharpCompress.Test/SharpCompress.Test.csproj", settings);
settings.Framework = "netstandard1.3";
DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);
settings.Framework = "netstandard2.0";
DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);
}
});
@@ -39,23 +43,15 @@ Task("Test")
.IsDependentOn("Build")
.Does(() =>
{
if (!bool.Parse(EnvironmentVariable("APPVEYOR") ?? "false")
&& !bool.Parse(EnvironmentVariable("TRAVIS") ?? "false"))
var files = GetFiles("tests/**/*.csproj");
foreach(var file in files)
{
var files = GetFiles("tests/**/*.csproj");
foreach(var file in files)
var settings = new DotNetCoreTestSettings
{
var settings = new DotNetCoreTestSettings
{
Configuration = "Release"
};
DotNetCoreTest(file.ToString(), settings);
}
}
else
{
Information("Skipping tests as this is AppVeyor or Travis CI");
Configuration = "Release",
Framework = "netcoreapp2.0"
};
DotNetCoreTest(file.ToString(), settings);
}
});

View File

@@ -8,7 +8,7 @@
# Define directories.
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
TOOLS_DIR=$SCRIPT_DIR/tools
CAKE_VERSION=0.19.1
CAKE_VERSION=0.26.0
CAKE_DLL=$TOOLS_DIR/Cake.CoreCLR.$CAKE_VERSION/Cake.dll
# Make sure the tools folder exist.

View File

@@ -6,6 +6,7 @@ using SharpCompress.Archives.SevenZip;
using SharpCompress.Archives.Tar;
using SharpCompress.Archives.Zip;
using SharpCompress.Common;
using SharpCompress.Compressors.LZMA;
using SharpCompress.Readers;
namespace SharpCompress.Archives
@@ -55,7 +56,7 @@ namespace SharpCompress.Archives
stream.Seek(0, SeekOrigin.Begin);
return TarArchive.Open(stream, readerOptions);
}
throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip");
throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip");
}
public static IWritableArchive Create(ArchiveType type)

View File

@@ -14,6 +14,7 @@ namespace SharpCompress.Archives.GZip
public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
{
#if !NO_FILE
/// <summary>
/// Constructor expects a filepath to an existing file.
/// </summary>
@@ -36,6 +37,7 @@ namespace SharpCompress.Archives.GZip
return new GZipArchive(fileInfo, readerOptions ?? new ReaderOptions());
}
#endif
/// <summary>
/// Takes a seekable Stream as a source
/// </summary>
@@ -54,11 +56,11 @@ namespace SharpCompress.Archives.GZip
#if !NO_FILE
/// <summary>
/// Constructor with a FileInfo object to an existing file.
/// </summary>
/// <param name="fileInfo"></param>
/// <param name="options"></param>
/// <summary>
/// Constructor with a FileInfo object to an existing file.
/// </summary>
/// <param name="fileInfo"></param>
/// <param name="options"></param>
internal GZipArchive(FileInfo fileInfo, ReaderOptions options)
: base(ArchiveType.GZip, fileInfo, options)
{
@@ -104,15 +106,9 @@ namespace SharpCompress.Archives.GZip
{
// read the header on the first read
byte[] header = new byte[10];
int n = stream.Read(header, 0, header.Length);
// workitem 8501: handle edge case (decompress empty stream)
if (n == 0)
{
return false;
}
if (n != 10)
if (!stream.ReadFully(header))
{
return false;
}
@@ -158,7 +154,7 @@ namespace SharpCompress.Archives.GZip
{
throw new InvalidOperationException("Only one entry is allowed in a GZip Archive");
}
using (var writer = new GZipWriter(stream))
using (var writer = new GZipWriter(stream, new GZipWriterOptions(options)))
{
foreach (var entry in oldEntries.Concat(newEntries)
.Where(x => !x.IsDirectory))
@@ -179,7 +175,7 @@ namespace SharpCompress.Archives.GZip
protected override IEnumerable<GZipArchiveEntry> LoadEntries(IEnumerable<GZipVolume> volumes)
{
Stream stream = volumes.Single().Stream;
yield return new GZipArchiveEntry(this, new GZipFilePart(stream));
yield return new GZipArchiveEntry(this, new GZipFilePart(stream, ReaderOptions.ArchiveEncoding));
}
protected override IReader CreateReaderForSolidExtraction()

View File

@@ -4,6 +4,7 @@ using System.IO;
using System.Linq;
using SharpCompress.Common;
using SharpCompress.Common.SevenZip;
using SharpCompress.Compressors.LZMA.Utilites;
using SharpCompress.IO;
using SharpCompress.Readers;
@@ -106,7 +107,7 @@ namespace SharpCompress.Archives.SevenZip
for (int i = 0; i < database.Files.Count; i++)
{
var file = database.Files[i];
yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file));
yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file, ReaderOptions.ArchiveEncoding));
}
}
@@ -117,7 +118,7 @@ namespace SharpCompress.Archives.SevenZip
stream.Position = 0;
var reader = new ArchiveReader();
reader.Open(stream);
database = reader.ReadDatabase(null);
database = reader.ReadDatabase(new PasswordProvider(ReaderOptions.Password));
}
}
@@ -144,7 +145,7 @@ namespace SharpCompress.Archives.SevenZip
protected override IReader CreateReaderForSolidExtraction()
{
return new SevenZipReader(this);
return new SevenZipReader(ReaderOptions, this);
}
public override bool IsSolid { get { return Entries.Where(x => !x.IsDirectory).GroupBy(x => x.FilePart.Folder).Count() > 1; } }
@@ -165,8 +166,8 @@ namespace SharpCompress.Archives.SevenZip
private Stream currentStream;
private CFileItem currentItem;
internal SevenZipReader(SevenZipArchive archive)
: base(new ReaderOptions(), ArchiveType.SevenZip)
internal SevenZipReader(ReaderOptions readerOptions, SevenZipArchive archive)
: base(readerOptions, ArchiveType.SevenZip)
{
this.archive = archive;
}
@@ -190,7 +191,7 @@ namespace SharpCompress.Archives.SevenZip
}
else
{
currentStream = archive.database.GetFolderStream(stream, currentFolder, null);
currentStream = archive.database.GetFolderStream(stream, currentFolder, new PasswordProvider(Options.Password));
}
foreach (var entry in group)
{
@@ -205,5 +206,21 @@ namespace SharpCompress.Archives.SevenZip
return CreateEntryStream(new ReadOnlySubStream(currentStream, currentItem.Size));
}
}
private class PasswordProvider : IPasswordProvider
{
private readonly string _password;
public PasswordProvider(string password)
{
_password = password;
}
public string CryptoGetTextPassword()
{
return _password;
}
}
}
}

View File

@@ -16,7 +16,7 @@ namespace SharpCompress.Archives.Tar
public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
{
#if !NO_FILE
/// <summary>
/// Constructor expects a filepath to an existing file.
/// </summary>
@@ -39,7 +39,7 @@ namespace SharpCompress.Archives.Tar
return new TarArchive(fileInfo, readerOptions ?? new ReaderOptions());
}
#endif
/// <summary>
/// Takes a seekable Stream as a source
/// </summary>
@@ -52,6 +52,7 @@ namespace SharpCompress.Archives.Tar
}
#if !NO_FILE
public static bool IsTarFile(string filePath)
{
return IsTarFile(new FileInfo(filePath));
@@ -74,7 +75,7 @@ namespace SharpCompress.Archives.Tar
{
try
{
TarHeader tar = new TarHeader();
TarHeader tar = new TarHeader(new ArchiveEncoding());
tar.Read(new BinaryReader(stream));
return tar.Name.Length > 0 && Enum.IsDefined(typeof(EntryType), tar.EntryType);
}
@@ -98,7 +99,6 @@ namespace SharpCompress.Archives.Tar
protected override IEnumerable<TarVolume> LoadVolumes(FileInfo file)
{
return new TarVolume(file.OpenRead(), ReaderOptions).AsEnumerable();
}
#endif
@@ -127,7 +127,7 @@ namespace SharpCompress.Archives.Tar
{
Stream stream = volumes.Single().Stream;
TarHeader previousHeader = null;
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream))
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream, ReaderOptions.ArchiveEncoding))
{
if (header != null)
{
@@ -152,7 +152,7 @@ namespace SharpCompress.Archives.Tar
memoryStream.Position = 0;
var bytes = memoryStream.ToArray();
header.Name = ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length).TrimNulls();
header.Name = ReaderOptions.ArchiveEncoding.Decode(bytes).TrimNulls();
}
}
@@ -182,7 +182,7 @@ namespace SharpCompress.Archives.Tar
IEnumerable<TarArchiveEntry> oldEntries,
IEnumerable<TarArchiveEntry> newEntries)
{
using (var writer = new TarWriter(stream, options))
using (var writer = new TarWriter(stream, new TarWriterOptions(options)))
{
foreach (var entry in oldEntries.Concat(newEntries)
.Where(x => !x.IsDirectory))

View File

@@ -24,6 +24,7 @@ namespace SharpCompress.Archives.Zip
public CompressionLevel DeflateCompressionLevel { get; set; }
#if !NO_FILE
/// <summary>
/// Constructor expects a filepath to an existing file.
/// </summary>
@@ -46,6 +47,7 @@ namespace SharpCompress.Archives.Zip
return new ZipArchive(fileInfo, readerOptions ?? new ReaderOptions());
}
#endif
/// <summary>
/// Takes a seekable Stream as a source
/// </summary>
@@ -58,6 +60,7 @@ namespace SharpCompress.Archives.Zip
}
#if !NO_FILE
public static bool IsZipFile(string filePath, string password = null)
{
return IsZipFile(new FileInfo(filePath), password);
@@ -78,7 +81,7 @@ namespace SharpCompress.Archives.Zip
public static bool IsZipFile(Stream stream, string password = null)
{
StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password);
StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password, new ArchiveEncoding());
try
{
ZipHeader header =
@@ -109,7 +112,7 @@ namespace SharpCompress.Archives.Zip
internal ZipArchive(FileInfo fileInfo, ReaderOptions readerOptions)
: base(ArchiveType.Zip, fileInfo, readerOptions)
{
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password);
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding);
}
protected override IEnumerable<ZipVolume> LoadVolumes(FileInfo file)
@@ -131,7 +134,7 @@ namespace SharpCompress.Archives.Zip
internal ZipArchive(Stream stream, ReaderOptions readerOptions)
: base(ArchiveType.Zip, stream, readerOptions)
{
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password);
headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding);
}
protected override IEnumerable<ZipVolume> LoadVolumes(IEnumerable<Stream> streams)
@@ -150,19 +153,19 @@ namespace SharpCompress.Archives.Zip
switch (h.ZipHeaderType)
{
case ZipHeaderType.DirectoryEntry:
{
yield return new ZipArchiveEntry(this,
new SeekableZipFilePart(headerFactory,
h as DirectoryEntryHeader,
stream));
}
{
yield return new ZipArchiveEntry(this,
new SeekableZipFilePart(headerFactory,
h as DirectoryEntryHeader,
stream));
}
break;
case ZipHeaderType.DirectoryEnd:
{
byte[] bytes = (h as DirectoryEndHeader).Comment;
volume.Comment = ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length);
yield break;
}
{
byte[] bytes = (h as DirectoryEndHeader).Comment;
volume.Comment = ReaderOptions.ArchiveEncoding.Decode(bytes);
yield break;
}
}
}
}
@@ -205,7 +208,7 @@ namespace SharpCompress.Archives.Zip
{
var stream = Volumes.Single().Stream;
stream.Position = 0;
return ZipReader.Open(stream);
return ZipReader.Open(stream, ReaderOptions);
}
}
}

View File

@@ -0,0 +1,119 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#if NETCORE
using System.Runtime.CompilerServices;
using System.Threading;
namespace SharpCompress.Buffers
{
/// <summary>
/// Provides a resource pool that enables reusing instances of type <see cref="T:T[]"/>.
/// </summary>
/// <remarks>
/// <para>
/// Renting and returning buffers with an <see cref="ArrayPool{T}"/> can increase performance
/// in situations where arrays are created and destroyed frequently, resulting in significant
/// memory pressure on the garbage collector.
/// </para>
/// <para>
/// This class is thread-safe. All members may be used by multiple threads concurrently.
/// </para>
/// </remarks>
internal abstract class ArrayPool<T>
{
/// <summary>The lazily-initialized shared pool instance.</summary>
private static ArrayPool<T> s_sharedInstance = null;
/// <summary>
/// Retrieves a shared <see cref="ArrayPool{T}"/> instance.
/// </summary>
/// <remarks>
/// The shared pool provides a default implementation of <see cref="ArrayPool{T}"/>
/// that's intended for general applicability. It maintains arrays of multiple sizes, and
/// may hand back a larger array than was actually requested, but will never hand back a smaller
/// array than was requested. Renting a buffer from it with <see cref="Rent"/> will result in an
/// existing buffer being taken from the pool if an appropriate buffer is available or in a new
/// buffer being allocated if one is not available.
/// </remarks>
public static ArrayPool<T> Shared
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
get { return Volatile.Read(ref s_sharedInstance) ?? EnsureSharedCreated(); }
}
/// <summary>Ensures that <see cref="s_sharedInstance"/> has been initialized to a pool and returns it.</summary>
[MethodImpl(MethodImplOptions.NoInlining)]
private static ArrayPool<T> EnsureSharedCreated()
{
Interlocked.CompareExchange(ref s_sharedInstance, Create(), null);
return s_sharedInstance;
}
/// <summary>
/// Creates a new <see cref="ArrayPool{T}"/> instance using default configuration options.
/// </summary>
/// <returns>A new <see cref="ArrayPool{T}"/> instance.</returns>
public static ArrayPool<T> Create()
{
return new DefaultArrayPool<T>();
}
/// <summary>
/// Creates a new <see cref="ArrayPool{T}"/> instance using custom configuration options.
/// </summary>
/// <param name="maxArrayLength">The maximum length of array instances that may be stored in the pool.</param>
/// <param name="maxArraysPerBucket">
/// The maximum number of array instances that may be stored in each bucket in the pool. The pool
/// groups arrays of similar lengths into buckets for faster access.
/// </param>
/// <returns>A new <see cref="ArrayPool{T}"/> instance with the specified configuration options.</returns>
/// <remarks>
/// The created pool will group arrays into buckets, with no more than <paramref name="maxArraysPerBucket"/>
/// in each bucket and with those arrays not exceeding <paramref name="maxArrayLength"/> in length.
/// </remarks>
public static ArrayPool<T> Create(int maxArrayLength, int maxArraysPerBucket)
{
return new DefaultArrayPool<T>(maxArrayLength, maxArraysPerBucket);
}
/// <summary>
/// Retrieves a buffer that is at least the requested length.
/// </summary>
/// <param name="minimumLength">The minimum length of the array needed.</param>
/// <returns>
/// An <see cref="T:T[]"/> that is at least <paramref name="minimumLength"/> in length.
/// </returns>
/// <remarks>
/// This buffer is loaned to the caller and should be returned to the same pool via
/// <see cref="Return"/> so that it may be reused in subsequent usage of <see cref="Rent"/>.
/// It is not a fatal error to not return a rented buffer, but failure to do so may lead to
/// decreased application performance, as the pool may need to create a new buffer to replace
/// the one lost.
/// </remarks>
public abstract T[] Rent(int minimumLength);
/// <summary>
/// Returns to the pool an array that was previously obtained via <see cref="Rent"/> on the same
/// <see cref="ArrayPool{T}"/> instance.
/// </summary>
/// <param name="array">
/// The buffer previously obtained from <see cref="Rent"/> to return to the pool.
/// </param>
/// <param name="clearArray">
/// If <c>true</c> and if the pool will store the buffer to enable subsequent reuse, <see cref="Return"/>
/// will clear <paramref name="array"/> of its contents so that a subsequent consumer via <see cref="Rent"/>
/// will not see the previous consumer's content. If <c>false</c> or if the pool will release the buffer,
/// the array's contents are left unchanged.
/// </param>
/// <remarks>
/// Once a buffer has been returned to the pool, the caller gives up all ownership of the buffer
/// and must not use it. The reference returned from a given call to <see cref="Rent"/> must only be
/// returned via <see cref="Return"/> once. The default <see cref="ArrayPool{T}"/>
/// may hold onto the returned buffer in order to rent it again, or it may release the returned buffer
/// if it's determined that the pool already has enough buffers stored.
/// </remarks>
public abstract void Return(T[] array, bool clearArray = false);
}
}
#endif

View File

@@ -0,0 +1,144 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#if NETCORE
using System;
namespace SharpCompress.Buffers
{
internal sealed partial class DefaultArrayPool<T> : ArrayPool<T>
{
/// <summary>The default maximum length of each array in the pool (2^20).</summary>
private const int DefaultMaxArrayLength = 1024 * 1024;
/// <summary>The default maximum number of arrays per bucket that are available for rent.</summary>
private const int DefaultMaxNumberOfArraysPerBucket = 50;
/// <summary>Lazily-allocated empty array used when arrays of length 0 are requested.</summary>
private static T[] s_emptyArray; // we support contracts earlier than those with Array.Empty<T>()
private readonly Bucket[] _buckets;
internal DefaultArrayPool() : this(DefaultMaxArrayLength, DefaultMaxNumberOfArraysPerBucket)
{
}
internal DefaultArrayPool(int maxArrayLength, int maxArraysPerBucket)
{
if (maxArrayLength <= 0)
{
throw new ArgumentOutOfRangeException(nameof(maxArrayLength));
}
if (maxArraysPerBucket <= 0)
{
throw new ArgumentOutOfRangeException(nameof(maxArraysPerBucket));
}
// Our bucketing algorithm has a min length of 2^4 and a max length of 2^30.
// Constrain the actual max used to those values.
const int MinimumArrayLength = 0x10, MaximumArrayLength = 0x40000000;
if (maxArrayLength > MaximumArrayLength)
{
maxArrayLength = MaximumArrayLength;
}
else if (maxArrayLength < MinimumArrayLength)
{
maxArrayLength = MinimumArrayLength;
}
// Create the buckets.
int poolId = Id;
int maxBuckets = Utilities.SelectBucketIndex(maxArrayLength);
var buckets = new Bucket[maxBuckets + 1];
for (int i = 0; i < buckets.Length; i++)
{
buckets[i] = new Bucket(Utilities.GetMaxSizeForBucket(i), maxArraysPerBucket, poolId);
}
_buckets = buckets;
}
/// <summary>Gets an ID for the pool to use with events.</summary>
private int Id => GetHashCode();
public override T[] Rent(int minimumLength)
{
// Arrays can't be smaller than zero. We allow requesting zero-length arrays (even though
// pooling such an array isn't valuable) as it's a valid length array, and we want the pool
// to be usable in general instead of using `new`, even for computed lengths.
if (minimumLength < 0)
{
throw new ArgumentOutOfRangeException(nameof(minimumLength));
}
else if (minimumLength == 0)
{
// No need for events with the empty array. Our pool is effectively infinite
// and we'll never allocate for rents and never store for returns.
return s_emptyArray ?? (s_emptyArray = new T[0]);
}
T[] buffer = null;
int index = Utilities.SelectBucketIndex(minimumLength);
if (index < _buckets.Length)
{
// Search for an array starting at the 'index' bucket. If the bucket is empty, bump up to the
// next higher bucket and try that one, but only try at most a few buckets.
const int MaxBucketsToTry = 2;
int i = index;
do
{
// Attempt to rent from the bucket. If we get a buffer from it, return it.
buffer = _buckets[i].Rent();
if (buffer != null)
{
return buffer;
}
}
while (++i < _buckets.Length && i != index + MaxBucketsToTry);
// The pool was exhausted for this buffer size. Allocate a new buffer with a size corresponding
// to the appropriate bucket.
buffer = new T[_buckets[index]._bufferLength];
}
else
{
// The request was for a size too large for the pool. Allocate an array of exactly the requested length.
// When it's returned to the pool, we'll simply throw it away.
buffer = new T[minimumLength];
}
return buffer;
}
public override void Return(T[] array, bool clearArray = false)
{
if (array == null)
{
throw new ArgumentNullException(nameof(array));
}
else if (array.Length == 0)
{
// Ignore empty arrays. When a zero-length array is rented, we return a singleton
// rather than actually taking a buffer out of the lowest bucket.
return;
}
// Determine with what bucket this array length is associated
int bucket = Utilities.SelectBucketIndex(array.Length);
// If we can tell that the buffer was allocated, drop it. Otherwise, check if we have space in the pool
if (bucket < _buckets.Length)
{
// Clear the array if the user requests
if (clearArray)
{
Array.Clear(array, 0, array.Length);
}
// Return the buffer to its bucket. In the future, we might consider having Return return false
// instead of dropping a bucket, in which case we could try to return to a lower-sized bucket,
// just as how in Rent we allow renting from a higher-sized bucket.
_buckets[bucket].Return(array);
}
}
}
}
#endif

View File

@@ -0,0 +1,111 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#if NETCORE
using System;
using System.Diagnostics;
using System.Threading;
namespace SharpCompress.Buffers
{
internal sealed partial class DefaultArrayPool<T> : ArrayPool<T>
{
/// <summary>Provides a thread-safe bucket containing buffers that can be Rent'd and Return'd.</summary>
private sealed class Bucket
{
internal readonly int _bufferLength;
private readonly T[][] _buffers;
private readonly int _poolId;
private SpinLock _lock; // do not make this readonly; it's a mutable struct
private int _index;
/// <summary>
/// Creates the pool with numberOfBuffers arrays where each buffer is of bufferLength length.
/// </summary>
internal Bucket(int bufferLength, int numberOfBuffers, int poolId)
{
_lock = new SpinLock(Debugger.IsAttached); // only enable thread tracking if debugger is attached; it adds non-trivial overheads to Enter/Exit
_buffers = new T[numberOfBuffers][];
_bufferLength = bufferLength;
_poolId = poolId;
}
/// <summary>Gets an ID for the bucket to use with events.</summary>
internal int Id => GetHashCode();
/// <summary>Takes an array from the bucket. If the bucket is empty, returns null.</summary>
internal T[] Rent()
{
T[][] buffers = _buffers;
T[] buffer = null;
// While holding the lock, grab whatever is at the next available index and
// update the index. We do as little work as possible while holding the spin
// lock to minimize contention with other threads. The try/finally is
// necessary to properly handle thread aborts on platforms which have them.
bool lockTaken = false, allocateBuffer = false;
try
{
_lock.Enter(ref lockTaken);
if (_index < buffers.Length)
{
buffer = buffers[_index];
buffers[_index++] = null;
allocateBuffer = buffer == null;
}
}
finally
{
if (lockTaken) _lock.Exit(false);
}
// While we were holding the lock, we grabbed whatever was at the next available index, if
// there was one. If we tried and if we got back null, that means we hadn't yet allocated
// for that slot, in which case we should do so now.
if (allocateBuffer)
{
buffer = new T[_bufferLength];
}
return buffer;
}
/// <summary>
/// Attempts to return the buffer to the bucket. If successful, the buffer will be stored
/// in the bucket and true will be returned; otherwise, the buffer won't be stored, and false
/// will be returned.
/// </summary>
internal void Return(T[] array)
{
// Check to see if the buffer is the correct size for this bucket
if (array.Length != _bufferLength)
{
throw new ArgumentException("Buffer not from pool", nameof(array));
}
// While holding the spin lock, if there's room available in the bucket,
// put the buffer into the next available slot. Otherwise, we just drop it.
// The try/finally is necessary to properly handle thread aborts on platforms
// which have them.
bool lockTaken = false;
try
{
_lock.Enter(ref lockTaken);
if (_index != 0)
{
_buffers[--_index] = array;
}
}
finally
{
if (lockTaken) _lock.Exit(false);
}
}
}
}
}
#endif

View File

@@ -0,0 +1,38 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#if NETCORE
using System.Diagnostics;
using System.Runtime.CompilerServices;
namespace SharpCompress.Buffers
{
internal static class Utilities
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static int SelectBucketIndex(int bufferSize)
{
Debug.Assert(bufferSize > 0);
uint bitsRemaining = ((uint)bufferSize - 1) >> 4;
int poolIndex = 0;
if (bitsRemaining > 0xFFFF) { bitsRemaining >>= 16; poolIndex = 16; }
if (bitsRemaining > 0xFF) { bitsRemaining >>= 8; poolIndex += 8; }
if (bitsRemaining > 0xF) { bitsRemaining >>= 4; poolIndex += 4; }
if (bitsRemaining > 0x3) { bitsRemaining >>= 2; poolIndex += 2; }
if (bitsRemaining > 0x1) { bitsRemaining >>= 1; poolIndex += 1; }
return poolIndex + (int)bitsRemaining;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static int GetMaxSizeForBucket(int binIndex)
{
int maxSize = 16 << binIndex;
Debug.Assert(maxSize >= 0);
return maxSize;
}
}
}
#endif

View File

@@ -1,23 +1,60 @@
using System.Text;
using System;
using System.Text;
namespace SharpCompress.Common
{
public static class ArchiveEncoding
public class ArchiveEncoding
{
/// <summary>
/// Default encoding to use when archive format doesn't specify one.
/// </summary>
public static Encoding Default { get; set; }
public Encoding Default { get; set; }
/// <summary>
/// Encoding used by encryption schemes which don't comply with RFC 2898.
/// ArchiveEncoding used by encryption schemes which don't comply with RFC 2898.
/// </summary>
public static Encoding Password { get; set; }
public Encoding Password { get; set; }
static ArchiveEncoding()
/// <summary>
/// Set this encoding when you want to force it for all encoding operations.
/// </summary>
public Encoding Forced { get; set; }
/// <summary>
/// Set this when you want to use a custom method for all decoding operations.
/// </summary>
/// <returns>string Func(bytes, index, length)</returns>
public Func<byte[], int, int, string> CustomDecoder { get; set; }
public ArchiveEncoding()
{
Default = Encoding.UTF8;
Password = Encoding.UTF8;
}
public string Decode(byte[] bytes)
{
return Decode(bytes, 0, bytes.Length);
}
public string Decode(byte[] bytes, int start, int length)
{
return GetDecoder().Invoke(bytes, start, length);
}
public byte[] Encode(string str)
{
return GetEncoding().GetBytes(str);
}
public Encoding GetEncoding()
{
return Forced ?? Default ?? Encoding.UTF8;
}
public Func<byte[], int, int, string> GetDecoder()
{
return CustomDecoder ?? ((bytes, index, count) => (Default ?? Encoding.UTF8).GetString(bytes, index, count));
}
}
}

View File

@@ -12,6 +12,8 @@
BCJ,
BCJ2,
LZip,
Unknown
Xz,
Unknown,
Deflate64
}
}

View File

@@ -65,6 +65,12 @@ namespace SharpCompress.Common
/// </summary>
public abstract bool IsSplit { get; }
/// <inheritdoc/>
public override string ToString()
{
return this.Key;
}
internal abstract IEnumerable<FilePart> Parts { get; }
internal bool IsSolid { get; set; }

View File

@@ -4,9 +4,17 @@ namespace SharpCompress.Common
{
public abstract class FilePart
{
protected FilePart(ArchiveEncoding archiveEncoding)
{
ArchiveEncoding = archiveEncoding;
}
internal ArchiveEncoding ArchiveEncoding { get; }
internal abstract string FilePartName { get; }
internal abstract Stream GetCompressedStream();
internal abstract Stream GetRawStream();
internal bool Skipped { get; set; }
}
}

View File

@@ -1,6 +1,7 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
namespace SharpCompress.Common.GZip
{
@@ -39,9 +40,9 @@ namespace SharpCompress.Common.GZip
internal override IEnumerable<FilePart> Parts => filePart.AsEnumerable<FilePart>();
internal static IEnumerable<GZipEntry> GetEntries(Stream stream)
internal static IEnumerable<GZipEntry> GetEntries(Stream stream, OptionsBase options)
{
yield return new GZipEntry(new GZipFilePart(stream));
yield return new GZipEntry(new GZipFilePart(stream, options.ArchiveEncoding));
}
}
}

View File

@@ -5,35 +5,37 @@ using SharpCompress.Common.Tar.Headers;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Converters;
using System.Text;
namespace SharpCompress.Common.GZip
{
internal class GZipFilePart : FilePart
{
private string name;
private readonly Stream stream;
private string _name;
private readonly Stream _stream;
internal GZipFilePart(Stream stream)
internal GZipFilePart(Stream stream, ArchiveEncoding archiveEncoding)
: base(archiveEncoding)
{
ReadAndValidateGzipHeader(stream);
EntryStartPosition = stream.Position;
this.stream = stream;
this._stream = stream;
}
internal long EntryStartPosition { get; }
internal DateTime? DateModified { get; private set; }
internal override string FilePartName => name;
internal override string FilePartName => _name;
internal override Stream GetCompressedStream()
{
return new DeflateStream(stream, CompressionMode.Decompress, CompressionLevel.Default, false);
return new DeflateStream(_stream, CompressionMode.Decompress, CompressionLevel.Default, false);
}
internal override Stream GetRawStream()
{
return stream;
return _stream;
}
private void ReadAndValidateGzipHeader(Stream stream)
@@ -67,15 +69,16 @@ namespace SharpCompress.Common.GZip
Int16 extraLength = (Int16)(header[0] + header[1] * 256);
byte[] extra = new byte[extraLength];
n = stream.Read(extra, 0, extra.Length);
if (n != extraLength)
if (!stream.ReadFully(extra))
{
throw new ZlibException("Unexpected end-of-file reading GZIP header.");
}
n = extraLength;
}
if ((header[3] & 0x08) == 0x08)
{
name = ReadZeroTerminatedString(stream);
_name = ReadZeroTerminatedString(stream);
}
if ((header[3] & 0x10) == 0x010)
{
@@ -87,7 +90,7 @@ namespace SharpCompress.Common.GZip
}
}
private static string ReadZeroTerminatedString(Stream stream)
private string ReadZeroTerminatedString(Stream stream)
{
byte[] buf1 = new byte[1];
var list = new List<byte>();
@@ -110,8 +113,8 @@ namespace SharpCompress.Common.GZip
}
}
while (!done);
byte[] a = list.ToArray();
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
byte[] buffer = list.ToArray();
return ArchiveEncoding.Decode(buffer);
}
}
}

View File

@@ -1,4 +1,5 @@
namespace SharpCompress.Common
namespace SharpCompress.Common
{
public class OptionsBase
{
@@ -6,5 +7,7 @@
/// SharpCompress will keep the supplied streams open. Default is true.
/// </summary>
public bool LeaveStreamOpen { get; set; } = true;
public ArchiveEncoding ArchiveEncoding { get; set; } = new ArchiveEncoding();
}
}

View File

@@ -1,6 +1,6 @@
using SharpCompress.IO;
using System;
using System.IO;
using SharpCompress.IO;
namespace SharpCompress.Common.Rar.Headers
{
@@ -52,50 +52,50 @@ namespace SharpCompress.Common.Rar.Headers
switch (HeaderType)
{
case HeaderType.FileHeader:
{
if (FileFlags.HasFlag(FileFlags.UNICODE))
{
int length = 0;
while (length < fileNameBytes.Length
&& fileNameBytes[length] != 0)
if (FileFlags.HasFlag(FileFlags.UNICODE))
{
length++;
}
if (length != nameSize)
{
length++;
FileName = FileNameDecoder.Decode(fileNameBytes, length);
int length = 0;
while (length < fileNameBytes.Length
&& fileNameBytes[length] != 0)
{
length++;
}
if (length != nameSize)
{
length++;
FileName = FileNameDecoder.Decode(fileNameBytes, length);
}
else
{
FileName = ArchiveEncoding.Decode(fileNameBytes);
}
}
else
{
FileName = DecodeDefault(fileNameBytes);
FileName = ArchiveEncoding.Decode(fileNameBytes);
}
FileName = ConvertPath(FileName, HostOS);
}
else
{
FileName = DecodeDefault(fileNameBytes);
}
FileName = ConvertPath(FileName, HostOS);
}
break;
case HeaderType.NewSubHeader:
{
int datasize = HeaderSize - NEWLHD_SIZE - nameSize;
if (FileFlags.HasFlag(FileFlags.SALT))
{
datasize -= SALT_SIZE;
}
if (datasize > 0)
{
SubData = reader.ReadBytes(datasize);
}
int datasize = HeaderSize - NEWLHD_SIZE - nameSize;
if (FileFlags.HasFlag(FileFlags.SALT))
{
datasize -= SALT_SIZE;
}
if (datasize > 0)
{
SubData = reader.ReadBytes(datasize);
}
if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes))
{
RecoverySectors = SubData[8] + (SubData[9] << 8)
+ (SubData[10] << 16) + (SubData[11] << 24);
if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes))
{
RecoverySectors = SubData[8] + (SubData[9] << 8)
+ (SubData[10] << 16) + (SubData[11] << 24);
}
}
}
break;
}
@@ -118,12 +118,6 @@ namespace SharpCompress.Common.Rar.Headers
}
}
//only the full .net framework will do other code pages than unicode/utf8
private string DecodeDefault(byte[] bytes)
{
return ArchiveEncoding.Default.GetString(bytes, 0, bytes.Length);
}
private long UInt32To64(uint x, uint y)
{
long l = x;
@@ -178,6 +172,7 @@ namespace SharpCompress.Common.Rar.Headers
}
internal long DataStartPosition { get; set; }
internal HostOS HostOS { get; private set; }
internal uint FileCRC { get; private set; }
@@ -199,6 +194,7 @@ namespace SharpCompress.Common.Rar.Headers
internal FileFlags FileFlags => (FileFlags)Flags;
internal long CompressedSize { get; private set; }
internal long UncompressedSize { get; private set; }
internal string FileName { get; private set; }

View File

@@ -18,9 +18,9 @@ namespace SharpCompress.Common.Rar.Headers
Flags == 0x1A21 &&
HeaderSize == 0x07;
// Rar5 signature: 52 61 72 21 1A 07 10 00 (not supported yet)
// Rar5 signature: 52 61 72 21 1A 07 01 00 (not supported yet)
}
internal bool OldFormat { get; private set; }
}
}
}

View File

@@ -1,6 +1,7 @@
using System;
using System.IO;
using SharpCompress.IO;
using System.Text;
namespace SharpCompress.Common.Rar.Headers
{
@@ -17,14 +18,16 @@ namespace SharpCompress.Common.Rar.Headers
HeaderSize = baseHeader.HeaderSize;
AdditionalSize = baseHeader.AdditionalSize;
ReadBytes = baseHeader.ReadBytes;
ArchiveEncoding = baseHeader.ArchiveEncoding;
}
internal static RarHeader Create(RarCrcBinaryReader reader)
internal static RarHeader Create(RarCrcBinaryReader reader, ArchiveEncoding archiveEncoding)
{
try
{
RarHeader header = new RarHeader();
header.ArchiveEncoding = archiveEncoding;
reader.Mark();
header.ReadStartFromReader(reader);
header.ReadBytes += reader.CurrentReadByteCount;
@@ -50,7 +53,8 @@ namespace SharpCompress.Common.Rar.Headers
}
}
protected virtual void ReadFromReader(MarkingBinaryReader reader) {
protected virtual void ReadFromReader(MarkingBinaryReader reader)
{
throw new NotImplementedException();
}
@@ -76,10 +80,11 @@ namespace SharpCompress.Common.Rar.Headers
return header;
}
private void VerifyHeaderCrc(ushort crc) {
if (HeaderType != HeaderType.MarkHeader)
private void VerifyHeaderCrc(ushort crc)
{
if (HeaderType != HeaderType.MarkHeader)
{
if (crc != HeadCRC)
if (crc != HeadCRC)
{
throw new InvalidFormatException("rar header crc mismatch");
}
@@ -106,6 +111,8 @@ namespace SharpCompress.Common.Rar.Headers
protected short HeaderSize { get; private set; }
internal ArchiveEncoding ArchiveEncoding { get; private set; }
/// <summary>
/// This additional size of the header could be file data
/// </summary>

View File

@@ -117,7 +117,7 @@ namespace SharpCompress.Common.Rar.Headers
{
#if !NO_CRYPTO
var reader = new RarCryptoBinaryReader(stream, Options.Password);
if (IsEncrypted)
{
if (Options.Password == null)
@@ -133,7 +133,7 @@ namespace SharpCompress.Common.Rar.Headers
#endif
RarHeader header = RarHeader.Create(reader);
RarHeader header = RarHeader.Create(reader, Options.ArchiveEncoding);
if (header == null)
{
return null;
@@ -141,110 +141,110 @@ namespace SharpCompress.Common.Rar.Headers
switch (header.HeaderType)
{
case HeaderType.ArchiveHeader:
{
var ah = header.PromoteHeader<ArchiveHeader>(reader);
IsEncrypted = ah.HasPassword;
return ah;
}
{
var ah = header.PromoteHeader<ArchiveHeader>(reader);
IsEncrypted = ah.HasPassword;
return ah;
}
case HeaderType.MarkHeader:
{
return header.PromoteHeader<MarkHeader>(reader);
}
{
return header.PromoteHeader<MarkHeader>(reader);
}
case HeaderType.ProtectHeader:
{
ProtectHeader ph = header.PromoteHeader<ProtectHeader>(reader);
// skip the recovery record data, we do not use it.
switch (StreamingMode)
{
case StreamingMode.Seekable:
{
reader.BaseStream.Position += ph.DataSize;
}
break;
case StreamingMode.Streaming:
{
reader.BaseStream.Skip(ph.DataSize);
}
break;
default:
{
throw new InvalidFormatException("Invalid StreamingMode");
}
}
ProtectHeader ph = header.PromoteHeader<ProtectHeader>(reader);
return ph;
}
// skip the recovery record data, we do not use it.
switch (StreamingMode)
{
case StreamingMode.Seekable:
{
reader.BaseStream.Position += ph.DataSize;
}
break;
case StreamingMode.Streaming:
{
reader.BaseStream.Skip(ph.DataSize);
}
break;
default:
{
throw new InvalidFormatException("Invalid StreamingMode");
}
}
return ph;
}
case HeaderType.NewSubHeader:
{
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
switch (StreamingMode)
{
case StreamingMode.Seekable:
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
switch (StreamingMode)
{
fh.DataStartPosition = reader.BaseStream.Position;
reader.BaseStream.Position += fh.CompressedSize;
}
break;
case StreamingMode.Streaming:
{
//skip the data because it's useless?
reader.BaseStream.Skip(fh.CompressedSize);
}
break;
default:
{
throw new InvalidFormatException("Invalid StreamingMode");
case StreamingMode.Seekable:
{
fh.DataStartPosition = reader.BaseStream.Position;
reader.BaseStream.Position += fh.CompressedSize;
}
break;
case StreamingMode.Streaming:
{
//skip the data because it's useless?
reader.BaseStream.Skip(fh.CompressedSize);
}
break;
default:
{
throw new InvalidFormatException("Invalid StreamingMode");
}
}
return fh;
}
return fh;
}
case HeaderType.FileHeader:
{
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
switch (StreamingMode)
{
case StreamingMode.Seekable:
FileHeader fh = header.PromoteHeader<FileHeader>(reader);
switch (StreamingMode)
{
fh.DataStartPosition = reader.BaseStream.Position;
reader.BaseStream.Position += fh.CompressedSize;
}
break;
case StreamingMode.Streaming:
{
var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize);
if (fh.Salt == null)
{
fh.PackedStream = ms;
}
else
{
case StreamingMode.Seekable:
{
fh.DataStartPosition = reader.BaseStream.Position;
reader.BaseStream.Position += fh.CompressedSize;
}
break;
case StreamingMode.Streaming:
{
var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize);
if (fh.Salt == null)
{
fh.PackedStream = ms;
}
else
{
#if !NO_CRYPTO
fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.Salt);
fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.Salt);
#else
throw new NotSupportedException("RarCrypto not supported");
#endif
}
}
break;
default:
{
throw new InvalidFormatException("Invalid StreamingMode");
}
}
break;
default:
{
throw new InvalidFormatException("Invalid StreamingMode");
}
}
return fh;
}
return fh;
}
case HeaderType.EndArchiveHeader:
{
return header.PromoteHeader<EndArchiveHeader>(reader);
}
{
return header.PromoteHeader<EndArchiveHeader>(reader);
}
default:
{
throw new InvalidFormatException("Invalid Rar Header: " + header.HeaderType);
}
{
throw new InvalidFormatException("Invalid Rar Header: " + header.HeaderType);
}
}
}
}
}
}

View File

@@ -9,6 +9,7 @@ namespace SharpCompress.Common.Rar
internal abstract class RarFilePart : FilePart
{
internal RarFilePart(MarkHeader mh, FileHeader fh)
: base(fh.ArchiveEncoding)
{
MarkHeader = mh;
FileHeader = fh;

View File

@@ -22,6 +22,13 @@ namespace SharpCompress.Common.SevenZip
internal List<long> PackStreamStartPositions = new List<long>();
internal List<int> FolderStartFileIndex = new List<int>();
internal List<int> FileIndexToFolderIndexMap = new List<int>();
internal IPasswordProvider PasswordProvider { get; }
public ArchiveDatabase(IPasswordProvider passwordProvider)
{
PasswordProvider = passwordProvider;
}
internal void Clear()
{

View File

@@ -182,7 +182,7 @@ namespace SharpCompress.Common.SevenZip
private DateTime? TranslateTime(long? time)
{
if (time.HasValue)
if (time.HasValue && time.Value >= 0 && time.Value <= 2650467743999999999) //maximum Windows file time 31.12.9999
{
return TranslateTime(time.Value);
}
@@ -1211,7 +1211,7 @@ namespace SharpCompress.Common.SevenZip
public ArchiveDatabase ReadDatabase(IPasswordProvider pass)
{
var db = new ArchiveDatabase();
var db = new ArchiveDatabase(pass);
db.Clear();
db.MajorVersion = _header[6];
@@ -1279,7 +1279,7 @@ namespace SharpCompress.Common.SevenZip
throw new InvalidOperationException();
}
var dataVector = ReadAndDecodePackedStreams(db.StartPositionAfterHeader, pass);
var dataVector = ReadAndDecodePackedStreams(db.StartPositionAfterHeader, db.PasswordProvider);
// compressed header without content is odd but ok
if (dataVector.Count == 0)
@@ -1301,7 +1301,7 @@ namespace SharpCompress.Common.SevenZip
}
}
ReadHeader(db, pass);
ReadHeader(db, db.PasswordProvider);
}
db.Fill();
return db;
@@ -1441,7 +1441,7 @@ namespace SharpCompress.Common.SevenZip
#endregion
}
private Stream GetCachedDecoderStream(ArchiveDatabase _db, int folderIndex, IPasswordProvider pw)
private Stream GetCachedDecoderStream(ArchiveDatabase _db, int folderIndex)
{
Stream s;
if (!_cachedStreams.TryGetValue(folderIndex, out s))
@@ -1456,13 +1456,13 @@ namespace SharpCompress.Common.SevenZip
}
s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes.ToArray(), folderInfo,
pw);
_db.PasswordProvider);
_cachedStreams.Add(folderIndex, s);
}
return s;
}
public Stream OpenStream(ArchiveDatabase _db, int fileIndex, IPasswordProvider pw)
public Stream OpenStream(ArchiveDatabase _db, int fileIndex)
{
int folderIndex = _db.FileIndexToFolderIndexMap[fileIndex];
int numFilesInFolder = _db.NumUnpackStreamsVector[folderIndex];
@@ -1479,12 +1479,12 @@ namespace SharpCompress.Common.SevenZip
skipSize += _db.Files[firstFileIndex + i].Size;
}
Stream s = GetCachedDecoderStream(_db, folderIndex, pw);
Stream s = GetCachedDecoderStream(_db, folderIndex);
s.Position = skipSize;
return new ReadOnlySubStream(s, _db.Files[fileIndex].Size);
}
public void Extract(ArchiveDatabase _db, int[] indices, IPasswordProvider pw)
public void Extract(ArchiveDatabase _db, int[] indices)
{
int numItems;
bool allFilesMode = (indices == null);
@@ -1562,7 +1562,7 @@ namespace SharpCompress.Common.SevenZip
// TODO: If the decoding fails the last file may be extracted incompletely. Delete it?
Stream s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes.ToArray(),
folderInfo, pw);
folderInfo, _db.PasswordProvider);
byte[] buffer = new byte[4 << 10];
for (;;)
{
@@ -1588,4 +1588,4 @@ namespace SharpCompress.Common.SevenZip
#endregion
}
}
}

View File

@@ -7,14 +7,15 @@ namespace SharpCompress.Common.SevenZip
{
internal class SevenZipFilePart : FilePart
{
private CompressionType? type;
private readonly Stream stream;
private readonly ArchiveDatabase database;
private CompressionType? _type;
private readonly Stream _stream;
private readonly ArchiveDatabase _database;
internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry)
internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry, ArchiveEncoding archiveEncoding)
: base(archiveEncoding)
{
this.stream = stream;
this.database = database;
this._stream = stream;
this._database = database;
Index = index;
Header = fileEntry;
if (Header.HasStream)
@@ -41,14 +42,14 @@ namespace SharpCompress.Common.SevenZip
{
return null;
}
var folderStream = database.GetFolderStream(stream, Folder, null);
var folderStream = _database.GetFolderStream(_stream, Folder, _database.PasswordProvider);
int firstFileIndex = database.FolderStartFileIndex[database.Folders.IndexOf(Folder)];
int firstFileIndex = _database.FolderStartFileIndex[_database.Folders.IndexOf(Folder)];
int skipCount = Index - firstFileIndex;
long skipSize = 0;
for (int i = 0; i < skipCount; i++)
{
skipSize += database.Files[firstFileIndex + i].Size;
skipSize += _database.Files[firstFileIndex + i].Size;
}
if (skipSize > 0)
{
@@ -61,11 +62,11 @@ namespace SharpCompress.Common.SevenZip
{
get
{
if (type == null)
if (_type == null)
{
type = GetCompression();
_type = GetCompression();
}
return type.Value;
return _type.Value;
}
}
@@ -84,7 +85,7 @@ namespace SharpCompress.Common.SevenZip
{
var coder = Folder.Coders.First();
switch (coder.MethodId.Id)
{
{
case k_LZMA:
case k_LZMA2:
{

View File

@@ -9,6 +9,11 @@ namespace SharpCompress.Common.Tar.Headers
{
internal static readonly DateTime Epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
public TarHeader(ArchiveEncoding archiveEncoding)
{
ArchiveEncoding = archiveEncoding;
}
internal string Name { get; set; }
//internal int Mode { get; set; }
@@ -20,6 +25,7 @@ namespace SharpCompress.Common.Tar.Headers
internal DateTime LastModifiedTime { get; set; }
internal EntryType EntryType { get; set; }
internal Stream PackedStream { get; set; }
internal ArchiveEncoding ArchiveEncoding { get; }
internal const int BlockSize = 512;
@@ -31,7 +37,7 @@ namespace SharpCompress.Common.Tar.Headers
WriteOctalBytes(0, buffer, 108, 8); // owner ID
WriteOctalBytes(0, buffer, 116, 8); // group ID
//Encoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
//ArchiveEncoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
if (Name.Length > 100)
{
// Set mock filename and filetype to indicate the next block is the actual name of the file
@@ -72,7 +78,7 @@ namespace SharpCompress.Common.Tar.Headers
private void WriteLongFilenameHeader(Stream output)
{
byte[] nameBytes = ArchiveEncoding.Default.GetBytes(Name);
byte[] nameBytes = ArchiveEncoding.Encode(Name);
output.Write(nameBytes, 0, nameBytes.Length);
// pad to multiple of BlockSize bytes, and make sure a terminating null is added
@@ -99,7 +105,7 @@ namespace SharpCompress.Common.Tar.Headers
}
else
{
Name = ArchiveEncoding.Default.GetString(buffer, 0, 100).TrimNulls();
Name = ArchiveEncoding.Decode(buffer, 0, 100).TrimNulls();
}
EntryType = ReadEntryType(buffer);
@@ -111,12 +117,12 @@ namespace SharpCompress.Common.Tar.Headers
long unixTimeStamp = ReadASCIIInt64Base8(buffer, 136, 11);
LastModifiedTime = Epoch.AddSeconds(unixTimeStamp).ToLocalTime();
Magic = ArchiveEncoding.Default.GetString(buffer, 257, 6).TrimNulls();
Magic = ArchiveEncoding.Decode(buffer, 257, 6).TrimNulls();
if (!string.IsNullOrEmpty(Magic)
&& "ustar".Equals(Magic))
{
string namePrefix = ArchiveEncoding.Default.GetString(buffer, 345, 157);
string namePrefix = ArchiveEncoding.Decode(buffer, 345, 157);
namePrefix = namePrefix.TrimNulls();
if (!string.IsNullOrEmpty(namePrefix))
{
@@ -143,7 +149,7 @@ namespace SharpCompress.Common.Tar.Headers
{
reader.ReadBytes(remainingBytesToRead);
}
return ArchiveEncoding.Default.GetString(nameBytes, 0, nameBytes.Length).TrimNulls();
return ArchiveEncoding.Decode(nameBytes, 0, nameBytes.Length).TrimNulls();
}
private static EntryType ReadEntryType(byte[] buffer)

View File

@@ -3,6 +3,7 @@ using System.Collections.Generic;
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;
using System.Text;
namespace SharpCompress.Common.Tar
{
@@ -43,9 +44,9 @@ namespace SharpCompress.Common.Tar
internal override IEnumerable<FilePart> Parts => filePart.AsEnumerable<FilePart>();
internal static IEnumerable<TarEntry> GetEntries(StreamingMode mode, Stream stream,
CompressionType compressionType)
CompressionType compressionType, ArchiveEncoding archiveEncoding)
{
foreach (TarHeader h in TarHeaderFactory.ReadHeader(mode, stream))
foreach (TarHeader h in TarHeaderFactory.ReadHeader(mode, stream, archiveEncoding))
{
if (h != null)
{

View File

@@ -6,11 +6,12 @@ namespace SharpCompress.Common.Tar
{
internal class TarFilePart : FilePart
{
private readonly Stream seekableStream;
private readonly Stream _seekableStream;
internal TarFilePart(TarHeader header, Stream seekableStream)
: base(header.ArchiveEncoding)
{
this.seekableStream = seekableStream;
this._seekableStream = seekableStream;
Header = header;
}
@@ -20,10 +21,10 @@ namespace SharpCompress.Common.Tar
internal override Stream GetCompressedStream()
{
if (seekableStream != null)
if (_seekableStream != null)
{
seekableStream.Position = Header.DataStartPosition.Value;
return new ReadOnlySubStream(seekableStream, Header.Size);
_seekableStream.Position = Header.DataStartPosition.Value;
return new ReadOnlySubStream(_seekableStream, Header.Size);
}
return Header.PackedStream;
}

View File

@@ -2,12 +2,13 @@
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;
using System.Text;
namespace SharpCompress.Common.Tar
{
internal static class TarHeaderFactory
{
internal static IEnumerable<TarHeader> ReadHeader(StreamingMode mode, Stream stream)
internal static IEnumerable<TarHeader> ReadHeader(StreamingMode mode, Stream stream, ArchiveEncoding archiveEncoding)
{
while (true)
{
@@ -15,7 +16,8 @@ namespace SharpCompress.Common.Tar
try
{
BinaryReader reader = new BinaryReader(stream);
header = new TarHeader();
header = new TarHeader(archiveEncoding);
if (!header.Read(reader))
{
yield break;
@@ -23,22 +25,22 @@ namespace SharpCompress.Common.Tar
switch (mode)
{
case StreamingMode.Seekable:
{
header.DataStartPosition = reader.BaseStream.Position;
{
header.DataStartPosition = reader.BaseStream.Position;
//skip to nearest 512
reader.BaseStream.Position += PadTo512(header.Size);
}
//skip to nearest 512
reader.BaseStream.Position += PadTo512(header.Size);
}
break;
case StreamingMode.Streaming:
{
header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
}
{
header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
}
break;
default:
{
throw new InvalidFormatException("Invalid StreamingMode");
}
{
throw new InvalidFormatException("Invalid StreamingMode");
}
}
}
catch

View File

@@ -6,8 +6,8 @@ namespace SharpCompress.Common.Zip.Headers
{
internal class DirectoryEntryHeader : ZipFileEntry
{
public DirectoryEntryHeader()
: base(ZipHeaderType.DirectoryEntry)
public DirectoryEntryHeader(ArchiveEncoding archiveEncoding)
: base(ZipHeaderType.DirectoryEntry, archiveEncoding)
{
}
@@ -31,10 +31,10 @@ namespace SharpCompress.Common.Zip.Headers
RelativeOffsetOfEntryHeader = reader.ReadUInt32();
byte[] name = reader.ReadBytes(nameLength);
Name = DecodeString(name);
Name = ArchiveEncoding.Decode(name);
byte[] extra = reader.ReadBytes(extraLength);
byte[] comment = reader.ReadBytes(commentLength);
Comment = DecodeString(comment);
Comment = ArchiveEncoding.Decode(comment);
LoadExtra(extra);
var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);

View File

@@ -5,6 +5,7 @@ namespace SharpCompress.Common.Zip.Headers
[Flags]
internal enum HeaderFlags : ushort
{
None = 0,
Encrypted = 1, // http://www.pkware.com/documents/casestudies/APPNOTE.TXT
Bit1 = 2,
Bit2 = 4,

View File

@@ -1,12 +1,13 @@
using System.IO;
using System.Linq;
using System.Text;
namespace SharpCompress.Common.Zip.Headers
{
internal class LocalEntryHeader : ZipFileEntry
{
public LocalEntryHeader()
: base(ZipHeaderType.LocalEntry)
public LocalEntryHeader(ArchiveEncoding archiveEncoding)
: base(ZipHeaderType.LocalEntry, archiveEncoding)
{
}
@@ -24,7 +25,7 @@ namespace SharpCompress.Common.Zip.Headers
ushort extraLength = reader.ReadUInt16();
byte[] name = reader.ReadBytes(nameLength);
byte[] extra = reader.ReadBytes(extraLength);
Name = DecodeString(name);
Name = ArchiveEncoding.Decode(name);
LoadExtra(extra);
var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);

View File

@@ -8,10 +8,11 @@ namespace SharpCompress.Common.Zip.Headers
{
internal abstract class ZipFileEntry : ZipHeader
{
protected ZipFileEntry(ZipHeaderType type)
protected ZipFileEntry(ZipHeaderType type, ArchiveEncoding archiveEncoding)
: base(type)
{
Extra = new List<ExtraData>();
ArchiveEncoding = archiveEncoding;
}
internal bool IsDirectory
@@ -29,28 +30,11 @@ namespace SharpCompress.Common.Zip.Headers
&& Name.EndsWith("\\");
}
}
protected string DecodeString(byte[] str)
{
if (FlagUtility.HasFlag(Flags, HeaderFlags.UTF8))
{
return Encoding.UTF8.GetString(str, 0, str.Length);
}
return ArchiveEncoding.Default.GetString(str, 0, str.Length);
}
protected byte[] EncodeString(string str)
{
if (FlagUtility.HasFlag(Flags, HeaderFlags.UTF8))
{
return Encoding.UTF8.GetBytes(str);
}
return ArchiveEncoding.Default.GetBytes(str);
}
internal Stream PackedStream { get; set; }
internal ArchiveEncoding ArchiveEncoding { get; }
internal string Name { get; set; }
internal HeaderFlags Flags { get; set; }
@@ -64,7 +48,7 @@ namespace SharpCompress.Common.Zip.Headers
internal long UncompressedSize { get; set; }
internal List<ExtraData> Extra { get; set; }
public string Password { get; set; }
internal PkwareTraditionalEncryptionData ComposeEncryptionData(Stream archiveStream)
@@ -75,10 +59,10 @@ namespace SharpCompress.Common.Zip.Headers
}
var buffer = new byte[12];
archiveStream.Read(buffer, 0, 12);
archiveStream.ReadFully(buffer);
PkwareTraditionalEncryptionData encryptionData = PkwareTraditionalEncryptionData.ForRead(Password, this, buffer);
return encryptionData;
}

View File

@@ -42,7 +42,7 @@ namespace SharpCompress.Common.Zip
if (buffer == null)
{
throw new ArgumentNullException("buffer");
throw new ArgumentNullException(nameof(buffer));
}
byte[] temp = new byte[count];

View File

@@ -9,9 +9,11 @@ namespace SharpCompress.Common.Zip
{
private static readonly CRC32 crc32 = new CRC32();
private readonly UInt32[] _Keys = {0x12345678, 0x23456789, 0x34567890};
private readonly ArchiveEncoding _archiveEncoding;
private PkwareTraditionalEncryptionData(string password)
private PkwareTraditionalEncryptionData(string password, ArchiveEncoding archiveEncoding)
{
_archiveEncoding = archiveEncoding;
Initialize(password);
}
@@ -27,7 +29,7 @@ namespace SharpCompress.Common.Zip
public static PkwareTraditionalEncryptionData ForRead(string password, ZipFileEntry header,
byte[] encryptionHeader)
{
var encryptor = new PkwareTraditionalEncryptionData(password);
var encryptor = new PkwareTraditionalEncryptionData(password, header.ArchiveEncoding);
byte[] plainTextHeader = encryptor.Decrypt(encryptionHeader, encryptionHeader.Length);
if (plainTextHeader[11] != (byte)((header.Crc >> 24) & 0xff))
{
@@ -47,7 +49,7 @@ namespace SharpCompress.Common.Zip
{
if (length > cipherText.Length)
{
throw new ArgumentOutOfRangeException("length",
throw new ArgumentOutOfRangeException(nameof(length),
"Bad length during Decryption: the length parameter must be smaller than or equal to the size of the destination array.");
}
@@ -70,7 +72,7 @@ namespace SharpCompress.Common.Zip
if (length > plainText.Length)
{
throw new ArgumentOutOfRangeException("length",
throw new ArgumentOutOfRangeException(nameof(length),
"Bad length during Encryption: The length parameter must be smaller than or equal to the size of the destination array.");
}
@@ -93,17 +95,12 @@ namespace SharpCompress.Common.Zip
}
}
internal static byte[] StringToByteArray(string value, Encoding encoding)
internal byte[] StringToByteArray(string value)
{
byte[] a = encoding.GetBytes(value);
byte[] a = _archiveEncoding.Password.GetBytes(value);
return a;
}
internal static byte[] StringToByteArray(string value)
{
return StringToByteArray(value, ArchiveEncoding.Password);
}
private void UpdateKeys(byte byteValue)
{
_Keys[0] = (UInt32)crc32.ComputeCrc32((int)_Keys[0], byteValue);

View File

@@ -5,21 +5,21 @@ namespace SharpCompress.Common.Zip
{
internal class SeekableZipFilePart : ZipFilePart
{
private bool isLocalHeaderLoaded;
private readonly SeekableZipHeaderFactory headerFactory;
private bool _isLocalHeaderLoaded;
private readonly SeekableZipHeaderFactory _headerFactory;
internal SeekableZipFilePart(SeekableZipHeaderFactory headerFactory, DirectoryEntryHeader header, Stream stream)
: base(header, stream)
{
this.headerFactory = headerFactory;
this._headerFactory = headerFactory;
}
internal override Stream GetCompressedStream()
{
if (!isLocalHeaderLoaded)
if (!_isLocalHeaderLoaded)
{
LoadLocalHeader();
isLocalHeaderLoaded = true;
_isLocalHeaderLoaded = true;
}
return base.GetCompressedStream();
}
@@ -29,7 +29,7 @@ namespace SharpCompress.Common.Zip
private void LoadLocalHeader()
{
bool hasData = Header.HasData;
Header = headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader);
Header = _headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader);
Header.HasData = hasData;
}

View File

@@ -3,16 +3,17 @@ using System.Collections.Generic;
using System.IO;
using SharpCompress.Common.Zip.Headers;
using SharpCompress.IO;
using System.Text;
namespace SharpCompress.Common.Zip
{
internal class SeekableZipHeaderFactory : ZipHeaderFactory
{
private const int MAX_ITERATIONS_FOR_DIRECTORY_HEADER = 4096;
private bool zip64;
private bool _zip64;
internal SeekableZipHeaderFactory(string password)
: base(StreamingMode.Seekable, password)
internal SeekableZipHeaderFactory(string password, ArchiveEncoding archiveEncoding)
: base(StreamingMode.Seekable, password, archiveEncoding)
{
}
@@ -26,14 +27,14 @@ namespace SharpCompress.Common.Zip
if (entry.IsZip64)
{
zip64 = true;
_zip64 = true;
SeekBackToHeader(stream, reader, ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR);
var zip64Locator = new Zip64DirectoryEndLocatorHeader();
zip64Locator.Read(reader);
stream.Seek(zip64Locator.RelativeOffsetOfTheEndOfDirectoryRecord, SeekOrigin.Begin);
uint zip64Signature = reader.ReadUInt32();
if(zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
if (zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
throw new ArchiveException("Failed to locate the Zip64 Header");
var zip64Entry = new Zip64DirectoryEndHeader();
@@ -50,7 +51,7 @@ namespace SharpCompress.Common.Zip
{
stream.Position = position;
uint signature = reader.ReadUInt32();
var directoryEntryHeader = ReadHeader(signature, reader, zip64) as DirectoryEntryHeader;
var directoryEntryHeader = ReadHeader(signature, reader, _zip64) as DirectoryEntryHeader;
position = stream.Position;
if (directoryEntryHeader == null)
{
@@ -91,7 +92,7 @@ namespace SharpCompress.Common.Zip
stream.Seek(directoryEntryHeader.RelativeOffsetOfEntryHeader, SeekOrigin.Begin);
BinaryReader reader = new BinaryReader(stream);
uint signature = reader.ReadUInt32();
var localEntryHeader = ReadHeader(signature, reader, zip64) as LocalEntryHeader;
var localEntryHeader = ReadHeader(signature, reader, _zip64) as LocalEntryHeader;
if (localEntryHeader == null)
{
throw new InvalidOperationException();

View File

@@ -39,19 +39,20 @@ namespace SharpCompress.Common.Zip
{
return new BinaryReader(rewindableStream);
}
if (Header.HasData)
if (Header.HasData && !Skipped)
{
if (decompressionStream == null)
{
decompressionStream = GetCompressedStream();
}
decompressionStream.SkipAll();
decompressionStream.Skip();
DeflateStream deflateStream = decompressionStream as DeflateStream;
if (deflateStream != null)
{
rewindableStream.Rewind(deflateStream.InputBuffer);
}
Skipped = true;
}
var reader = new BinaryReader(rewindableStream);
decompressionStream = null;

View File

@@ -2,13 +2,14 @@
using System.IO;
using SharpCompress.Common.Zip.Headers;
using SharpCompress.IO;
using System.Text;
namespace SharpCompress.Common.Zip
{
internal class StreamingZipHeaderFactory : ZipHeaderFactory
{
internal StreamingZipHeaderFactory(string password)
: base(StreamingMode.Streaming, password)
internal StreamingZipHeaderFactory(string password, ArchiveEncoding archiveEncoding)
: base(StreamingMode.Streaming, password, archiveEncoding)
{
}

View File

@@ -78,7 +78,7 @@ namespace SharpCompress.Common.Zip
{
//read out last 10 auth bytes
var ten = new byte[10];
stream.Read(ten, 0, 10);
stream.ReadFully(ten);
stream.Dispose();
}
}

View File

@@ -32,6 +32,10 @@ namespace SharpCompress.Common.Zip
{
return CompressionType.Deflate;
}
case ZipCompressionMethod.Deflate64:
{
return CompressionType.Deflate64;
}
case ZipCompressionMethod.LZMA:
{
return CompressionType.LZMA;

View File

@@ -5,6 +5,7 @@ using SharpCompress.Common.Zip.Headers;
using SharpCompress.Compressors;
using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Compressors.Deflate64;
using SharpCompress.Compressors.LZMA;
using SharpCompress.Compressors.PPMd;
using SharpCompress.Converters;
@@ -15,6 +16,7 @@ namespace SharpCompress.Common.Zip
internal abstract class ZipFilePart : FilePart
{
internal ZipFilePart(ZipFileEntry header, Stream stream)
: base(header.ArchiveEncoding)
{
Header = header;
header.Part = this;
@@ -65,6 +67,10 @@ namespace SharpCompress.Common.Zip
{
return new DeflateStream(stream, CompressionMode.Decompress);
}
case ZipCompressionMethod.Deflate64:
{
return new Deflate64Stream(stream, CompressionMode.Decompress);
}
case ZipCompressionMethod.BZip2:
{
return new BZip2Stream(stream, CompressionMode.Decompress);
@@ -88,7 +94,7 @@ namespace SharpCompress.Common.Zip
case ZipCompressionMethod.PPMd:
{
var props = new byte[2];
stream.Read(props, 0, props.Length);
stream.ReadFully(props);
return new PpmdStream(new PpmdProperties(props), stream, false);
}
case ZipCompressionMethod.WinzipAes:
@@ -175,7 +181,6 @@ namespace SharpCompress.Common.Zip
}
}
return plainStream;
}
}

View File

@@ -5,6 +5,7 @@ using System.Linq;
#endif
using SharpCompress.Common.Zip.Headers;
using SharpCompress.IO;
using System.Text;
namespace SharpCompress.Common.Zip
{
@@ -23,11 +24,13 @@ namespace SharpCompress.Common.Zip
protected LocalEntryHeader lastEntryHeader;
private readonly string password;
private readonly StreamingMode mode;
private readonly ArchiveEncoding archiveEncoding;
protected ZipHeaderFactory(StreamingMode mode, string password)
protected ZipHeaderFactory(StreamingMode mode, string password, ArchiveEncoding archiveEncoding)
{
this.mode = mode;
this.password = password;
this.archiveEncoding = archiveEncoding;
}
protected ZipHeader ReadHeader(uint headerBytes, BinaryReader reader, bool zip64 = false)
@@ -36,7 +39,7 @@ namespace SharpCompress.Common.Zip
{
case ENTRY_HEADER_BYTES:
{
var entryHeader = new LocalEntryHeader();
var entryHeader = new LocalEntryHeader(archiveEncoding);
entryHeader.Read(reader);
LoadHeader(entryHeader, reader.BaseStream);
@@ -45,48 +48,48 @@ namespace SharpCompress.Common.Zip
}
case DIRECTORY_START_HEADER_BYTES:
{
var entry = new DirectoryEntryHeader();
var entry = new DirectoryEntryHeader(archiveEncoding);
entry.Read(reader);
return entry;
}
case POST_DATA_DESCRIPTOR:
{
if (FlagUtility.HasFlag(lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor))
{
lastEntryHeader.Crc = reader.ReadUInt32();
lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
if (FlagUtility.HasFlag(lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor))
{
lastEntryHeader.Crc = reader.ReadUInt32();
lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32();
}
else
{
reader.ReadBytes(zip64 ? 20 : 12);
}
return null;
}
else
{
reader.ReadBytes(zip64 ? 20 : 12);
}
return null;
}
case DIGITAL_SIGNATURE:
return null;
case DIRECTORY_END_HEADER_BYTES:
{
var entry = new DirectoryEndHeader();
entry.Read(reader);
return entry;
}
{
var entry = new DirectoryEndHeader();
entry.Read(reader);
return entry;
}
case SPLIT_ARCHIVE_HEADER_BYTES:
{
return new SplitHeader();
}
{
return new SplitHeader();
}
case ZIP64_END_OF_CENTRAL_DIRECTORY:
{
var entry = new Zip64DirectoryEndHeader();
entry.Read(reader);
return entry;
}
{
var entry = new Zip64DirectoryEndHeader();
entry.Read(reader);
return entry;
}
case ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR:
{
var entry = new Zip64DirectoryEndLocatorHeader();
entry.Read(reader);
return entry;
}
{
var entry = new Zip64DirectoryEndLocatorHeader();
entry.Read(reader);
return entry;
}
default:
throw new NotSupportedException("Unknown header: " + headerBytes);
}
@@ -165,22 +168,22 @@ namespace SharpCompress.Common.Zip
switch (mode)
{
case StreamingMode.Seekable:
{
entryHeader.DataStartPosition = stream.Position;
stream.Position += entryHeader.CompressedSize;
break;
}
{
entryHeader.DataStartPosition = stream.Position;
stream.Position += entryHeader.CompressedSize;
break;
}
case StreamingMode.Streaming:
{
entryHeader.PackedStream = stream;
break;
}
{
entryHeader.PackedStream = stream;
break;
}
default:
{
throw new InvalidFormatException("Invalid StreamingMode");
}
{
throw new InvalidFormatException("Invalid StreamingMode");
}
}
//}

View File

@@ -105,19 +105,19 @@ namespace SharpCompress.Compressors.ADC
}
if (buffer == null)
{
throw new ArgumentNullException("buffer");
throw new ArgumentNullException(nameof(buffer));
}
if (count < 0)
{
throw new ArgumentOutOfRangeException("count");
throw new ArgumentOutOfRangeException(nameof(count));
}
if (offset < buffer.GetLowerBound(0))
{
throw new ArgumentOutOfRangeException("offset");
throw new ArgumentOutOfRangeException(nameof(offset));
}
if ((offset + count) > buffer.GetLength(0))
{
throw new ArgumentOutOfRangeException("count");
throw new ArgumentOutOfRangeException(nameof(count));
}
int size = -1;

View File

@@ -99,7 +99,7 @@ namespace SharpCompress.Compressors.Deflate
/// </summary>
/// <param name="input">The stream over which to calculate the CRC32</param>
/// <returns>the CRC32 calculation</returns>
public Int32 GetCrc32(Stream input)
public UInt32 GetCrc32(Stream input)
{
return GetCrc32AndCopy(input, null);
}
@@ -111,7 +111,7 @@ namespace SharpCompress.Compressors.Deflate
/// <param name="input">The stream over which to calculate the CRC32</param>
/// <param name="output">The stream into which to deflate the input</param>
/// <returns>the CRC32 calculation</returns>
public Int32 GetCrc32AndCopy(Stream input, Stream output)
public UInt32 GetCrc32AndCopy(Stream input, Stream output)
{
if (input == null)
{
@@ -143,7 +143,7 @@ namespace SharpCompress.Compressors.Deflate
TotalBytesRead += count;
}
return (Int32)(~runningCrc32Result);
return ~runningCrc32Result;
}
}

View File

@@ -26,6 +26,7 @@
using System;
using System.IO;
using System.Text;
namespace SharpCompress.Compressors.Deflate
{
@@ -36,9 +37,10 @@ namespace SharpCompress.Compressors.Deflate
public DeflateStream(Stream stream, CompressionMode mode,
CompressionLevel level = CompressionLevel.Default,
bool leaveOpen = false)
bool leaveOpen = false,
Encoding forceEncoding = null)
{
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen);
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen, forceEncoding);
}
#region Zlib properties

View File

@@ -30,41 +30,45 @@ using System;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Converters;
using System.Text;
namespace SharpCompress.Compressors.Deflate
{
public class GZipStream : Stream
{
internal static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
internal static readonly DateTime UNIX_EPOCH = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
public DateTime? LastModified { get; set; }
private string comment;
private string fileName;
private string _comment;
private string _fileName;
internal ZlibBaseStream BaseStream;
private bool disposed;
private bool firstReadDone;
private int headerByteCount;
private bool _disposed;
private bool _firstReadDone;
private int _headerByteCount;
private readonly Encoding _encoding;
public GZipStream(Stream stream, CompressionMode mode)
: this(stream, mode, CompressionLevel.Default, false)
: this(stream, mode, CompressionLevel.Default, false, Encoding.UTF8)
{
}
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level)
: this(stream, mode, level, false)
: this(stream, mode, level, false, Encoding.UTF8)
{
}
public GZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
: this(stream, mode, CompressionLevel.Default, leaveOpen)
: this(stream, mode, CompressionLevel.Default, leaveOpen, Encoding.UTF8)
{
}
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen, Encoding encoding)
{
BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen);
BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen, encoding);
_encoding = encoding;
}
#region Zlib properties
@@ -74,7 +78,7 @@ namespace SharpCompress.Compressors.Deflate
get => (BaseStream._flushMode);
set
{
if (disposed)
if (_disposed)
{
throw new ObjectDisposedException("GZipStream");
}
@@ -87,7 +91,7 @@ namespace SharpCompress.Compressors.Deflate
get => BaseStream._bufferSize;
set
{
if (disposed)
if (_disposed)
{
throw new ObjectDisposedException("GZipStream");
}
@@ -123,7 +127,7 @@ namespace SharpCompress.Compressors.Deflate
{
get
{
if (disposed)
if (_disposed)
{
throw new ObjectDisposedException("GZipStream");
}
@@ -149,7 +153,7 @@ namespace SharpCompress.Compressors.Deflate
{
get
{
if (disposed)
if (_disposed)
{
throw new ObjectDisposedException("GZipStream");
}
@@ -179,7 +183,7 @@ namespace SharpCompress.Compressors.Deflate
{
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Writer)
{
return BaseStream._z.TotalBytesOut + headerByteCount;
return BaseStream._z.TotalBytesOut + _headerByteCount;
}
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Reader)
{
@@ -202,14 +206,14 @@ namespace SharpCompress.Compressors.Deflate
{
try
{
if (!disposed)
if (!_disposed)
{
if (disposing && (BaseStream != null))
{
BaseStream.Dispose();
Crc32 = BaseStream.Crc32;
}
disposed = true;
_disposed = true;
}
}
finally
@@ -223,7 +227,7 @@ namespace SharpCompress.Compressors.Deflate
/// </summary>
public override void Flush()
{
if (disposed)
if (_disposed)
{
throw new ObjectDisposedException("GZipStream");
}
@@ -263,7 +267,7 @@ namespace SharpCompress.Compressors.Deflate
/// <returns>the number of bytes actually read</returns>
public override int Read(byte[] buffer, int offset, int count)
{
if (disposed)
if (_disposed)
{
throw new ObjectDisposedException("GZipStream");
}
@@ -272,9 +276,9 @@ namespace SharpCompress.Compressors.Deflate
// Console.WriteLine("GZipStream::Read(buffer, off({0}), c({1}) = {2}", offset, count, n);
// Console.WriteLine( Util.FormatByteArray(buffer, offset, n) );
if (!firstReadDone)
if (!_firstReadDone)
{
firstReadDone = true;
_firstReadDone = true;
FileName = BaseStream._GzipFileName;
Comment = BaseStream._GzipComment;
}
@@ -325,7 +329,7 @@ namespace SharpCompress.Compressors.Deflate
/// <param name="count">the number of bytes to write.</param>
public override void Write(byte[] buffer, int offset, int count)
{
if (disposed)
if (_disposed)
{
throw new ObjectDisposedException("GZipStream");
}
@@ -335,7 +339,7 @@ namespace SharpCompress.Compressors.Deflate
if (BaseStream._wantCompress)
{
// first write in compression, therefore, emit the GZIP header
headerByteCount = EmitHeader();
_headerByteCount = EmitHeader();
}
else
{
@@ -346,56 +350,56 @@ namespace SharpCompress.Compressors.Deflate
BaseStream.Write(buffer, offset, count);
}
#endregion
#endregion Stream methods
public String Comment
{
get => comment;
get => _comment;
set
{
if (disposed)
if (_disposed)
{
throw new ObjectDisposedException("GZipStream");
}
comment = value;
_comment = value;
}
}
public string FileName
{
get => fileName;
get => _fileName;
set
{
if (disposed)
if (_disposed)
{
throw new ObjectDisposedException("GZipStream");
}
fileName = value;
if (fileName == null)
_fileName = value;
if (_fileName == null)
{
return;
}
if (fileName.IndexOf("/") != -1)
if (_fileName.IndexOf("/") != -1)
{
fileName = fileName.Replace("/", "\\");
_fileName = _fileName.Replace("/", "\\");
}
if (fileName.EndsWith("\\"))
if (_fileName.EndsWith("\\"))
{
throw new InvalidOperationException("Illegal filename");
}
var index = fileName.IndexOf("\\");
var index = _fileName.IndexOf("\\");
if (index != -1)
{
// trim any leading path
int length = fileName.Length;
int length = _fileName.Length;
int num = length;
while (--num >= 0)
{
char c = fileName[num];
char c = _fileName[num];
if (c == '\\')
{
fileName = fileName.Substring(num + 1, length - num - 1);
_fileName = _fileName.Substring(num + 1, length - num - 1);
}
}
}
@@ -406,8 +410,10 @@ namespace SharpCompress.Compressors.Deflate
private int EmitHeader()
{
byte[] commentBytes = (Comment == null) ? null : ArchiveEncoding.Default.GetBytes(Comment);
byte[] filenameBytes = (FileName == null) ? null : ArchiveEncoding.Default.GetBytes(FileName);
byte[] commentBytes = (Comment == null) ? null
: _encoding.GetBytes(Comment);
byte[] filenameBytes = (FileName == null) ? null
: _encoding.GetBytes(FileName);
int cbLength = (Comment == null) ? 0 : commentBytes.Length + 1;
int fnLength = (FileName == null) ? 0 : filenameBytes.Length + 1;
@@ -440,7 +446,7 @@ namespace SharpCompress.Compressors.Deflate
{
LastModified = DateTime.Now;
}
TimeSpan delta = LastModified.Value - UnixEpoch;
TimeSpan delta = LastModified.Value - UNIX_EPOCH;
var timet = (Int32)delta.TotalSeconds;
DataConverter.LittleEndian.PutBytes(header, i, timet);
i += 4;

View File

@@ -418,7 +418,7 @@ namespace SharpCompress.Compressors.Deflate
internal sealed class Adler
{
// largest prime smaller than 65536
private static readonly int BASE = 65521;
private static readonly uint BASE = 65521U;
// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
private static readonly int NMAX = 5552;
@@ -430,8 +430,8 @@ namespace SharpCompress.Compressors.Deflate
return 1;
}
int s1 = (int)(adler & 0xffff);
int s2 = (int)((adler >> 16) & 0xffff);
uint s1 = adler & 0xffffU;
uint s2 = (adler >> 16) & 0xffffU;
while (len > 0)
{
@@ -486,7 +486,7 @@ namespace SharpCompress.Compressors.Deflate
s1 %= BASE;
s2 %= BASE;
}
return (uint)((s2 << 16) | s1);
return (s2 << 16) | s1;
}
}
}

View File

@@ -1,20 +1,20 @@
// ZlibBaseStream.cs
// ------------------------------------------------------------------
//
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
// All rights reserved.
//
// This code module is part of DotNetZip, a zipfile class library.
//
// ------------------------------------------------------------------
//
// This code is licensed under the Microsoft Public License.
// This code is licensed under the Microsoft Public License.
// See the file License.txt for the license details.
// More info on: http://dotnetzip.codeplex.com
//
// ------------------------------------------------------------------
//
// last saved (in emacs):
// last saved (in emacs):
// Time-stamp: <2009-October-28 15:45:15>
//
// ------------------------------------------------------------------
@@ -30,6 +30,7 @@ using System.IO;
using SharpCompress.Common;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Converters;
using System.Text;
namespace SharpCompress.Compressors.Deflate
{
@@ -64,6 +65,8 @@ namespace SharpCompress.Compressors.Deflate
protected internal DateTime _GzipMtime;
protected internal int _gzipHeaderByteCount;
private readonly Encoding _encoding;
internal int Crc32
{
get
@@ -80,7 +83,8 @@ namespace SharpCompress.Compressors.Deflate
CompressionMode compressionMode,
CompressionLevel level,
ZlibStreamFlavor flavor,
bool leaveOpen)
bool leaveOpen,
Encoding encoding)
{
_flushMode = FlushType.None;
@@ -91,6 +95,8 @@ namespace SharpCompress.Compressors.Deflate
_flavor = flavor;
_level = level;
_encoding = encoding;
// workitem 7159
if (flavor == ZlibStreamFlavor.GZIP)
{
@@ -418,8 +424,8 @@ namespace SharpCompress.Compressors.Deflate
}
}
while (!done);
byte[] a = list.ToArray();
return ArchiveEncoding.Default.GetString(a, 0, a.Length);
byte[] buffer = list.ToArray();
return _encoding.GetString(buffer, 0, buffer.Length);
}
private int _ReadAndValidateGzipHeader()
@@ -528,19 +534,19 @@ namespace SharpCompress.Compressors.Deflate
}
if (buffer == null)
{
throw new ArgumentNullException("buffer");
throw new ArgumentNullException(nameof(buffer));
}
if (count < 0)
{
throw new ArgumentOutOfRangeException("count");
throw new ArgumentOutOfRangeException(nameof(count));
}
if (offset < buffer.GetLowerBound(0))
{
throw new ArgumentOutOfRangeException("offset");
throw new ArgumentOutOfRangeException(nameof(offset));
}
if ((offset + count) > buffer.GetLength(0))
{
throw new ArgumentOutOfRangeException("count");
throw new ArgumentOutOfRangeException(nameof(count));
}
int rc = 0;
@@ -593,7 +599,7 @@ namespace SharpCompress.Compressors.Deflate
while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK);
// workitem 8557
// is there more room in output?
// is there more room in output?
if (_z.AvailableBytesOut > 0)
{
if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0)

View File

@@ -27,6 +27,7 @@
using System;
using System.IO;
using System.Text;
namespace SharpCompress.Compressors.Deflate
{
@@ -36,23 +37,23 @@ namespace SharpCompress.Compressors.Deflate
private bool _disposed;
public ZlibStream(Stream stream, CompressionMode mode)
: this(stream, mode, CompressionLevel.Default, false)
: this(stream, mode, CompressionLevel.Default, false, Encoding.UTF8)
{
}
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level)
: this(stream, mode, level, false)
: this(stream, mode, level, false, Encoding.UTF8)
{
}
public ZlibStream(Stream stream, CompressionMode mode, bool leaveOpen)
: this(stream, mode, CompressionLevel.Default, leaveOpen)
: this(stream, mode, CompressionLevel.Default, leaveOpen, Encoding.UTF8)
{
}
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen, Encoding encoding)
{
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen);
_baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen, encoding);
}
#region Zlib properties
@@ -326,6 +327,6 @@ namespace SharpCompress.Compressors.Deflate
_baseStream.Write(buffer, offset, count);
}
#endregion
#endregion System.IO.Stream methods
}
}

View File

@@ -0,0 +1,13 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
namespace SharpCompress.Compressors.Deflate64
{
/// <summary>
/// DEFLATE block types, matching the 2-bit BTYPE field of a block header
/// (RFC 1951, section 3.2.3).
/// </summary>
internal enum BlockType
{
    Uncompressed = 0, // stored block, no compression
    Static = 1,       // compressed with the fixed (static) Huffman tables
    Dynamic = 2       // compressed with tables transmitted in the block
}
}

View File

@@ -0,0 +1,257 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using SharpCompress.Common.Zip;
using SharpCompress.Compressors.Deflate;
using System;
using System.Diagnostics;
using System.IO;
using System.Runtime.CompilerServices;
namespace SharpCompress.Compressors.Deflate64
{
/// <summary>
/// A read-only <see cref="Stream"/> that decompresses Deflate64 ("Enhanced
/// Deflate") data from an underlying stream via <see cref="InflaterManaged"/>.
/// Only <see cref="CompressionMode.Decompress"/> is implemented; writing and
/// seeking are not supported.
/// </summary>
public sealed partial class Deflate64Stream : Stream
{
    // Size of the scratch buffer used to pull compressed bytes from _stream.
    internal const int DefaultBufferSize = 8192;

    private Stream _stream;            // underlying compressed stream; null once disposed
    private CompressionMode _mode;     // always Decompress in this implementation
    private bool _leaveOpen;           // when true, _stream is not disposed with this stream
    private InflaterManaged _inflater; // managed Deflate/Deflate64 decoder
    private byte[] _buffer;            // scratch buffer feeding the inflater

    /// <summary>
    /// Creates a decompressing Deflate64 stream over <paramref name="stream"/>.
    /// </summary>
    /// <param name="stream">Readable stream positioned at Deflate64 data.</param>
    /// <param name="mode">Must be <see cref="CompressionMode.Decompress"/>.</param>
    /// <param name="level">Unused; compression is not implemented.</param>
    /// <param name="leaveOpen">True to keep <paramref name="stream"/> open after disposal.</param>
    /// <exception cref="ArgumentNullException"><paramref name="stream"/> is null.</exception>
    /// <exception cref="NotImplementedException"><paramref name="mode"/> is not Decompress.</exception>
    /// <exception cref="ArgumentException"><paramref name="stream"/> is not readable.</exception>
    public Deflate64Stream(Stream stream, CompressionMode mode,
                           CompressionLevel level = CompressionLevel.Default,
                           bool leaveOpen = false)
    {
        if (stream == null)
            throw new ArgumentNullException(nameof(stream));
        if (mode != CompressionMode.Decompress)
            throw new NotImplementedException("Deflate64: this implementation only supports decompression");
        if (!stream.CanRead)
            throw new ArgumentException("Deflate64: input stream is not readable", nameof(stream));

        InitializeInflater(stream, leaveOpen, ZipCompressionMethod.Deflate64);
    }

    /// <summary>
    /// Sets up this DeflateManagedStream to be used for Inflation/Decompression.
    /// </summary>
    /// <param name="method">Deflate or Deflate64; selects the inflater variant.</param>
    internal void InitializeInflater(Stream stream, bool leaveOpen, ZipCompressionMethod method = ZipCompressionMethod.Deflate)
    {
        Debug.Assert(stream != null);
        Debug.Assert(method == ZipCompressionMethod.Deflate || method == ZipCompressionMethod.Deflate64);
        if (!stream.CanRead)
            throw new ArgumentException("Deflate64: input stream is not readable", nameof(stream));

        _inflater = new InflaterManaged(method == ZipCompressionMethod.Deflate64);

        _stream = stream;
        _mode = CompressionMode.Decompress;
        _leaveOpen = leaveOpen;
        _buffer = new byte[DefaultBufferSize];
    }

    public override bool CanRead
    {
        get
        {
            if (_stream == null)
            {
                // disposed
                return false;
            }

            return (_mode == CompressionMode.Decompress && _stream.CanRead);
        }
    }

    public override bool CanWrite
    {
        get
        {
            if (_stream == null)
            {
                // disposed
                return false;
            }

            // Always false here: _mode is always Decompress in this implementation.
            return (_mode == CompressionMode.Compress && _stream.CanWrite);
        }
    }

    public override bool CanSeek => false;

    /// <summary>Not supported: the decompressed length is not known up front.</summary>
    public override long Length
    {
        get { throw new NotSupportedException("Deflate64: not supported"); }
    }

    /// <summary>Not supported: this stream is forward-only.</summary>
    public override long Position
    {
        get { throw new NotSupportedException("Deflate64: not supported"); }
        set { throw new NotSupportedException("Deflate64: not supported"); }
    }

    /// <summary>No-op for this read-only stream; only verifies the stream is not disposed.</summary>
    public override void Flush()
    {
        EnsureNotDisposed();
    }

    public override long Seek(long offset, SeekOrigin origin)
    {
        throw new NotSupportedException("Deflate64: not supported");
    }

    public override void SetLength(long value)
    {
        throw new NotSupportedException("Deflate64: not supported");
    }

    /// <summary>
    /// Decompresses up to <paramref name="count"/> bytes into <paramref name="array"/>.
    /// Returns the number of bytes produced; fewer than <paramref name="count"/> bytes
    /// (possibly 0) indicates the end of the compressed data or of the underlying stream.
    /// </summary>
    public override int Read(byte[] array, int offset, int count)
    {
        EnsureDecompressionMode();
        ValidateParameters(array, offset, count);
        EnsureNotDisposed();

        int bytesRead;
        int currentOffset = offset;
        int remainingCount = count;

        while (true)
        {
            // Drain whatever the inflater can already produce.
            bytesRead = _inflater.Inflate(array, currentOffset, remainingCount);
            currentOffset += bytesRead;
            remainingCount -= bytesRead;

            if (remainingCount == 0)
            {
                break;
            }

            if (_inflater.Finished())
            {
                // if we finished decompressing, we can't have anything left in the output window.
                Debug.Assert(_inflater.AvailableOutput == 0, "We should have copied all stuff out!");
                break;
            }

            // Refill the inflater with more compressed bytes from the source stream.
            int bytes = _stream.Read(_buffer, 0, _buffer.Length);
            if (bytes <= 0)
            {
                // underlying stream exhausted
                break;
            }
            else if (bytes > _buffer.Length)
            {
                // The stream is either malicious or poorly implemented and returned a number of
                // bytes larger than the buffer supplied to it.
                throw new InvalidDataException("Deflate64: invalid data");
            }

            _inflater.SetInput(_buffer, 0, bytes);
        }

        return count - remainingCount;
    }

    // Standard Stream.Read argument validation (null buffer, negative offset/count,
    // and offset+count exceeding the buffer).
    private void ValidateParameters(byte[] array, int offset, int count)
    {
        if (array == null)
            throw new ArgumentNullException(nameof(array));
        if (offset < 0)
            throw new ArgumentOutOfRangeException(nameof(offset));
        if (count < 0)
            throw new ArgumentOutOfRangeException(nameof(count));
        if (array.Length - offset < count)
            throw new ArgumentException("Deflate64: invalid offset/count combination");
    }

    private void EnsureNotDisposed()
    {
        if (_stream == null)
            ThrowStreamClosedException();
    }

    // Throw helpers are kept out-of-line (NoInlining) so the cheap Ensure* checks
    // remain small and inlinable.
    [MethodImpl(MethodImplOptions.NoInlining)]
    private static void ThrowStreamClosedException()
    {
        throw new ObjectDisposedException(null, "Deflate64: stream has been disposed");
    }

    private void EnsureDecompressionMode()
    {
        if (_mode != CompressionMode.Decompress)
            ThrowCannotReadFromDeflateManagedStreamException();
    }

    [MethodImpl(MethodImplOptions.NoInlining)]
    private static void ThrowCannotReadFromDeflateManagedStreamException()
    {
        throw new InvalidOperationException("Deflate64: cannot read from this stream");
    }

    // NOTE(review): never called in this decompression-only implementation;
    // presumably kept for parity with the upstream .NET DeflateManagedStream source.
    private void EnsureCompressionMode()
    {
        if (_mode != CompressionMode.Compress)
            ThrowCannotWriteToDeflateManagedStreamException();
    }

    [MethodImpl(MethodImplOptions.NoInlining)]
    private static void ThrowCannotWriteToDeflateManagedStreamException()
    {
        throw new InvalidOperationException("Deflate64: cannot write to this stream");
    }

    /// <summary>Always throws; this stream is read-only.</summary>
    public override void Write(byte[] array, int offset, int count)
    {
        ThrowCannotWriteToDeflateManagedStreamException();
    }

    // This is called by Dispose:
    private void PurgeBuffers(bool disposing)
    {
        if (!disposing)
            return;

        if (_stream == null)
            return;

        Flush();
    }

    protected override void Dispose(bool disposing)
    {
        try
        {
            PurgeBuffers(disposing);
        }
        finally
        {
            // Close the underlying stream even if PurgeBuffers threw.
            // Stream.Close() may throw here (may or may not be due to the same error).
            // In this case, we still need to clean up internal resources, hence the inner finally blocks.
            try
            {
                if (disposing && !_leaveOpen && _stream != null)
                    _stream.Dispose();
            }
            finally
            {
                _stream = null;

                try
                {
                    _inflater?.Dispose();
                }
                finally
                {
                    _inflater = null;
                    base.Dispose(disposing);
                }
            }
        }
    }
}
}

View File

@@ -0,0 +1,43 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System.Diagnostics;
namespace SharpCompress.Compressors.Deflate64
{
/// <summary>
/// A mutable window over a caller-supplied input buffer: <see cref="Count"/>
/// bytes remain to be consumed, starting at <see cref="StartIndex"/>.
/// Supports saving and rewinding the window position.
/// </summary>
internal sealed class DeflateInput
{
    internal byte[] Buffer { get; set; }
    internal int Count { get; set; }
    internal int StartIndex { get; set; }

    /// <summary>Advances the window past <paramref name="n"/> consumed bytes.</summary>
    internal void ConsumeBytes(int n)
    {
        Debug.Assert(n <= Count, "Should use more bytes than what we have in the buffer");
        Count -= n;
        StartIndex += n;
        Debug.Assert(StartIndex + Count <= Buffer.Length, "Input buffer is in invalid state!");
    }

    /// <summary>Captures the current window position so it can be rewound later.</summary>
    internal InputState DumpState()
    {
        return new InputState(Count, StartIndex);
    }

    /// <summary>Rewinds the window to a previously captured position.</summary>
    internal void RestoreState(InputState state)
    {
        StartIndex = state._startIndex;
        Count = state._count;
    }

    // Snapshot of (Count, StartIndex); Buffer itself is not captured.
    internal /*readonly */struct InputState
    {
        internal readonly int _count;
        internal readonly int _startIndex;

        internal InputState(int count, int startIndex)
        {
            _startIndex = startIndex;
            _count = count;
        }
    }
}
}

View File

@@ -0,0 +1,245 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System.Diagnostics;
namespace SharpCompress.Compressors.Deflate64
{
/// <summary>
/// Static lookup tables and bit helpers shared by the managed
/// Deflate/Deflate64 implementation (ported from the .NET corefx
/// DeflateManaged sources). The data tables must not be modified.
/// </summary>
internal static class FastEncoderStatics
{
    // static information for encoding, DO NOT MODIFY
    internal static readonly byte[] FastEncoderTreeStructureData =
    {
        0xec,0xbd,0x07,0x60,0x1c,0x49,0x96,0x25,0x26,0x2f,0x6d,0xca,
        0x7b,0x7f,0x4a,0xf5,0x4a,0xd7,0xe0,0x74,0xa1,0x08,0x80,0x60,
        0x13,0x24,0xd8,0x90,0x40,0x10,0xec,0xc1,0x88,0xcd,0xe6,0x92,
        0xec,0x1d,0x69,0x47,0x23,0x29,0xab,0x2a,0x81,0xca,0x65,0x56,
        0x65,0x5d,0x66,0x16,0x40,0xcc,0xed,0x9d,0xbc,0xf7,0xde,0x7b,
        0xef,0xbd,0xf7,0xde,0x7b,0xef,0xbd,0xf7,0xba,0x3b,0x9d,0x4e,
        0x27,0xf7,0xdf,0xff,0x3f,0x5c,0x66,0x64,0x01,0x6c,0xf6,0xce,
        0x4a,0xda,0xc9,0x9e,0x21,0x80,0xaa,0xc8,0x1f,0x3f,0x7e,0x7c,
        0x1f,0x3f
    };

    // Same tree structure data, but with the BFINAL bit set (differs only in the first byte).
    internal static readonly byte[] BFinalFastEncoderTreeStructureData =
    {
        0xed,0xbd,0x07,0x60,0x1c,0x49,0x96,0x25,0x26,0x2f,0x6d,0xca,
        0x7b,0x7f,0x4a,0xf5,0x4a,0xd7,0xe0,0x74,0xa1,0x08,0x80,0x60,
        0x13,0x24,0xd8,0x90,0x40,0x10,0xec,0xc1,0x88,0xcd,0xe6,0x92,
        0xec,0x1d,0x69,0x47,0x23,0x29,0xab,0x2a,0x81,0xca,0x65,0x56,
        0x65,0x5d,0x66,0x16,0x40,0xcc,0xed,0x9d,0xbc,0xf7,0xde,0x7b,
        0xef,0xbd,0xf7,0xde,0x7b,0xef,0xbd,0xf7,0xba,0x3b,0x9d,0x4e,
        0x27,0xf7,0xdf,0xff,0x3f,0x5c,0x66,0x64,0x01,0x6c,0xf6,0xce,
        0x4a,0xda,0xc9,0x9e,0x21,0x80,0xaa,0xc8,0x1f,0x3f,0x7e,0x7c,
        0x1f,0x3f
    };

    // Output a currentMatch with length matchLen (>= MIN_MATCH) and displacement matchPos
    //
    // Optimisation: unlike the other encoders, here we have an array of codes for each currentMatch
    // length (not just each currentMatch length slot), complete with all the extra bits filled in, in
    // a single array element.
    //
    // There are many advantages to doing this:
    //
    // 1. A single array lookup on g_FastEncoderLiteralCodeInfo, instead of separate array lookups
    //    on g_LengthLookup (to get the length slot), g_FastEncoderLiteralTreeLength,
    //    g_FastEncoderLiteralTreeCode, g_ExtraLengthBits, and g_BitMask
    //
    // 2. The array is an array of ULONGs, so no access penalty, unlike for accessing those USHORT
    //    code arrays in the other encoders (although they could be made into ULONGs with some
    //    modifications to the source).
    //
    // Note, if we could guarantee that codeLen <= 16 always, then we could skip an if statement here.
    //
    // A completely different optimisation is used for the distance codes since, obviously, a table for
    // all 8192 distances combining their extra bits is not feasible. The distance codeinfo table is
    // made up of code[], len[] and # extraBits for this code.
    //
    // The advantages are similar to the above; a ULONG array instead of a USHORT and BYTE array, better
    // cache locality, fewer memory operations.

    // Encoding information for literal and Length.
    // The least 5 significant bits are the length
    // and the rest is the code bits.
    internal static readonly uint[] FastEncoderLiteralCodeInfo =
    {
        0x0000d7ee,0x0004d7ee,0x0002d7ee,0x0006d7ee,0x0001d7ee,0x0005d7ee,0x0003d7ee,
        0x0007d7ee,0x000037ee,0x0000c7ec,0x00000126,0x000437ee,0x000237ee,0x000637ee,
        0x000137ee,0x000537ee,0x000337ee,0x000737ee,0x0000b7ee,0x0004b7ee,0x0002b7ee,
        0x0006b7ee,0x0001b7ee,0x0005b7ee,0x0003b7ee,0x0007b7ee,0x000077ee,0x000477ee,
        0x000277ee,0x000677ee,0x000017ed,0x000177ee,0x00000526,0x000577ee,0x000023ea,
        0x0001c7ec,0x000377ee,0x000777ee,0x000217ed,0x000063ea,0x00000b68,0x00000ee9,
        0x00005beb,0x000013ea,0x00000467,0x00001b68,0x00000c67,0x00002ee9,0x00000768,
        0x00001768,0x00000f68,0x00001ee9,0x00001f68,0x00003ee9,0x000053ea,0x000001e9,
        0x000000e8,0x000021e9,0x000011e9,0x000010e8,0x000031e9,0x000033ea,0x000008e8,
        0x0000f7ee,0x0004f7ee,0x000018e8,0x000009e9,0x000004e8,0x000029e9,0x000014e8,
        0x000019e9,0x000073ea,0x0000dbeb,0x00000ce8,0x00003beb,0x0002f7ee,0x000039e9,
        0x00000bea,0x000005e9,0x00004bea,0x000025e9,0x000027ec,0x000015e9,0x000035e9,
        0x00000de9,0x00002bea,0x000127ec,0x0000bbeb,0x0006f7ee,0x0001f7ee,0x0000a7ec,
        0x00007beb,0x0005f7ee,0x0000fbeb,0x0003f7ee,0x0007f7ee,0x00000fee,0x00000326,
        0x00000267,0x00000a67,0x00000667,0x00000726,0x00001ce8,0x000002e8,0x00000e67,
        0x000000a6,0x0001a7ec,0x00002de9,0x000004a6,0x00000167,0x00000967,0x000002a6,
        0x00000567,0x000117ed,0x000006a6,0x000001a6,0x000005a6,0x00000d67,0x000012e8,
        0x00000ae8,0x00001de9,0x00001ae8,0x000007eb,0x000317ed,0x000067ec,0x000097ed,
        0x000297ed,0x00040fee,0x00020fee,0x00060fee,0x00010fee,0x00050fee,0x00030fee,
        0x00070fee,0x00008fee,0x00048fee,0x00028fee,0x00068fee,0x00018fee,0x00058fee,
        0x00038fee,0x00078fee,0x00004fee,0x00044fee,0x00024fee,0x00064fee,0x00014fee,
        0x00054fee,0x00034fee,0x00074fee,0x0000cfee,0x0004cfee,0x0002cfee,0x0006cfee,
        0x0001cfee,0x0005cfee,0x0003cfee,0x0007cfee,0x00002fee,0x00042fee,0x00022fee,
        0x00062fee,0x00012fee,0x00052fee,0x00032fee,0x00072fee,0x0000afee,0x0004afee,
        0x0002afee,0x0006afee,0x0001afee,0x0005afee,0x0003afee,0x0007afee,0x00006fee,
        0x00046fee,0x00026fee,0x00066fee,0x00016fee,0x00056fee,0x00036fee,0x00076fee,
        0x0000efee,0x0004efee,0x0002efee,0x0006efee,0x0001efee,0x0005efee,0x0003efee,
        0x0007efee,0x00001fee,0x00041fee,0x00021fee,0x00061fee,0x00011fee,0x00051fee,
        0x00031fee,0x00071fee,0x00009fee,0x00049fee,0x00029fee,0x00069fee,0x00019fee,
        0x00059fee,0x00039fee,0x00079fee,0x00005fee,0x00045fee,0x00025fee,0x00065fee,
        0x00015fee,0x00055fee,0x00035fee,0x00075fee,0x0000dfee,0x0004dfee,0x0002dfee,
        0x0006dfee,0x0001dfee,0x0005dfee,0x0003dfee,0x0007dfee,0x00003fee,0x00043fee,
        0x00023fee,0x00063fee,0x00013fee,0x00053fee,0x00033fee,0x00073fee,0x0000bfee,
        0x0004bfee,0x0002bfee,0x0006bfee,0x0001bfee,0x0005bfee,0x0003bfee,0x0007bfee,
        0x00007fee,0x00047fee,0x00027fee,0x00067fee,0x00017fee,0x000197ed,0x000397ed,
        0x000057ed,0x00057fee,0x000257ed,0x00037fee,0x000157ed,0x00077fee,0x000357ed,
        0x0000ffee,0x0004ffee,0x0002ffee,0x0006ffee,0x0001ffee,0x00000084,0x00000003,
        0x00000184,0x00000044,0x00000144,0x000000c5,0x000002c5,0x000001c5,0x000003c6,
        0x000007c6,0x00000026,0x00000426,0x000003a7,0x00000ba7,0x000007a7,0x00000fa7,
        0x00000227,0x00000627,0x00000a27,0x00000e27,0x00000068,0x00000868,0x00001068,
        0x00001868,0x00000369,0x00001369,0x00002369,0x00003369,0x000006ea,0x000026ea,
        0x000046ea,0x000066ea,0x000016eb,0x000036eb,0x000056eb,0x000076eb,0x000096eb,
        0x0000b6eb,0x0000d6eb,0x0000f6eb,0x00003dec,0x00007dec,0x0000bdec,0x0000fdec,
        0x00013dec,0x00017dec,0x0001bdec,0x0001fdec,0x00006bed,0x0000ebed,0x00016bed,
        0x0001ebed,0x00026bed,0x0002ebed,0x00036bed,0x0003ebed,0x000003ec,0x000043ec,
        0x000083ec,0x0000c3ec,0x000103ec,0x000143ec,0x000183ec,0x0001c3ec,0x00001bee,
        0x00009bee,0x00011bee,0x00019bee,0x00021bee,0x00029bee,0x00031bee,0x00039bee,
        0x00041bee,0x00049bee,0x00051bee,0x00059bee,0x00061bee,0x00069bee,0x00071bee,
        0x00079bee,0x000167f0,0x000367f0,0x000567f0,0x000767f0,0x000967f0,0x000b67f0,
        0x000d67f0,0x000f67f0,0x001167f0,0x001367f0,0x001567f0,0x001767f0,0x001967f0,
        0x001b67f0,0x001d67f0,0x001f67f0,0x000087ef,0x000187ef,0x000287ef,0x000387ef,
        0x000487ef,0x000587ef,0x000687ef,0x000787ef,0x000887ef,0x000987ef,0x000a87ef,
        0x000b87ef,0x000c87ef,0x000d87ef,0x000e87ef,0x000f87ef,0x0000e7f0,0x0002e7f0,
        0x0004e7f0,0x0006e7f0,0x0008e7f0,0x000ae7f0,0x000ce7f0,0x000ee7f0,0x0010e7f0,
        0x0012e7f0,0x0014e7f0,0x0016e7f0,0x0018e7f0,0x001ae7f0,0x001ce7f0,0x001ee7f0,
        0x0005fff3,0x000dfff3,0x0015fff3,0x001dfff3,0x0025fff3,0x002dfff3,0x0035fff3,
        0x003dfff3,0x0045fff3,0x004dfff3,0x0055fff3,0x005dfff3,0x0065fff3,0x006dfff3,
        0x0075fff3,0x007dfff3,0x0085fff3,0x008dfff3,0x0095fff3,0x009dfff3,0x00a5fff3,
        0x00adfff3,0x00b5fff3,0x00bdfff3,0x00c5fff3,0x00cdfff3,0x00d5fff3,0x00ddfff3,
        0x00e5fff3,0x00edfff3,0x00f5fff3,0x00fdfff3,0x0003fff3,0x000bfff3,0x0013fff3,
        0x001bfff3,0x0023fff3,0x002bfff3,0x0033fff3,0x003bfff3,0x0043fff3,0x004bfff3,
        0x0053fff3,0x005bfff3,0x0063fff3,0x006bfff3,0x0073fff3,0x007bfff3,0x0083fff3,
        0x008bfff3,0x0093fff3,0x009bfff3,0x00a3fff3,0x00abfff3,0x00b3fff3,0x00bbfff3,
        0x00c3fff3,0x00cbfff3,0x00d3fff3,0x00dbfff3,0x00e3fff3,0x00ebfff3,0x00f3fff3,
        0x00fbfff3,0x0007fff3,0x000ffff3,0x0017fff3,0x001ffff3,0x0027fff3,0x002ffff3,
        0x0037fff3,0x003ffff3,0x0047fff3,0x004ffff3,0x0057fff3,0x005ffff3,0x0067fff3,
        0x006ffff3,0x0077fff3,0x007ffff3,0x0087fff3,0x008ffff3,0x0097fff3,0x009ffff3,
        0x00a7fff3,0x00affff3,0x00b7fff3,0x00bffff3,0x00c7fff3,0x00cffff3,0x00d7fff3,
        0x00dffff3,0x00e7fff3,0x00effff3,0x00f7fff3,0x00fffff3,0x0001e7f1,0x0003e7f1,
        0x0005e7f1,0x0007e7f1,0x0009e7f1,0x000be7f1,0x000de7f1,0x000fe7f1,0x0011e7f1,
        0x0013e7f1,0x0015e7f1,0x0017e7f1,0x0019e7f1,0x001be7f1,0x001de7f1,0x001fe7f1,
        0x0021e7f1,0x0023e7f1,0x0025e7f1,0x0027e7f1,0x0029e7f1,0x002be7f1,0x002de7f1,
        0x002fe7f1,0x0031e7f1,0x0033e7f1,0x0035e7f1,0x0037e7f1,0x0039e7f1,0x003be7f1,
        0x003de7f1,0x000047eb
    };

    // Encoding information for the distance codes: code bits, code length,
    // and the number of extra bits packed per entry (see comment above).
    internal static readonly uint[] FastEncoderDistanceCodeInfo =
    {
        0x00000f06,0x0001ff0a,0x0003ff0b,0x0007ff0b,0x0000ff19,0x00003f18,0x0000bf28,
        0x00007f28,0x00001f37,0x00005f37,0x00000d45,0x00002f46,0x00000054,0x00001d55,
        0x00000864,0x00000365,0x00000474,0x00001375,0x00000c84,0x00000284,0x00000a94,
        0x00000694,0x00000ea4,0x000001a4,0x000009b4,0x00000bb5,0x000005c4,0x00001bc5,
        0x000007d5,0x000017d5,0x00000000,0x00000100
    };

    // BitMask[n] == (1 << n) - 1, for extracting the low n bits of a value.
    internal static readonly uint[] BitMask = { 0, 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767 };

    // Number of extra bits carried by each length code / distance code (RFC 1951).
    internal static readonly byte[] ExtraLengthBits = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 };
    internal static readonly byte[] ExtraDistanceBits = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 0, 0 };

    internal const int NumChars = 256;
    internal const int NumLengthBaseCodes = 29;
    internal const int NumDistBaseCodes = 30;

    internal const uint FastEncoderPostTreeBitBuf = 0x0022;
    internal const int FastEncoderPostTreeBitCount = 9;

    internal const uint NoCompressionHeader = 0x0;
    internal const int NoCompressionHeaderBitCount = 3;
    internal const uint BFinalNoCompressionHeader = 0x1;
    internal const int BFinalNoCompressionHeaderBitCount = 3;
    internal const int MaxCodeLen = 16;

    // Maps a match distance to its distance slot; see GetSlot for the indexing scheme.
    private static readonly byte[] s_distLookup = CreateDistanceLookup();

    private static byte[] CreateDistanceLookup()
    {
        byte[] result = new byte[512];

        // Generate the global slot tables which allow us to convert a distance
        // (0..32K) to a distance slot (0..29)
        //
        // Distance table
        //   Extra           Extra                Extra
        // Code Bits Dist  Code Bits   Dist     Code Bits Distance
        // ---- ---- ----  ---- ----  ------    ---- ---- --------
        //   0   0    1     10   4     33-48    20    9   1025-1536
        //   1   0    2     11   4     49-64    21    9   1537-2048
        //   2   0    3     12   5     65-96    22   10   2049-3072
        //   3   0    4     13   5     97-128   23   10   3073-4096
        //   4   1   5,6    14   6    129-192   24   11   4097-6144
        //   5   1   7,8    15   6    193-256   25   11   6145-8192
        //   6   2   9-12   16   7    257-384   26   12  8193-12288
        //   7   2  13-16   17   7    385-512   27   12 12289-16384
        //   8   3  17-24   18   8    513-768   28   13 16385-24576
        //   9   3  25-32   19   8   769-1024   29   13 24577-32768

        // Initialize the mapping length (0..255) -> length code (0..28)
        //int length = 0;
        //for (code = 0; code < FastEncoderStatics.NumLengthBaseCodes-1; code++) {
        //    for (int n = 0; n < (1 << FastEncoderStatics.ExtraLengthBits[code]); n++)
        //        lengthLookup[length++] = (byte) code;
        //}
        //lengthLookup[length-1] = (byte) code;

        // Initialize the mapping dist (0..32K) -> dist code (0..29)
        int dist = 0;
        int code;
        for (code = 0; code < 16; code++)
        {
            for (int n = 0; n < (1 << ExtraDistanceBits[code]); n++)
                result[dist++] = (byte)code;
        }

        dist >>= 7; // from now on, all distances are divided by 128

        for (; code < NumDistBaseCodes; code++)
        {
            for (int n = 0; n < (1 << (ExtraDistanceBits[code] - 7)); n++)
                result[256 + dist++] = (byte)code;
        }

        return result;
    }

    // Return the position slot (0...29) of a match offset (0...32767)
    internal static int GetSlot(int pos) =>
        s_distLookup[((pos) < 256) ? (pos) : (256 + ((pos) >> 7))];

    // Reverse 'length' of the bits in code
    public static uint BitReverse(uint code, int length)
    {
        uint new_code = 0;

        Debug.Assert(length > 0 && length <= 16, "Invalid len");
        do
        {
            new_code |= (code & 1);
            new_code <<= 1;
            code >>= 1;
        } while (--length > 0);

        return new_code >> 1;
    }
}
}

View File

@@ -0,0 +1,311 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System.Diagnostics;
using System.IO;
namespace SharpCompress.Compressors.Deflate64
{
// Strictly speaking this class is not a HuffmanTree, this class is
// a lookup table combined with a HuffmanTree. The idea is to speed up
// the lookup for short symbols (they should appear more frequently ideally.)
// However we don't want to create a huge table since it might take longer to
// build the table than decoding (Deflate usually generates new tables frequently.)
//
// Jean-loup Gailly and Mark Adler gave a very good explanation about this.
// The full text (algorithm.txt) can be found inside
// ftp://ftp.uu.net/pub/archiving/zip/zlib/zlib.zip.
//
// Following paper explains decoding in details:
// Hirschberg and Lelewer, "Efficient decoding of prefix codes,"
// Comm. ACM, 33,4, April 1990, pp. 449-459.
//
internal sealed class HuffmanTree
{
// Symbol-count limits for the three tree kinds handled here (RFC 1951).
internal const int MaxLiteralTreeElements = 288;
internal const int MaxDistTreeElements = 32;
internal const int EndOfBlockCode = 256;
internal const int NumberOfCodeLengthTreeElements = 19;

private readonly int _tableBits;          // bits resolved by the fast lookup table (9 or 7, set in ctor)
private readonly short[] _table;          // fast lookup table; negative entries point into _left/_right
private readonly short[] _left;           // left children of the overflow tree (codes longer than _tableBits)
private readonly short[] _right;          // right children of the overflow tree
private readonly byte[] _codeLengthArray; // code length (in bits) per symbol
#if DEBUG
private uint[] _codeArrayDebug;           // codes computed in CreateTable, kept for debugging only
#endif

private readonly int _tableMask;          // (1 << _tableBits) - 1

// huffman tree for static block
public static HuffmanTree StaticLiteralLengthTree { get; } = new HuffmanTree(GetStaticLiteralTreeLength());
public static HuffmanTree StaticDistanceTree { get; } = new HuffmanTree(GetStaticDistanceTreeLength());
/// <summary>
/// Builds a decoding structure (fast table + overflow tree) from the given
/// per-symbol code lengths. The array length identifies the tree kind:
/// literal/length, distance, or code-length tree.
/// </summary>
public HuffmanTree(byte[] codeLengths)
{
    Debug.Assert(
        codeLengths.Length == MaxLiteralTreeElements ||
        codeLengths.Length == MaxDistTreeElements ||
        codeLengths.Length == NumberOfCodeLengthTreeElements,
        "we only expect three kinds of Length here");
    _codeLengthArray = codeLengths;

    // 9 table bits for the literal/length tree; 7 bits suffice for the
    // smaller distance and code-length trees.
    _tableBits = _codeLengthArray.Length == MaxLiteralTreeElements ? 9 : 7;
    _tableMask = (1 << _tableBits) - 1;
    _table = new short[1 << _tableBits];

    // Overflow-tree child arrays, sized 2*N for N symbols — assumed sufficient
    // (the original comment noted this lacks a formal proof).
    _left = new short[2 * _codeLengthArray.Length];
    _right = new short[2 * _codeLengthArray.Length];

    CreateTable();
}
// Generate the array contains huffman codes lengths for static huffman tree.
// The data is in RFC 1951.
// Code lengths for the static literal/length Huffman tree defined by
// RFC 1951, section 3.2.6:
//   symbols   0..143 -> 8 bits,  144..255 -> 9 bits,
//   symbols 256..279 -> 7 bits,  280..287 -> 8 bits.
private static byte[] GetStaticLiteralTreeLength()
{
    byte[] lengths = new byte[MaxLiteralTreeElements];
    int i = 0;
    while (i <= 143) lengths[i++] = 8;
    while (i <= 255) lengths[i++] = 9;
    while (i <= 279) lengths[i++] = 7;
    while (i < MaxLiteralTreeElements) lengths[i++] = 8;
    return lengths;
}
// Code lengths for the static distance tree: all 32 distance symbols use
// 5-bit codes (RFC 1951, section 3.2.6).
private static byte[] GetStaticDistanceTreeLength()
{
    byte[] lengths = new byte[MaxDistTreeElements];
    for (int i = 0; i < lengths.Length; i++)
    {
        lengths[i] = 5;
    }
    return lengths;
}
// Calculate the huffman code for each character based on the code length for each character.
// This algorithm is described in standard RFC 1951
// Assign the canonical Huffman code for every symbol from its code length,
// following the algorithm in RFC 1951, section 3.2.2. Returned codes are
// bit-reversed, matching the LSB-first bit order of the DEFLATE stream.
private uint[] CalculateHuffmanCode()
{
    // Histogram: how many symbols use each code length (1..16).
    uint[] countsPerLength = new uint[17];
    for (int i = 0; i < _codeLengthArray.Length; i++)
    {
        countsPerLength[_codeLengthArray[i]]++;
    }
    countsPerLength[0] = 0; // length 0 means the symbol is unused

    // Smallest code value for each length.
    uint[] nextCode = new uint[17];
    uint running = 0;
    for (int bits = 1; bits <= 16; bits++)
    {
        running = (running + countsPerLength[bits - 1]) << 1;
        nextCode[bits] = running;
    }

    // Hand out consecutive code values within each length, bit-reversed.
    uint[] codes = new uint[MaxLiteralTreeElements];
    for (int ch = 0; ch < _codeLengthArray.Length; ch++)
    {
        int len = _codeLengthArray[ch];
        if (len <= 0)
        {
            continue;
        }
        codes[ch] = FastEncoderStatics.BitReverse(nextCode[len], len);
        nextCode[len]++;
    }
    return codes;
}
// Build the decoder structure from the per-symbol code lengths: codes no
// longer than _tableBits are expanded directly into _table; longer codes
// spill into a binary tree stored in _left/_right (reached via negative
// table entries). Throws InvalidDataException on inconsistent code data.
private void CreateTable()
{
    uint[] codeArray = CalculateHuffmanCode();
#if DEBUG
    _codeArrayDebug = codeArray;
#endif
    // Next free node slot in the overflow tree; stored negated so table
    // entries > 0 are symbols and entries < 0 are tree pointers.
    short avail = (short)_codeLengthArray.Length;

    for (int ch = 0; ch < _codeLengthArray.Length; ch++)
    {
        // length of this code
        int len = _codeLengthArray[ch];
        if (len > 0)
        {
            // start value (bit reversed)
            int start = (int)codeArray[ch];

            if (len <= _tableBits)
            {
                // If a particular symbol is shorter than nine bits,
                // then that symbol's translation is duplicated
                // in all those entries that start with that symbol's bits.
                // For example, if the symbol is four bits, then it's duplicated
                // 32 times in a nine-bit table. If a symbol is nine bits long,
                // it appears in the table once.
                //
                // Make sure that in the loop below, code is always
                // less than table_size.
                //
                // On last iteration we store at array index:
                //    initial_start_at + (locs-1)*increment
                //  = initial_start_at + locs*increment - increment
                //  = initial_start_at + (1 << tableBits) - increment
                //  = initial_start_at + table_size - increment
                //
                // Therefore we must ensure:
                //     initial_start_at + table_size - increment < table_size
                // or: initial_start_at < increment
                //
                int increment = 1 << len;
                if (start >= increment)
                {
                    throw new InvalidDataException("Deflate64: invalid Huffman data");
                }

                // Note the bits in the table are reverted.
                int locs = 1 << (_tableBits - len);
                for (int j = 0; j < locs; j++)
                {
                    _table[start] = (short)ch;
                    start += increment;
                }
            }
            else
            {
                // For any code whose length exceeds the table bits,
                // build a binary tree.

                int overflowBits = len - _tableBits; // the bits we need to represent beyond the table
                int codeBitMask = 1 << _tableBits; // mask to get current bit (the bits can't fit in the table)

                // The left/right arrays represent the remaining bits. After
                // resolving the first _tableBits bits through the table, we
                // follow the tree to find the real character. This avoids
                // bloating the table when only a few codes are long.
                int index = start & ((1 << _tableBits) - 1);
                short[] array = _table;

                do
                {
                    short value = array[index];
                    if (value == 0)
                    {
                        // set up next pointer if this node is not used before.
                        array[index] = (short)-avail; // use next available slot.
                        value = (short)-avail;
                        avail++;
                    }

                    if (value > 0)
                    {
                        // prevent an IndexOutOfRangeException from array[index]
                        throw new InvalidDataException("Deflate64: invalid Huffman data");
                    }

                    Debug.Assert(value < 0, "CreateTable: Only negative numbers are used for tree pointers!");

                    if ((start & codeBitMask) == 0)
                    {
                        // if current bit is 0, go change the left array
                        array = _left;
                    }
                    else
                    {
                        // if current bit is 1, set value in the right array
                        array = _right;
                    }
                    index = -value; // go to next node

                    codeBitMask <<= 1;
                    overflowBits--;
                } while (overflowBits != 0);

                array[index] = (short)ch;
            }
        }
    }
}
// Decodes the next Huffman symbol from the input.
// Returns the symbol on success, or -1 when the input does not yet contain
// enough bits to decode a complete code (the caller should supply more input
// and retry).
public int GetNextSymbol(InputBuffer input)
{
    // Stage up to 16 bits (the maximum code length is 15). If the buffer is
    // completely empty we cannot make progress at all.
    uint bits = input.TryLoad16Bits();
    if (input.AvailableBits == 0)
    {
        return -1;
    }

    // First-level lookup on the low _tableBits bits.
    int symbol = _table[bits & _tableMask];

    // Negative table entries are (negated) links into the left/right
    // overflow trees; walk one tree level per additional bit.
    if (symbol < 0)
    {
        uint bitMask = (uint)1 << _tableBits;
        while (symbol < 0)
        {
            symbol = ((bits & bitMask) == 0) ? _left[-symbol] : _right[-symbol];
            bitMask <<= 1;
        }
    }

    int codeLength = _codeLengthArray[symbol];
    // Valid Huffman codes are at least one bit long; a zero length here means
    // we landed on an entry that no real code maps to.
    if (codeLength <= 0)
    {
        throw new InvalidDataException("Deflate64: invalid Huffman data");
    }

    // If the decoded code is longer than the bits we actually had, we only
    // matched a prefix and may have hit the wrong entry: since we already
    // tried to load 16 bits and the maximum code length is 15, this simply
    // means we are running out of input.
    if (codeLength > input.AvailableBits)
    {
        return -1;
    }

    input.SkipBits(codeLength);
    return symbol;
}
}
}

View File

@@ -0,0 +1,738 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// zlib.h -- interface of the 'zlib' general purpose compression library
// version 1.2.1, November 17th, 2003
//
// Copyright (C) 1995-2003 Jean-loup Gailly and Mark Adler
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
//
using System;
using System.Diagnostics;
using System.IO;
namespace SharpCompress.Compressors.Deflate64
{
internal sealed class InflaterManaged
{
// const tables used in decoding:
// Extra bits for length code 257 - 285.
// NOTE: the final entry (16 extra bits for code 285) is the Deflate64
// extension; plain deflate handles code 285 as a special case in DecodeBlock.
private static readonly byte[] s_extraLengthBits =
    { 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,16 };
// The base length for length code 257 - 285.
// The formula to get the real length for a length code is lengthBase[code - 257] + (value stored in extraBits)
private static readonly int[] s_lengthBase =
    { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,3};
// The base distance for distance code 0 - 31
// The real distance for a distance code is distanceBasePosition[code] + (value stored in extraBits)
// (codes 30 and 31 are Deflate64-only distances)
private static readonly int[] s_distanceBasePosition =
    { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,32769,49153 };
// code lengths for code length alphabet is stored in following order (RFC 1951, 3.2.7)
private static readonly byte[] s_codeOrder = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
// Precomputed bit-reversed 5-bit distance codes for static blocks.
private static readonly byte[] s_staticDistanceTreeTable =
{
    0x00,0x10,0x08,0x18,0x04,0x14,0x0c,0x1c,0x02,0x12,0x0a,0x1a,
    0x06,0x16,0x0e,0x1e,0x01,0x11,0x09,0x19,0x05,0x15,0x0d,0x1d,
    0x03,0x13,0x0b,0x1b,0x07,0x17,0x0f,0x1f
};
private readonly OutputWindow _output;   // circular window holding decompressed bytes
private readonly InputBuffer _input;     // bit-level reader over the caller's input
private HuffmanTree _literalLengthTree;  // current literal/length tree
private HuffmanTree _distanceTree;       // current distance tree
private InflaterState _state;            // resumable decoder state machine position
//private bool _hasFormatReader;
private int _bfinal;                     // BFINAL bit of the block being decoded
private BlockType _blockType;            // BTYPE of the block being decoded
// uncompressed block
private readonly byte[] _blockLengthBuffer = new byte[4]; // raw LEN/NLEN bytes
private int _blockLength;                                 // remaining bytes of the stored block
// compressed block
private int _length;                 // match length being decoded
private int _distanceCode;           // distance code of the match being decoded
private int _extraBits;              // pending extra-bit count for length/distance
private int _loopCounter;            // resumable loop index for header parsing
private int _literalLengthCodeCount; // HLIT + 257
private int _distanceCodeCount;      // HDIST + 1
private int _codeLengthCodeCount;    // HCLEN + 4
private int _codeArraySize;          // total code lengths expected (_literalLengthCodeCount + _distanceCodeCount)
private int _lengthCode;             // last symbol read from the code-length tree
private readonly byte[] _codeList; // temporary array to store the code length for literal/Length and distance
private readonly byte[] _codeLengthTreeCodeLength;
private readonly bool _deflate64;  // true => Deflate64 semantics (64k lengths, larger distances)
private HuffmanTree _codeLengthTree;
//private IFileFormatReader _formatReader;  // class to decode header and footer (e.g. gzip)
// Creates an inflater; pass deflate64 = true to enable the Deflate64
// extensions (64kb match lengths and larger distances).
internal InflaterManaged(/*IFileFormatReader reader, */bool deflate64)
{
    _deflate64 = deflate64;
    _input = new InputBuffer();
    _output = new OutputWindow();
    // Scratch buffers reused across every dynamic-block header.
    _codeList = new byte[HuffmanTree.MaxLiteralTreeElements + HuffmanTree.MaxDistTreeElements];
    _codeLengthTreeCodeLength = new byte[HuffmanTree.NumberOfCodeLengthTreeElements];
    //if (reader != null)
    //{
    //    _formatReader = reader;
    //    _hasFormatReader = true;
    //}
    Reset();
}
// Resets the state machine to the start of a raw deflate stream.
// (With a format reader this would start at InflaterState.ReadingHeader
// to consume the gzip/zlib header first.)
private void Reset() => _state = InflaterState.ReadingBFinal;
/// <summary>Appends more compressed input; the array is referenced, not copied.</summary>
public void SetInput(byte[] inputBytes, int offset, int length) =>
    _input.SetInput(inputBytes, offset, length); // append the bytes

/// <summary>True once the final (BFINAL) block has been fully decoded.</summary>
public bool Finished() => _state == InflaterState.Done || _state == InflaterState.VerifyingFooter;

/// <summary>Decompressed bytes waiting in the output window, not yet copied out.</summary>
public int AvailableOutput => _output.AvailableBytes;
/// <summary>
/// Decompresses up to <paramref name="length"/> bytes into
/// <paramref name="bytes"/> starting at <paramref name="offset"/>.
/// Returns the number of bytes produced; 0 means more input is needed
/// (or the stream is finished).
/// </summary>
public int Inflate(byte[] bytes, int offset, int length)
{
    // copy bytes from output to outputbytes if we have available bytes
    // if buffer is not filled up. keep decoding until no input are available
    // if decodeBlock returns false. Throw an exception.
    int count = 0;
    do
    {
        int copied = _output.CopyTo(bytes, offset, length);
        if (copied > 0)
        {
            //if (_hasFormatReader)
            //{
            //    _formatReader.UpdateWithBytesRead(bytes, offset, copied);
            //}
            offset += copied;
            count += copied;
            length -= copied;
        }
        if (length == 0)
        { // filled in the bytes array
            break;
        }
        // Decode will return false when more input is needed
    } while (!Finished() && Decode());
    if (_state == InflaterState.VerifyingFooter)
    { // finished reading CRC
        // In this case finished is true and output window has all the data.
        // But some data in output window might not be copied out.
        if (_output.AvailableBytes == 0)
        {
            //_formatReader.Validate();
        }
    }
    return count;
}
//Each block of compressed data begins with 3 header bits
// containing the following data:
// first bit BFINAL
// next 2 bits BTYPE
// Note that the header bits do not necessarily begin on a byte
// boundary, since a block does not necessarily occupy an integral
// number of bytes.
// BFINAL is set if and only if this is the last block of the data
// set.
// BTYPE specifies how the data are compressed, as follows:
// 00 - no compression
// 01 - compressed with fixed Huffman codes
// 10 - compressed with dynamic Huffman codes
// 11 - reserved (error)
// The only difference between the two compressed cases is how the
// Huffman codes for the literal/length and distance alphabets are
// defined.
//
// This function returns true for success (end of block or output window is full,)
// false if we are short of input
//
// Decodes as much of the current block as the available input allows.
// Returns true on progress (end of block reached, or output window full);
// returns false when more input is required. See the format comment above.
//
// Fix: the ReadingBFinal early-return previously omitted braces, unlike
// every other conditional in this file; braces added for consistency.
private bool Decode()
{
    bool eob = false;
    bool result = false;

    if (Finished())
    {
        return true;
    }

    //if (_hasFormatReader)
    //{
    //    if (_state == InflaterState.ReadingHeader)
    //    {
    //        if (!_formatReader.ReadHeader(_input))
    //        {
    //            return false;
    //        }
    //        _state = InflaterState.ReadingBFinal;
    //    }
    //    else if (_state == InflaterState.StartReadingFooter || _state == InflaterState.ReadingFooter)
    //    {
    //        if (!_formatReader.ReadFooter(_input))
    //            return false;
    //        _state = InflaterState.VerifyingFooter;
    //        return true;
    //    }
    //}

    if (_state == InflaterState.ReadingBFinal)
    {
        // Need 1 bit: the "this is the last block" flag.
        if (!_input.EnsureBitsAvailable(1))
        {
            return false;
        }
        _bfinal = _input.GetBits(1);
        _state = InflaterState.ReadingBType;
    }

    if (_state == InflaterState.ReadingBType)
    {
        // Need 2 bits: the block type (BTYPE).
        if (!_input.EnsureBitsAvailable(2))
        {
            _state = InflaterState.ReadingBType;
            return false;
        }
        _blockType = (BlockType)_input.GetBits(2);
        if (_blockType == BlockType.Dynamic)
        {
            _state = InflaterState.ReadingNumLitCodes;
        }
        else if (_blockType == BlockType.Static)
        {
            // Static blocks use the fixed RFC 1951 trees; no header to read.
            _literalLengthTree = HuffmanTree.StaticLiteralLengthTree;
            _distanceTree = HuffmanTree.StaticDistanceTree;
            _state = InflaterState.DecodeTop;
        }
        else if (_blockType == BlockType.Uncompressed)
        {
            _state = InflaterState.UncompressedAligning;
        }
        else
        {
            throw new InvalidDataException("Deflate64: unknown block type");
        }
    }

    if (_blockType == BlockType.Dynamic)
    {
        if (_state < InflaterState.DecodeTop)
        {
            // we are reading the header
            result = DecodeDynamicBlockHeader();
        }
        else
        {
            result = DecodeBlock(out eob); // this can return true when output is full
        }
    }
    else if (_blockType == BlockType.Static)
    {
        result = DecodeBlock(out eob);
    }
    else if (_blockType == BlockType.Uncompressed)
    {
        result = DecodeUncompressedBlock(out eob);
    }
    else
    {
        throw new InvalidDataException("Deflate64: unknown block type");
    }

    //
    // If we reached the end of the block and the block we were decoding had
    // bfinal=1 (final block)
    //
    if (eob && (_bfinal != 0))
    {
        //if (_hasFormatReader)
        //    _state = InflaterState.StartReadingFooter;
        //else
        _state = InflaterState.Done;
    }
    return result;
}
// Format of Non-compressed blocks (BTYPE=00):
//
// Any bits of input up to the next byte boundary are ignored.
// The rest of the block consists of the following information:
//
// 0 1 2 3 4...
// +---+---+---+---+================================+
// | LEN | NLEN |... LEN bytes of literal data...|
// +---+---+---+---+================================+
//
// LEN is the number of data bytes in the block. NLEN is the
// one's complement of LEN.
// Decodes a stored (BTYPE=00) block. Resumable: _state records which of the
// four LEN/NLEN bytes has been read, and DecodingUncompressed copies payload
// bytes until _blockLength is exhausted. Returns true when the block ends or
// the output window fills; false when more input is needed.
private bool DecodeUncompressedBlock(out bool end_of_block)
{
    end_of_block = false;
    while (true)
    {
        switch (_state)
        {
            case InflaterState.UncompressedAligning: // initial state when calling this function
                // we must skip to a byte boundary
                _input.SkipToByteBoundary();
                _state = InflaterState.UncompressedByte1;
                goto case InflaterState.UncompressedByte1;

            case InflaterState.UncompressedByte1: // decoding block length
            case InflaterState.UncompressedByte2:
            case InflaterState.UncompressedByte3:
            case InflaterState.UncompressedByte4:
                int bits = _input.GetBits(8);
                if (bits < 0)
                {
                    return false;
                }
                // Index 0..3 derived from how far through the header we are.
                _blockLengthBuffer[_state - InflaterState.UncompressedByte1] = (byte)bits;
                if (_state == InflaterState.UncompressedByte4)
                {
                    // LEN is little-endian; NLEN must be its one's complement.
                    _blockLength = _blockLengthBuffer[0] + ((int)_blockLengthBuffer[1]) * 256;
                    int blockLengthComplement = _blockLengthBuffer[2] + ((int)_blockLengthBuffer[3]) * 256;
                    // make sure complement matches
                    if ((ushort)_blockLength != (ushort)(~blockLengthComplement))
                    {
                        throw new InvalidDataException("Deflate64: invalid block length");
                    }
                }
                // advance to the next header byte (or to DecodingUncompressed)
                _state += 1;
                break;

            case InflaterState.DecodingUncompressed: // copying block data
                // Directly copy bytes from input to output.
                int bytesCopied = _output.CopyFrom(_input, _blockLength);
                _blockLength -= bytesCopied;
                if (_blockLength == 0)
                {
                    // Done with this block, need to re-init bit buffer for next block
                    _state = InflaterState.ReadingBFinal;
                    end_of_block = true;
                    return true;
                }
                // We can fail to copy all bytes for two reasons:
                //    Running out of Input
                //    running out of free space in output window
                if (_output.FreeBytes == 0)
                {
                    return true;
                }
                return false;

            default:
                Debug./*Fail*/Assert(false, "check why we are here!");
                throw new InvalidDataException("Deflate64: unknown state");
        }
    }
}
// Decodes literals and length/distance pairs from a compressed block
// (static or dynamic). Resumable via _state and the goto-case chain:
// DecodeTop -> HaveInitialLength -> HaveFullLength -> HaveDistCode.
// Returns false when more input is needed; true at end of block or when
// the output window cannot safely accept another (Deflate64) 64kb match.
private bool DecodeBlock(out bool end_of_block_code_seen)
{
    end_of_block_code_seen = false;
    int freeBytes = _output.FreeBytes; // it is a little bit faster than frequently accessing the property
    while (freeBytes > 65536)
    {
        // With Deflate64 we can have up to a 64kb length, so we ensure at least that much space is available
        // in the OutputWindow to avoid overwriting previous unflushed output data.
        int symbol;
        switch (_state)
        {
            case InflaterState.DecodeTop:
                // decode an element from the literal tree
                // TODO: optimize this!!!
                symbol = _literalLengthTree.GetNextSymbol(_input);
                if (symbol < 0)
                {
                    // running out of input
                    return false;
                }
                if (symbol < 256)
                {
                    // literal
                    _output.Write((byte)symbol);
                    --freeBytes;
                }
                else if (symbol == 256)
                {
                    // end of block
                    end_of_block_code_seen = true;
                    // Reset state
                    _state = InflaterState.ReadingBFinal;
                    return true;
                }
                else
                {
                    // length/distance pair
                    symbol -= 257; // length code started at 257
                    if (symbol < 8)
                    {
                        symbol += 3; // match length = 3,4,5,6,7,8,9,10
                        _extraBits = 0;
                    }
                    else if (!_deflate64 && symbol == 28)
                    {
                        // extra bits for code 285 is 0 (plain deflate only;
                        // Deflate64 reads 16 extra bits for code 285 instead)
                        symbol = 258; // code 285 means length 258
                        _extraBits = 0;
                    }
                    else
                    {
                        if (symbol < 0 || symbol >= s_extraLengthBits.Length)
                        {
                            throw new InvalidDataException("Deflate64: invalid data");
                        }
                        _extraBits = s_extraLengthBits[symbol];
                        Debug.Assert(_extraBits != 0, "We handle other cases separately!");
                    }
                    _length = symbol;
                    goto case InflaterState.HaveInitialLength;
                }
                break;

            case InflaterState.HaveInitialLength:
                if (_extraBits > 0)
                {
                    // record position so we resume here if input runs out mid-read
                    _state = InflaterState.HaveInitialLength;
                    int bits = _input.GetBits(_extraBits);
                    if (bits < 0)
                    {
                        return false;
                    }
                    if (_length < 0 || _length >= s_lengthBase.Length)
                    {
                        throw new InvalidDataException("Deflate64: invalid data");
                    }
                    _length = s_lengthBase[_length] + bits;
                }
                _state = InflaterState.HaveFullLength;
                goto case InflaterState.HaveFullLength;

            case InflaterState.HaveFullLength:
                if (_blockType == BlockType.Dynamic)
                {
                    _distanceCode = _distanceTree.GetNextSymbol(_input);
                }
                else
                {
                    // get distance code directly for static block
                    _distanceCode = _input.GetBits(5);
                    if (_distanceCode >= 0)
                    {
                        _distanceCode = s_staticDistanceTreeTable[_distanceCode];
                    }
                }
                if (_distanceCode < 0)
                {
                    // running out input
                    return false;
                }
                _state = InflaterState.HaveDistCode;
                goto case InflaterState.HaveDistCode;

            case InflaterState.HaveDistCode:
                // To avoid a table lookup we note that for distanceCode > 3,
                // extra_bits = (distanceCode-2) >> 1
                int offset;
                if (_distanceCode > 3)
                {
                    _extraBits = (_distanceCode - 2) >> 1;
                    int bits = _input.GetBits(_extraBits);
                    if (bits < 0)
                    {
                        return false;
                    }
                    offset = s_distanceBasePosition[_distanceCode] + bits;
                }
                else
                {
                    offset = _distanceCode + 1;
                }
                _output.WriteLengthDistance(_length, offset);
                freeBytes -= _length;
                _state = InflaterState.DecodeTop;
                break;

            default:
                Debug./*Fail*/Assert(false, "check why we are here!");
                throw new InvalidDataException("Deflate64: unknown state");
        }
    }
    return true;
}
// Format of the dynamic block header:
// 5 Bits: HLIT, # of Literal/Length codes - 257 (257 - 286)
// 5 Bits: HDIST, # of Distance codes - 1 (1 - 32)
// 4 Bits: HCLEN, # of Code Length codes - 4 (4 - 19)
//
// (HCLEN + 4) x 3 bits: code lengths for the code length
// alphabet given just above, in the order: 16, 17, 18,
// 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
//
// These code lengths are interpreted as 3-bit integers
// (0-7); as above, a code length of 0 means the
// corresponding symbol (literal/length or distance code
// length) is not used.
//
// HLIT + 257 code lengths for the literal/length alphabet,
// encoded using the code length Huffman code
//
// HDIST + 1 code lengths for the distance alphabet,
// encoded using the code length Huffman code
//
// The code length repeat codes can cross from HLIT + 257 to the
// HDIST + 1 code lengths. In other words, all code lengths form
// a single sequence of HLIT + HDIST + 258 values.
// Parses a dynamic block header (see the format comment above) and builds
// the literal/length and distance Huffman trees. Resumable: each case
// records its position in _state/_loopCounter so that returning false
// (more input needed) lets a later call pick up exactly where it left off.
private bool DecodeDynamicBlockHeader()
{
    switch (_state)
    {
        case InflaterState.ReadingNumLitCodes:
            _literalLengthCodeCount = _input.GetBits(5);
            if (_literalLengthCodeCount < 0)
            {
                return false;
            }
            _literalLengthCodeCount += 257; // HLIT + 257
            _state = InflaterState.ReadingNumDistCodes;
            goto case InflaterState.ReadingNumDistCodes;

        case InflaterState.ReadingNumDistCodes:
            _distanceCodeCount = _input.GetBits(5);
            if (_distanceCodeCount < 0)
            {
                return false;
            }
            _distanceCodeCount += 1; // HDIST + 1
            _state = InflaterState.ReadingNumCodeLengthCodes;
            goto case InflaterState.ReadingNumCodeLengthCodes;

        case InflaterState.ReadingNumCodeLengthCodes:
            _codeLengthCodeCount = _input.GetBits(4);
            if (_codeLengthCodeCount < 0)
            {
                return false;
            }
            _codeLengthCodeCount += 4; // HCLEN + 4
            _loopCounter = 0;
            _state = InflaterState.ReadingCodeLengthCodes;
            goto case InflaterState.ReadingCodeLengthCodes;

        case InflaterState.ReadingCodeLengthCodes:
            // Read 3-bit lengths for the code-length alphabet, in s_codeOrder order.
            while (_loopCounter < _codeLengthCodeCount)
            {
                int bits = _input.GetBits(3);
                if (bits < 0)
                {
                    return false;
                }
                _codeLengthTreeCodeLength[s_codeOrder[_loopCounter]] = (byte)bits;
                ++_loopCounter;
            }
            // Unsent code-length symbols get length 0 (unused).
            for (int i = _codeLengthCodeCount; i < s_codeOrder.Length; i++)
            {
                _codeLengthTreeCodeLength[s_codeOrder[i]] = 0;
            }
            // create huffman tree for code length
            _codeLengthTree = new HuffmanTree(_codeLengthTreeCodeLength);
            _codeArraySize = _literalLengthCodeCount + _distanceCodeCount;
            _loopCounter = 0; // reset loop count
            _state = InflaterState.ReadingTreeCodesBefore;
            goto case InflaterState.ReadingTreeCodesBefore;

        case InflaterState.ReadingTreeCodesBefore:
        case InflaterState.ReadingTreeCodesAfter:
            while (_loopCounter < _codeArraySize)
            {
                // "Before": we still need to read the next code-length symbol.
                // "After": the symbol is in _lengthCode and we resume reading its extra bits.
                if (_state == InflaterState.ReadingTreeCodesBefore)
                {
                    if ((_lengthCode = _codeLengthTree.GetNextSymbol(_input)) < 0)
                    {
                        return false;
                    }
                }
                // The alphabet for code lengths is as follows:
                //  0 - 15: Represent code lengths of 0 - 15
                //  16: Copy the previous code length 3 - 6 times.
                //      The next 2 bits indicate repeat length
                //            (0 = 3, ... , 3 = 6)
                //      Example:  Codes 8, 16 (+2 bits 11),
                //                16 (+2 bits 10) will expand to
                //                12 code lengths of 8 (1 + 6 + 5)
                //  17: Repeat a code length of 0 for 3 - 10 times.
                //      (3 bits of length)
                //  18: Repeat a code length of 0 for 11 - 138 times
                //      (7 bits of length)
                if (_lengthCode <= 15)
                {
                    _codeList[_loopCounter++] = (byte)_lengthCode;
                }
                else
                {
                    int repeatCount;
                    if (_lengthCode == 16)
                    {
                        if (!_input.EnsureBitsAvailable(2))
                        {
                            _state = InflaterState.ReadingTreeCodesAfter;
                            return false;
                        }
                        if (_loopCounter == 0)
                        {
                            // can't have "prev code" on first code
                            throw new InvalidDataException();
                        }
                        byte previousCode = _codeList[_loopCounter - 1];
                        repeatCount = _input.GetBits(2) + 3;
                        if (_loopCounter + repeatCount > _codeArraySize)
                        {
                            throw new InvalidDataException();
                        }
                        for (int j = 0; j < repeatCount; j++)
                        {
                            _codeList[_loopCounter++] = previousCode;
                        }
                    }
                    else if (_lengthCode == 17)
                    {
                        if (!_input.EnsureBitsAvailable(3))
                        {
                            _state = InflaterState.ReadingTreeCodesAfter;
                            return false;
                        }
                        repeatCount = _input.GetBits(3) + 3;
                        if (_loopCounter + repeatCount > _codeArraySize)
                        {
                            throw new InvalidDataException();
                        }
                        for (int j = 0; j < repeatCount; j++)
                        {
                            _codeList[_loopCounter++] = 0;
                        }
                    }
                    else
                    {
                        // code == 18
                        if (!_input.EnsureBitsAvailable(7))
                        {
                            _state = InflaterState.ReadingTreeCodesAfter;
                            return false;
                        }
                        repeatCount = _input.GetBits(7) + 11;
                        if (_loopCounter + repeatCount > _codeArraySize)
                        {
                            throw new InvalidDataException();
                        }
                        for (int j = 0; j < repeatCount; j++)
                        {
                            _codeList[_loopCounter++] = 0;
                        }
                    }
                }
                _state = InflaterState.ReadingTreeCodesBefore; // we want to read the next code.
            }
            break;

        default:
            Debug./*Fail*/Assert(false, "check why we are here!");
            throw new InvalidDataException("Deflate64: unknown state");
    }

    byte[] literalTreeCodeLength = new byte[HuffmanTree.MaxLiteralTreeElements];
    byte[] distanceTreeCodeLength = new byte[HuffmanTree.MaxDistTreeElements];

    // Create literal and distance tables
    Array.Copy(_codeList, 0, literalTreeCodeLength, 0, _literalLengthCodeCount);
    Array.Copy(_codeList, _literalLengthCodeCount, distanceTreeCodeLength, 0, _distanceCodeCount);

    // Make sure there is an end-of-block code, otherwise how could we ever end?
    if (literalTreeCodeLength[HuffmanTree.EndOfBlockCode] == 0)
    {
        throw new InvalidDataException();
    }

    _literalLengthTree = new HuffmanTree(literalTreeCodeLength);
    _distanceTree = new HuffmanTree(distanceTreeCodeLength);
    _state = InflaterState.DecodeTop;
    return true;
}
// Nothing to release: all state is managed arrays owned by this instance.
public void Dispose() { }
}
}

View File

@@ -0,0 +1,42 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
namespace SharpCompress.Compressors.Deflate64
{
// Do not rearrange the enum values.
// Resumable position of the InflaterManaged state machine. Values are
// explicit and ordered: code compares with '<' (e.g. _state < DecodeTop)
// and does arithmetic on the Uncompressed* run (_state += 1), so do not
// rearrange or renumber members.
internal enum InflaterState
{
    ReadingHeader = 0, // Only applies to GZIP
    ReadingBFinal = 2, // About to read bfinal bit
    ReadingBType = 3, // About to read blockType bits
    ReadingNumLitCodes = 4, // About to read # literal codes
    ReadingNumDistCodes = 5, // About to read # dist codes
    ReadingNumCodeLengthCodes = 6, // About to read # code length codes
    ReadingCodeLengthCodes = 7, // In the middle of reading the code length codes
    ReadingTreeCodesBefore = 8, // In the middle of reading tree codes (loop top)
    ReadingTreeCodesAfter = 9, // In the middle of reading tree codes (extension; code > 15)
    DecodeTop = 10, // About to decode a literal (char/match) in a compressed block
    HaveInitialLength = 11, // Decoding a match, have the literal code (base length)
    HaveFullLength = 12, // Ditto, now have the full match length (incl. extra length bits)
    HaveDistCode = 13, // Ditto, now have the distance code also, need extra dist bits
    /* uncompressed blocks */
    UncompressedAligning = 15,
    UncompressedByte1 = 16,
    UncompressedByte2 = 17,
    UncompressedByte3 = 18,
    UncompressedByte4 = 19,
    DecodingUncompressed = 20,
    // These three apply only to GZIP
    StartReadingFooter = 21, // (Initialisation for reading footer)
    ReadingFooter = 22,
    VerifyingFooter = 23,
    Done = 24 // Finished
}
}

View File

@@ -0,0 +1,202 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.Diagnostics;
namespace SharpCompress.Compressors.Deflate64
{
// This class can be used to read bits from an byte array quickly.
// Normally we get bits from 'bitBuffer' field and bitsInBuffer stores
// the number of bits available in 'BitBuffer'.
// When we used up the bits in bitBuffer, we will try to get byte from
// the byte array and copy the byte to appropiate position in bitBuffer.
//
// The byte array is not reused. We will go from 'start' to 'end'.
// When we reach the end, most read operations will return -1,
// which means we are running out of input.
// Reads bits from a byte array quickly, least-significant-bit first.
// Bits are staged in _bitBuffer and _bitsInBuffer tracks how many of its
// low bits are valid; bytes are consumed from _buffer[_start.._end).
// The byte array supplied via SetInput is referenced, never copied, and is
// consumed front to back. Once exhausted, most read operations return -1,
// which means we are running out of input.
internal sealed class InputBuffer
{
    private byte[] _buffer;       // current input chunk (caller-owned, not copied)
    private int _start;           // index of the next unread byte
    private int _end;             // index one past the last valid byte
    private uint _bitBuffer = 0;  // staged bits; LSB is the next bit handed out
    private int _bitsInBuffer = 0; // number of valid bits in _bitBuffer

    /// <summary>Total bits available in the input buffer.</summary>
    public int AvailableBits => _bitsInBuffer;

    /// <summary>Total bytes available in the input buffer.</summary>
    public int AvailableBytes => (_end - _start) + (_bitsInBuffer / 8);

    /// <summary>Ensure that count bits are in the bit buffer.</summary>
    /// <param name="count">Can be up to 16.</param>
    /// <returns>Returns false if input is not sufficient to make this true.</returns>
    public bool EnsureBitsAvailable(int count)
    {
        Debug.Assert(0 < count && count <= 16, "count is invalid.");
        // Each byte contributes 8 bits, so this loop runs at most twice.
        while (_bitsInBuffer < count)
        {
            if (NeedsInput())
            {
                return false;
            }
            _bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
            _bitsInBuffer += 8;
        }
        return true;
    }

    /// <summary>
    /// This function will try to load 16 or more bits into bitBuffer.
    /// It returns whatever is contained in bitBuffer after loading.
    /// The main difference between this and GetBits is that this will
    /// never return -1. So the caller needs to check AvailableBits to
    /// see how many bits are available.
    /// </summary>
    public uint TryLoad16Bits()
    {
        while (_bitsInBuffer < 16 && _start < _end)
        {
            _bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer;
            _bitsInBuffer += 8;
        }
        return _bitBuffer;
    }

    /// <summary>Gets count bits from the input buffer. Returns -1 if not enough bits available.</summary>
    public int GetBits(int count)
    {
        Debug.Assert(0 < count && count <= 16, "count is invalid.");
        if (!EnsureBitsAvailable(count))
        {
            return -1;
        }
        uint mask = ((uint)1 << count) - 1;
        int result = (int)(_bitBuffer & mask);
        _bitBuffer >>= count;
        _bitsInBuffer -= count;
        return result;
    }

    /// <summary>
    /// Copies length bytes from input buffer to output buffer starting at output[offset].
    /// You have to make sure, that the buffer is byte aligned. If not enough bytes are
    /// available, copies fewer bytes.
    /// </summary>
    /// <returns>Returns the number of bytes copied, 0 if no byte is available.</returns>
    public int CopyTo(byte[] output, int offset, int length)
    {
        Debug.Assert(output != null);
        Debug.Assert(offset >= 0);
        Debug.Assert(length >= 0);
        Debug.Assert(offset <= output.Length - length);
        Debug.Assert((_bitsInBuffer % 8) == 0);

        // Drain whole bytes already staged in the bit buffer first.
        int fromBitBuffer = 0;
        while (_bitsInBuffer > 0 && length > 0)
        {
            output[offset++] = (byte)_bitBuffer;
            _bitBuffer >>= 8;
            _bitsInBuffer -= 8;
            length--;
            fromBitBuffer++;
        }
        if (length == 0)
        {
            return fromBitBuffer;
        }

        // Then copy straight from the byte array, clamped to what remains.
        int remaining = _end - _start;
        if (length > remaining)
        {
            length = remaining;
        }
        Array.Copy(_buffer, _start, output, offset, length);
        _start += length;
        return fromBitBuffer + length;
    }

    /// <summary>
    /// Return true when all input bytes are used.
    /// This means the caller can call SetInput to add more input.
    /// </summary>
    public bool NeedsInput() => _start == _end;

    /// <summary>
    /// Set the byte array to be processed.
    /// All the bits remaining in bitBuffer will be processed before the new bytes.
    /// We don't clone the byte array here since it is expensive.
    /// The caller must make sure the buffer is not changed after it is
    /// passed in and before this function is called again.
    /// </summary>
    public void SetInput(byte[] buffer, int offset, int length)
    {
        Debug.Assert(buffer != null);
        Debug.Assert(offset >= 0);
        Debug.Assert(length >= 0);
        Debug.Assert(offset <= buffer.Length - length);
        Debug.Assert(_start == _end);
        _buffer = buffer;
        _start = offset;
        _end = offset + length;
    }

    /// <summary>Skip n bits in the buffer.</summary>
    public void SkipBits(int n)
    {
        Debug.Assert(_bitsInBuffer >= n, "No enough bits in the buffer, Did you call EnsureBitsAvailable?");
        _bitBuffer >>= n;
        _bitsInBuffer -= n;
    }

    /// <summary>Skips to the next byte boundary by discarding any partial byte of bits.</summary>
    public void SkipToByteBoundary()
    {
        int partialBits = _bitsInBuffer % 8;
        _bitBuffer >>= partialBits;
        _bitsInBuffer -= partialBits;
    }
}
}

View File

@@ -0,0 +1,17 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
namespace SharpCompress.Compressors.Deflate64
{
/// <summary>
/// This class represents a match in the history window.
/// </summary>
internal sealed class Match
{
    /// <summary>Whether this match carries a literal symbol, a back-reference, or both (see <see cref="MatchState"/>).</summary>
    internal MatchState State { get; set; }
    // NOTE(review): presumably the window position the back-reference points at — confirm against the encoder that sets it.
    internal int Position { get; set; }
    // NOTE(review): presumably the match length in bytes — confirm against the encoder that sets it.
    internal int Length { get; set; }
    /// <summary>The literal byte, meaningful when State includes HasSymbol.</summary>
    internal byte Symbol { get; set; }
}
}

View File

@@ -0,0 +1,13 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
namespace SharpCompress.Compressors.Deflate64
{
// What a Match carries. Values are bit-combinable:
// HasSymbolAndMatch == HasSymbol | HasMatch.
internal enum MatchState
{
    HasSymbol = 1,
    HasMatch = 2,
    HasSymbolAndMatch = 3
}
}

View File

@@ -0,0 +1,151 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.Diagnostics;
namespace SharpCompress.Compressors.Deflate64
{
/// <summary>
/// This class maintains a window for decompressed output.
/// We need to keep this because the decompressed information can be
/// a literal or a length/distance pair. For length/distance pair,
/// we need to look back in the output window and copy bytes from there.
/// We use a byte array of WindowSize circularly.
/// </summary>
internal sealed class OutputWindow
{
// With Deflate64 we can have up to a 65536 length as well as up to a 65538 distance. This means we need a Window that is at
// least 131074 bytes long so we have space to retrieve up to a full 64kb in lookback and place it in our buffer without
// overwriting existing data. OutputWindow requires that the WindowSize be an exponent of 2, so we round up to 2^18.
private const int WindowSize = 262144;
private const int WindowMask = 262143;
private readonly byte[] _window = new byte[WindowSize]; // The window is 2^18 bytes
private int _end; // this is the position to where we should write next byte
private int _bytesUsed; // The number of bytes in the output window which is not consumed.
/// <summary>Appends one decompressed byte to the circular window.</summary>
public void Write(byte b)
{
    Debug.Assert(_bytesUsed < WindowSize, "Can't add byte when window is full!");
    _window[_end] = b;
    _end = (_end + 1) & WindowMask; // wrap within the power-of-two window
    ++_bytesUsed;
}
/// <summary>
/// Replays a back-reference: moves back <paramref name="distance"/> bytes in
/// the already-produced output and copies <paramref name="length"/> bytes
/// from there to the current write position (overlap allowed, per DEFLATE).
/// </summary>
public void WriteLengthDistance(int length, int distance)
{
    Debug.Assert((_bytesUsed + length) <= WindowSize, "No Enough space");

    // move backwards distance bytes in the output stream,
    // and copy length bytes from this position to the output stream.
    _bytesUsed += length;
    int copyStart = (_end - distance) & WindowMask; // start position for copying.

    // Fast path: neither the source nor the destination run crosses the end
    // of the circular buffer, so no per-byte wrap masking is needed.
    int border = WindowSize - length;
    if (copyStart <= border && _end < border)
    {
        if (length <= distance)
        {
            // Non-overlapping ranges: a single block copy is safe.
            Array.Copy(_window, copyStart, _window, _end, length);
            _end += length;
        }
        else
        {
            // The referenced string may overlap the current
            // position; for example, if the last 2 bytes decoded have values
            // X and Y, a string reference with <length = 5, distance = 2>
            // adds X,Y,X,Y,X to the output stream.
            while (length-- > 0)
            {
                _window[_end++] = _window[copyStart++];
            }
        }
    }
    else
    {
        // copy byte by byte, wrapping both indices around the window
        while (length-- > 0)
        {
            _window[_end++] = _window[copyStart++];
            _end &= WindowMask;
            copyStart &= WindowMask;
        }
    }
}
/// <summary>
/// Copy up to length of bytes from input directly.
/// This is used for uncompressed block.
/// </summary>
public int CopyFrom(InputBuffer input, int length)
{
length = Math.Min(Math.Min(length, WindowSize - _bytesUsed), input.AvailableBytes);
int copied;
// We might need wrap around to copy all bytes.
int tailLen = WindowSize - _end;
if (length > tailLen)
{
// copy the first part
copied = input.CopyTo(_window, _end, tailLen);
if (copied == tailLen)
{
// only try to copy the second part if we have enough bytes in input
copied += input.CopyTo(_window, 0, length - tailLen);
}
}
else
{
// only one copy is needed if there is no wrap around.
copied = input.CopyTo(_window, _end, length);
}
_end = (_end + copied) & WindowMask;
_bytesUsed += copied;
return copied;
}
/// <summary>Free space in output window.</summary>
public int FreeBytes => WindowSize - _bytesUsed;
/// <summary>Bytes not consumed in output window.</summary>
public int AvailableBytes => _bytesUsed;
/// <summary>Copy the decompressed bytes to output array.</summary>
public int CopyTo(byte[] output, int offset, int length)
{
int copy_end;
if (length > _bytesUsed)
{
// we can copy all the decompressed bytes out
copy_end = _end;
length = _bytesUsed;
}
else
{
copy_end = (_end - _bytesUsed + length) & WindowMask; // copy length of bytes
}
int copied = length;
int tailLen = length - copy_end;
if (tailLen > 0)
{
// this means we need to copy two parts separately
// copy tailLen bytes from the end of output window
Array.Copy(_window, WindowSize - tailLen,
output, offset, tailLen);
offset += tailLen;
length = copy_end;
}
Array.Copy(_window, copy_end - length, output, offset, length);
_bytesUsed -= copied;
Debug.Assert(_bytesUsed >= 0, "check this function and find why we copied more bytes than we have");
return copied;
}
}
}

View File

@@ -58,7 +58,7 @@ namespace SharpCompress.Compressors.LZMA
{
if (index < 0 || index >= Length)
{
throw new ArgumentOutOfRangeException("index");
throw new ArgumentOutOfRangeException(nameof(index));
}
return (mBits[index >> 5] & (1u << (index & 31))) != 0;
@@ -69,7 +69,7 @@ namespace SharpCompress.Compressors.LZMA
{
if (index < 0 || index >= Length)
{
throw new ArgumentOutOfRangeException("index");
throw new ArgumentOutOfRangeException(nameof(index));
}
mBits[index >> 5] |= 1u << (index & 31);
@@ -79,7 +79,7 @@ namespace SharpCompress.Compressors.LZMA
{
if (index < 0 || index >= Length)
{
throw new ArgumentOutOfRangeException("index");
throw new ArgumentOutOfRangeException(nameof(index));
}
uint bits = mBits[index >> 5];

View File

@@ -1,5 +1,8 @@
using System;
using System.IO;
using SharpCompress.Converters;
using SharpCompress.Crypto;
using SharpCompress.IO;
namespace SharpCompress.Compressors.LZMA
{
@@ -14,29 +17,62 @@ namespace SharpCompress.Compressors.LZMA
public class LZipStream : Stream
{
private readonly Stream stream;
private readonly CountingWritableSubStream rawStream;
private bool disposed;
private readonly bool leaveOpen;
private bool finished;
public LZipStream(Stream stream, CompressionMode mode)
: this(stream, mode, false)
{
}
private long writeCount;
public LZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
public LZipStream(Stream stream, CompressionMode mode, bool leaveOpen = false)
{
if (mode != CompressionMode.Decompress)
{
throw new NotImplementedException("Only LZip decompression is currently supported");
}
Mode = mode;
this.leaveOpen = leaveOpen;
int dictionarySize = ValidateAndReadSize(stream);
if (dictionarySize == 0)
if (mode == CompressionMode.Decompress)
{
throw new IOException("Not an LZip stream");
int dSize = ValidateAndReadSize(stream);
if (dSize == 0)
{
throw new IOException("Not an LZip stream");
}
byte[] properties = GetProperties(dSize);
this.stream = new LzmaStream(properties, stream);
}
else
{
//default
int dSize = 104 * 1024;
WriteHeaderSize(stream);
rawStream = new CountingWritableSubStream(stream);
this.stream = new Crc32Stream(new LzmaStream(new LzmaEncoderProperties(true, dSize), false, rawStream));
}
}
public void Finish()
{
if (!finished)
{
if (Mode == CompressionMode.Compress)
{
var crc32Stream = (Crc32Stream)stream;
crc32Stream.WrappedStream.Dispose();
crc32Stream.Dispose();
var compressedCount = rawStream.Count;
var bytes = DataConverter.LittleEndian.GetBytes(crc32Stream.Crc);
rawStream.Write(bytes, 0, bytes.Length);
bytes = DataConverter.LittleEndian.GetBytes(writeCount);
rawStream.Write(bytes, 0, bytes.Length);
//total with headers
bytes = DataConverter.LittleEndian.GetBytes(compressedCount + 6 + 20);
rawStream.Write(bytes, 0, bytes.Length);
}
finished = true;
}
byte[] properties = GetProperties(dictionarySize);
this.stream = new LzmaStream(properties, stream);
}
#region Stream methods
@@ -48,19 +84,23 @@ namespace SharpCompress.Compressors.LZMA
return;
}
disposed = true;
if (disposing && !leaveOpen)
if (disposing)
{
stream.Dispose();
Finish();
if (!leaveOpen)
{
rawStream.Dispose();
}
}
}
public CompressionMode Mode { get; }
public override bool CanRead => stream.CanRead;
public override bool CanRead => Mode == CompressionMode.Decompress;
public override bool CanSeek => false;
public override bool CanWrite => false;
public override bool CanWrite => Mode == CompressionMode.Compress;
public override void Flush()
{
@@ -75,20 +115,16 @@ namespace SharpCompress.Compressors.LZMA
public override int Read(byte[] buffer, int offset, int count) => stream.Read(buffer, offset, count);
public override long Seek(long offset, SeekOrigin origin)
{
throw new NotSupportedException();
}
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
public override void SetLength(long value)
{
throw new NotImplementedException();
}
public override void SetLength(long value) => throw new NotImplementedException();
public override void Write(byte[] buffer, int offset, int count)
{
throw new NotImplementedException();
stream.Write(buffer, offset, count);
writeCount += count;
}
#endregion
/// <summary>
@@ -105,7 +141,7 @@ namespace SharpCompress.Compressors.LZMA
/// couldn't be read or it isn't a validate LZIP header, or the dictionary
/// size if it *is* a valid LZIP file.
/// </summary>
private static int ValidateAndReadSize(Stream stream)
public static int ValidateAndReadSize(Stream stream)
{
if (stream == null)
{
@@ -131,6 +167,17 @@ namespace SharpCompress.Compressors.LZMA
return (1 << basePower) - subtractionNumerator * (1 << (basePower - 4));
}
public static void WriteHeaderSize(Stream stream)
{
if (stream == null)
{
throw new ArgumentNullException(nameof(stream));
}
// hard coding the dictionary size encoding
byte[] header = new byte[6] {(byte)'L', (byte)'Z', (byte)'I', (byte)'P', 1, 113};
stream.Write(header, 0, 6);
}
/// <summary>
/// Creates a byte array to communicate the parameters and dictionary size to LzmaStream.
/// </summary>

View File

@@ -141,10 +141,7 @@ namespace SharpCompress.Compressors.LZMA
{
position = encoder.Code(null, true);
}
if (inputStream != null)
{
inputStream.Dispose();
}
inputStream?.Dispose();
}
base.Dispose(disposing);
}

View File

@@ -58,22 +58,22 @@ namespace SharpCompress.Compressors.LZMA.Utilites
{
if (stream == null)
{
throw new ArgumentNullException("stream");
throw new ArgumentNullException(nameof(stream));
}
if (buffer == null)
{
throw new ArgumentNullException("buffer");
throw new ArgumentNullException(nameof(buffer));
}
if (offset < 0 || offset > buffer.Length)
{
throw new ArgumentOutOfRangeException("offset");
throw new ArgumentOutOfRangeException(nameof(offset));
}
if (length < 0 || length > buffer.Length - offset)
{
throw new ArgumentOutOfRangeException("length");
throw new ArgumentOutOfRangeException(nameof(length));
}
while (length > 0)

View File

@@ -146,12 +146,12 @@ namespace SharpCompress.Compressors.PPMd.I1
{
if (target == null)
{
throw new ArgumentNullException("target");
throw new ArgumentNullException(nameof(target));
}
if (source == null)
{
throw new ArgumentNullException("source");
throw new ArgumentNullException(nameof(source));
}
EncodeStart(properties);
@@ -235,12 +235,12 @@ namespace SharpCompress.Compressors.PPMd.I1
{
if (target == null)
{
throw new ArgumentNullException("target");
throw new ArgumentNullException(nameof(target));
}
if (source == null)
{
throw new ArgumentNullException("source");
throw new ArgumentNullException(nameof(source));
}
DecodeStart(source, properties);

View File

@@ -31,7 +31,7 @@ namespace SharpCompress.Compressors.Rar {
{
currentCrc = RarCRC.CheckCrc(currentCrc, buffer, offset, result);
}
else if (GetCrc() != readStream.CurrentCrc)
else if (GetCrc() != readStream.CurrentCrc && count != 0)
{
// NOTE: we use the last FileHeader in a multipart volume to check CRC
throw new InvalidFormatException("file crc mismatch");

View File

@@ -0,0 +1,54 @@
using System;
using System.IO;
namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Endian-aware read/conversion helpers used by the XZ reader.
    /// </summary>
    public static class BinaryUtils
    {
        /// <summary>
        /// Reads a 32-bit little-endian signed integer from <paramref name="reader"/>.
        /// </summary>
        /// <exception cref="EndOfStreamException">Fewer than four bytes remained.</exception>
        public static int ReadLittleEndianInt32(this BinaryReader reader)
        {
            byte[] bytes = reader.ReadBytes(4);
            // BinaryReader.ReadBytes returns a short array at end-of-stream rather than
            // throwing; without this guard a truncated stream surfaced as
            // IndexOutOfRangeException instead of the expected EndOfStreamException
            // (the Stream overload below already throws EndOfStreamException).
            if (bytes.Length != 4)
            {
                throw new EndOfStreamException();
            }
            return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
        }
        /// <summary>Reads a 32-bit little-endian unsigned integer from <paramref name="reader"/>.</summary>
        internal static uint ReadLittleEndianUInt32(this BinaryReader reader)
        {
            return unchecked((uint)ReadLittleEndianInt32(reader));
        }
        /// <summary>
        /// Reads a 32-bit little-endian signed integer from <paramref name="stream"/>.
        /// </summary>
        /// <exception cref="EndOfStreamException">Fewer than four bytes remained.</exception>
        public static int ReadLittleEndianInt32(this Stream stream)
        {
            byte[] bytes = new byte[4];
            var read = stream.ReadFully(bytes);
            if (!read)
            {
                throw new EndOfStreamException();
            }
            return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
        }
        /// <summary>Reads a 32-bit little-endian unsigned integer from <paramref name="stream"/>.</summary>
        internal static uint ReadLittleEndianUInt32(this Stream stream)
        {
            return unchecked((uint)ReadLittleEndianInt32(stream));
        }
        /// <summary>Returns the value as four bytes in big-endian order.</summary>
        internal static byte[] ToBigEndianBytes(this uint uint32)
        {
            var result = BitConverter.GetBytes(uint32);
            if (BitConverter.IsLittleEndian)
            {
                Array.Reverse(result);
            }
            return result;
        }
        /// <summary>Returns the value as four bytes in little-endian order.</summary>
        internal static byte[] ToLittleEndianBytes(this uint uint32)
        {
            var result = BitConverter.GetBytes(uint32);
            if (!BitConverter.IsLittleEndian)
            {
                Array.Reverse(result);
            }
            return result;
        }
    }
}

View File

@@ -0,0 +1,10 @@
namespace SharpCompress.Compressors.Xz
{
/// <summary>
/// Block integrity-check identifiers from the XZ stream flags; the numeric
/// values are the on-disk flag values.
/// </summary>
public enum CheckType : byte
{
NONE = 0x00,
CRC32 = 0x01,
CRC64 = 0x04,
SHA256 = 0x0A
}
}

View File

@@ -0,0 +1,60 @@
using System;
using System.Collections.Generic;
namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Table-driven, reflected CRC-32 (default polynomial 0xEDB88320 — the
    /// standard zlib/PNG CRC-32) used to validate XZ headers and blocks.
    /// </summary>
    internal static class Crc32
    {
        public const UInt32 DefaultPolynomial = 0xedb88320u;
        public const UInt32 DefaultSeed = 0xffffffffu;
        // Cached lookup table for the default polynomial; built lazily on first use.
        private static UInt32[] defaultTable;
        /// <summary>Computes the CRC-32 of <paramref name="buffer"/> using the default polynomial and seed.</summary>
        public static UInt32 Compute(byte[] buffer)
        {
            return Compute(DefaultSeed, buffer);
        }
        /// <summary>Computes the CRC-32 of <paramref name="buffer"/> with an explicit seed.</summary>
        public static UInt32 Compute(UInt32 seed, byte[] buffer)
        {
            return Compute(DefaultPolynomial, seed, buffer);
        }
        /// <summary>Computes the CRC-32 of <paramref name="buffer"/> with an explicit polynomial and seed.</summary>
        public static UInt32 Compute(UInt32 polynomial, UInt32 seed, byte[] buffer)
        {
            // The final complement is part of the standard CRC-32 definition.
            return ~CalculateHash(InitializeTable(polynomial), seed, buffer, 0, buffer.Length);
        }
        // Builds (and, for the default polynomial, caches) the 256-entry lookup table.
        private static UInt32[] InitializeTable(UInt32 polynomial)
        {
            if (polynomial == DefaultPolynomial && defaultTable != null)
            {
                return defaultTable;
            }
            var createTable = new UInt32[256];
            for (var i = 0; i < 256; i++)
            {
                var entry = (UInt32)i;
                for (var j = 0; j < 8; j++)
                {
                    entry = (entry & 1) == 1 ? (entry >> 1) ^ polynomial : entry >> 1;
                }
                createTable[i] = entry;
            }
            if (polynomial == DefaultPolynomial)
            {
                defaultTable = createTable;
            }
            return createTable;
        }
        // Folds buffer[start..size) into the running crc. "size" is the exclusive
        // end index, matching the sibling Crc64.CalculateHash.
        private static UInt32 CalculateHash(UInt32[] table, UInt32 seed, IList<byte> buffer, int start, int size)
        {
            var crc = seed;
            // BUG FIX: the loop previously ran while "i < size - start", which would
            // skip trailing bytes whenever start > 0. All current callers pass
            // start == 0, so observable behavior is unchanged.
            for (var i = start; i < size; i++)
            {
                crc = (crc >> 8) ^ table[buffer[i] ^ crc & 0xff];
            }
            return crc;
        }
    }
}

View File

@@ -0,0 +1,57 @@
using System;
using System.Collections.Generic;
namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Table-driven CRC-64 using the reflected ISO-3309 polynomial. The default
    /// seed is zero and no final XOR is applied to the result.
    /// </summary>
    internal static class Crc64
    {
        public const UInt64 DefaultSeed = 0x0;
        internal static UInt64[] Table;
        public const UInt64 Iso3309Polynomial = 0xD800000000000000;
        /// <summary>Computes the CRC-64 of <paramref name="buffer"/> with the default seed.</summary>
        public static UInt64 Compute(byte[] buffer)
        {
            return Compute(DefaultSeed, buffer);
        }
        /// <summary>Computes the CRC-64 of <paramref name="buffer"/> with an explicit seed.</summary>
        public static UInt64 Compute(UInt64 seed, byte[] buffer)
        {
            // The shared lookup table is created on first use and reused afterwards.
            if (Table == null)
            {
                Table = CreateTable(Iso3309Polynomial);
            }
            return CalculateHash(seed, Table, buffer, 0, buffer.Length);
        }
        /// <summary>Folds buffer[start..size) into the running crc; "size" is the exclusive end index.</summary>
        public static UInt64 CalculateHash(UInt64 seed, UInt64[] table, IList<byte> buffer, int start, int size)
        {
            UInt64 crc = seed;
            for (int pos = start; pos < size; pos++)
            {
                unchecked
                {
                    crc = (crc >> 8) ^ table[(buffer[pos] ^ crc) & 0xff];
                }
            }
            return crc;
        }
        /// <summary>Builds the 256-entry lookup table for the given (reflected) polynomial.</summary>
        public static ulong[] CreateTable(ulong polynomial)
        {
            var result = new UInt64[256];
            for (int b = 0; b < 256; ++b)
            {
                UInt64 entry = (UInt64)b;
                for (int bit = 0; bit < 8; ++bit)
                {
                    entry = (entry & 1) == 1 ? (entry >> 1) ^ polynomial : entry >> 1;
                }
                result[b] = entry;
            }
            return result;
        }
    }
}

View File

@@ -0,0 +1,53 @@
using System;
using System.Collections.Generic;
using System.IO;
namespace SharpCompress.Compressors.Xz.Filters
{
    /// <summary>
    /// Base class for the filters that can appear in an XZ block header.
    /// Filters are chained; decoded block data flows through each in turn.
    /// </summary>
    internal abstract class BlockFilter : ReadOnlyStream
    {
        /// <summary>Filter identifiers as defined by the XZ file format.</summary>
        public enum FilterTypes : ulong
        {
            DELTA = 0x03,
            ARCH_x86_FILTER = 0x04,
            ARCH_PowerPC_FILTER = 0x05,
            ARCH_IA64_FILTER = 0x06,
            ARCH_ARM_FILTER = 0x07,
            ARCH_ARMTHUMB_FILTER = 0x08,
            ARCH_SPARC_FILTER = 0x09,
            LZMA2 = 0x21,
        }
        // Maps known filter ids to implementations; only LZMA2 is implemented here.
        // readonly: the map is fixed after type initialization.
        private static readonly Dictionary<FilterTypes, Type> FilterMap = new Dictionary<FilterTypes, Type>()
        {
            {FilterTypes.LZMA2, typeof(Lzma2Filter) }
        };
        /// <summary>Whether this filter may be the last one in the chain.</summary>
        public abstract bool AllowAsLast { get; }
        /// <summary>Whether this filter may appear before the last one in the chain.</summary>
        public abstract bool AllowAsNonLast { get; }
        /// <summary>Whether the filter's output size differs from its input size.</summary>
        public abstract bool ChangesDataSize { get; }
        // protected: abstract classes should not expose public constructors (CA1012).
        protected BlockFilter() { }
        /// <summary>Initializes the filter from its raw header property bytes.</summary>
        public abstract void Init(byte[] properties);
        public abstract void ValidateFilter();
        public FilterTypes FilterType { get; set; }
        /// <summary>
        /// Reads one filter entry (id, property size, property bytes) from a block
        /// header and returns an initialized filter instance.
        /// </summary>
        public static BlockFilter Read(BinaryReader reader)
        {
            var filterType = (FilterTypes)reader.ReadXZInteger();
            // Single dictionary lookup instead of ContainsKey followed by the indexer.
            if (!FilterMap.TryGetValue(filterType, out Type mappedType))
                throw new NotImplementedException($"Filter {filterType} has not yet been implemented");
            var filter = Activator.CreateInstance(mappedType) as BlockFilter;
            var sizeOfProperties = reader.ReadXZInteger();
            if (sizeOfProperties > int.MaxValue)
                throw new InvalidDataException("Block filter information too large");
            byte[] properties = reader.ReadBytes((int)sizeOfProperties);
            filter.Init(properties);
            return filter;
        }
        /// <summary>Attaches the upstream source this filter decodes from.</summary>
        public abstract void SetBaseStream(Stream stream);
    }
}

View File

@@ -0,0 +1,54 @@
using System;
using System.IO;
namespace SharpCompress.Compressors.Xz.Filters
{
    /// <summary>
    /// The LZMA2 block filter: decodes LZMA2-compressed block data.
    /// </summary>
    internal class Lzma2Filter : BlockFilter
    {
        public override bool AllowAsLast => true;
        public override bool AllowAsNonLast => false;
        public override bool ChangesDataSize => true;
        // Encoded dictionary-size byte from the filter properties (low 6 bits of byte 0).
        byte _dictionarySize;
        /// <summary>
        /// Decoded dictionary size in bytes per the XZ LZMA2 property encoding:
        /// mantissa 2 or 3 shifted by a derived exponent; the value 40 means uint.MaxValue.
        /// </summary>
        public uint DictionarySize
        {
            get
            {
                if (_dictionarySize > 40)
                {
                    throw new OverflowException("Dictionary size greater than UInt32.Max");
                }
                if (_dictionarySize == 40)
                {
                    return uint.MaxValue;
                }
                int mantissa = 2 | (_dictionarySize & 1);
                int exponent = _dictionarySize / 2 + 11;
                return (uint)mantissa << exponent;
            }
        }
        public override void Init(byte[] properties)
        {
            if (properties.Length != 1)
            {
                throw new InvalidDataException("LZMA properties unexpected length");
            }
            // Low 6 bits carry the dictionary size; the top two bits must be clear.
            _dictionarySize = (byte)(properties[0] & 0x3F);
            if ((properties[0] & 0xC0) != 0)
            {
                throw new InvalidDataException("Reserved bits used in LZMA properties");
            }
        }
        public override void ValidateFilter()
        {
        }
        public override void SetBaseStream(Stream stream)
        {
            BaseStream = new SharpCompress.Compressors.LZMA.LzmaStream(new[] { _dictionarySize }, stream);
        }
        public override int Read(byte[] buffer, int offset, int count) => BaseStream.Read(buffer, offset, count);
    }
}

View File

@@ -0,0 +1,32 @@
using System;
using System.IO;
namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Decoder for the variable-length ("multibyte") integers used throughout the
    /// XZ container format: 7 payload bits per byte, least-significant group first,
    /// with the high bit of each byte marking continuation.
    /// </summary>
    internal static class MultiByteIntegers
    {
        /// <summary>
        /// Reads one XZ multibyte integer from <paramref name="reader"/>.
        /// </summary>
        /// <param name="reader">Source of encoded bytes.</param>
        /// <param name="MaxBytes">Upper bound on the encoded length; clamped to 9, the format maximum.</param>
        /// <returns>The decoded unsigned value.</returns>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="MaxBytes"/> is not positive.</exception>
        /// <exception cref="InvalidDataException">The encoding overruns <paramref name="MaxBytes"/> or contains a redundant zero continuation byte.</exception>
        public static ulong ReadXZInteger(this BinaryReader reader, int MaxBytes = 9)
        {
            if (MaxBytes <= 0)
                throw new ArgumentOutOfRangeException(nameof(MaxBytes));
            if (MaxBytes > 9)
                MaxBytes = 9;
            byte LastByte = reader.ReadByte();
            ulong Output = (ulong)LastByte & 0x7F;
            int i = 0;
            while ((LastByte & 0x80) != 0)
            {
                if (++i >= MaxBytes)
                    throw new InvalidDataException();
                LastByte = reader.ReadByte();
                // A zero continuation byte is forbidden: it would encode the same
                // value in more bytes than necessary.
                if (LastByte == 0)
                    throw new InvalidDataException();
                Output |= ((ulong)(LastByte & 0x7F)) << (i * 7);
            }
            return Output;
        }
    }
}

View File

@@ -0,0 +1,44 @@
using System;
using System.IO;
namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Base class for forward-only, read-only stream wrappers: every seek, length,
    /// flush, and write operation throws <see cref="NotSupportedException"/>.
    /// </summary>
    public abstract class ReadOnlyStream : Stream
    {
        /// <summary>The underlying stream this wrapper reads from.</summary>
        public Stream BaseStream { get; protected set; }
        public override bool CanRead => BaseStream.CanRead;
        public override bool CanSeek => false;
        public override bool CanWrite => false;
        public override long Length => throw new NotSupportedException();
        public override long Position
        {
            get => throw new NotSupportedException();
            set => throw new NotSupportedException();
        }
        public override void Flush() => throw new NotSupportedException();
        public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
        public override void SetLength(long value) => throw new NotSupportedException();
        public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
    }
}

View File

@@ -0,0 +1,165 @@
using System.Collections.Generic;
using System.IO;
using System.Linq;
using SharpCompress.Compressors.Xz.Filters;
namespace SharpCompress.Compressors.Xz
{
/// <summary>
/// Decodes a single XZ block: parses the block header (filter chain plus
/// optional sizes), wires the filters over the base stream, then streams out
/// decompressed data, consuming block padding and the stored check afterwards.
/// Throws <see cref="XZIndexMarkerReachedException"/> when the first header
/// byte is zero, i.e. the index section has been reached instead of a block.
/// </summary>
internal sealed class XZBlock : XZReadOnlyStream
{
/// <summary>Total encoded block-header size in bytes, derived from the size byte.</summary>
public int BlockHeaderSize => (_blockHeaderSizeByte + 1) * 4;
// Optional sizes from the block header; null when the header omits them.
public ulong? CompressedSize { get; private set; }
public ulong? UncompressedSize { get; private set; }
// Filters are pushed in header order and popped when connecting, so the
// last-listed filter ends up closest to the raw stream.
public Stack<BlockFilter> Filters { get; private set; } = new Stack<BlockFilter>();
public bool HeaderIsLoaded { get; private set; }
private CheckType _checkType;
private int _checkSize; // size in bytes of the stored block check value
private bool _streamConnected;
private int _numFilters;
private byte _blockHeaderSizeByte;
private Stream _decomStream; // top of the connected filter chain
private bool _endOfStream;
private bool _paddingSkipped;
private bool _crcChecked;
private ulong _bytesRead;
public XZBlock(Stream stream, CheckType checkType, int checkSize) : base(stream)
{
_checkType = checkType;
_checkSize = checkSize;
}
public override int Read(byte[] buffer, int offset, int count)
{
int bytesRead = 0;
if (!HeaderIsLoaded)
LoadHeader();
if (!_streamConnected)
ConnectStream();
if (!_endOfStream)
bytesRead = _decomStream.Read(buffer, offset, count);
// A short read marks the end of this block's data; once reached, consume
// the trailing padding and check bytes exactly once.
if (bytesRead != count)
_endOfStream = true;
if (_endOfStream && !_paddingSkipped)
SkipPadding();
if (_endOfStream && !_crcChecked)
CheckCrc();
_bytesRead += (ulong)bytesRead;
return bytesRead;
}
private void SkipPadding()
{
// Blocks are zero-padded to a 4-byte boundary.
// NOTE(review): this relies on BaseStream.Position (seekable source) and on
// the block having started 4-byte aligned — TODO confirm for all callers.
int bytes = (int)(BaseStream.Position % 4);
if (bytes > 0)
{
byte[] paddingBytes = new byte[4 - bytes];
BaseStream.Read(paddingBytes, 0, paddingBytes.Length);
if (paddingBytes.Any(b => b != 0))
throw new InvalidDataException("Padding bytes were non-null");
}
_paddingSkipped = true;
}
private void CheckCrc()
{
// The stored check value is consumed but not yet validated.
// TODO: verify the check against the decompressed data as it is read.
byte[] crc = new byte[_checkSize];
BaseStream.Read(crc, 0, _checkSize);
_crcChecked = true;
}
private void ConnectStream()
{
// Chain the filters over the raw stream; popping reverses header order so
// data flows through the filters in the order the format requires.
_decomStream = BaseStream;
while (Filters.Any())
{
var filter = Filters.Pop();
filter.SetBaseStream(_decomStream);
_decomStream = filter;
}
_streamConnected = true;
}
private void LoadHeader()
{
// The header is CRC-validated as a whole before any field is parsed.
ReadHeaderSize();
byte[] headerCache = CacheHeader();
using (var cache = new MemoryStream(headerCache))
using (var cachedReader = new BinaryReader(cache))
{
cachedReader.BaseStream.Position = 1; // skip the header size byte
ReadBlockFlags(cachedReader);
ReadFilters(cachedReader);
}
HeaderIsLoaded = true;
}
private void ReadHeaderSize()
{
_blockHeaderSizeByte = (byte)BaseStream.ReadByte();
// A zero size byte is the index indicator, not a block header.
if (_blockHeaderSizeByte == 0)
throw new XZIndexMarkerReachedException();
}
private byte[] CacheHeader()
{
// Read the header minus its trailing CRC32, then validate against the
// stored CRC before parsing.
byte[] blockHeaderWithoutCrc = new byte[BlockHeaderSize - 4];
blockHeaderWithoutCrc[0] = _blockHeaderSizeByte;
var read = BaseStream.Read(blockHeaderWithoutCrc, 1, BlockHeaderSize - 5);
if (read != BlockHeaderSize - 5)
throw new EndOfStreamException("Reached end of stream unexectedly");
uint crc = BaseStream.ReadLittleEndianUInt32();
uint calcCrc = Crc32.Compute(blockHeaderWithoutCrc);
if (crc != calcCrc)
throw new InvalidDataException("Block header corrupt");
return blockHeaderWithoutCrc;
}
private void ReadBlockFlags(BinaryReader reader)
{
var blockFlags = reader.ReadByte();
// Low two bits encode the filter count minus one (1..4 filters); bits
// 2-5 are reserved; bits 6 and 7 flag the optional size fields.
_numFilters = (blockFlags & 0x03) + 1;
byte reserved = (byte)(blockFlags & 0x3C);
if (reserved != 0)
throw new InvalidDataException("Reserved bytes used, perhaps an unknown XZ implementation");
bool compressedSizePresent = (blockFlags & 0x40) != 0;
bool uncompressedSizePresent = (blockFlags & 0x80) != 0;
if (compressedSizePresent)
CompressedSize = reader.ReadXZInteger();
if (uncompressedSizePresent)
UncompressedSize = reader.ReadXZInteger();
}
private void ReadFilters(BinaryReader reader, long baseStreamOffset = 0)
{
int nonLastSizeChangers = 0;
for (int i = 0; i < _numFilters; i++)
{
var filter = BlockFilter.Read(reader);
// Placement rules: some filters may only appear last in the chain,
// others only before the last.
if ((i + 1 == _numFilters && !filter.AllowAsLast)
|| (i + 1 < _numFilters && !filter.AllowAsNonLast))
throw new InvalidDataException("Block Filters in bad order");
if (filter.ChangesDataSize && i + 1 < _numFilters)
nonLastSizeChangers++;
filter.ValidateFilter();
Filters.Push(filter);
}
if (nonLastSizeChangers > 2)
throw new InvalidDataException("More than two non-last block filters cannot change stream size");
// Whatever remains of the declared header size must be zero padding.
int blockHeaderPaddingSize = BlockHeaderSize -
(4 + (int)(reader.BaseStream.Position - baseStreamOffset));
byte[] blockHeaderPadding = reader.ReadBytes(blockHeaderPaddingSize);
if (!blockHeaderPadding.All(b => b == 0))
throw new InvalidDataException("Block header contains unknown fields");
}
}
}

View File

@@ -0,0 +1,49 @@
using System.IO;
using System.Linq;
using System.Text;
using SharpCompress.IO;
namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Parser for the 12-byte XZ stream footer: CRC32, backward size,
    /// stream flags, and the trailing "YZ" footer magic.
    /// </summary>
    public class XZFooter
    {
        private readonly BinaryReader _reader;
        private readonly byte[] _magicBytes = new byte[] { 0x59, 0x5A };
        /// <summary>Position of the underlying stream when parsing began.</summary>
        public long StreamStartPosition { get; private set; }
        /// <summary>Size in bytes of the index field, decoded from the footer.</summary>
        public long BackwardSize { get; private set; }
        /// <summary>The two raw stream-flag bytes.</summary>
        public byte[] StreamFlags { get; private set; }
        public XZFooter(BinaryReader reader)
        {
            _reader = reader;
            StreamStartPosition = reader.BaseStream.Position;
        }
        /// <summary>Reads and validates a footer starting at the stream's current position.</summary>
        public static XZFooter FromStream(Stream stream)
        {
            var footer = new XZFooter(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8));
            footer.Process();
            return footer;
        }
        /// <summary>Parses the footer, throwing on CRC mismatch or missing magic.</summary>
        public void Process()
        {
            uint storedCrc = _reader.ReadLittleEndianUInt32();
            byte[] payload = _reader.ReadBytes(6);
            // The stored CRC covers the backward-size and stream-flag bytes.
            if (Crc32.Compute(payload) != storedCrc)
            {
                throw new InvalidDataException("Footer corrupt");
            }
            using (var payloadStream = new MemoryStream(payload))
            using (var payloadReader = new BinaryReader(payloadStream))
            {
                BackwardSize = (payloadReader.ReadLittleEndianUInt32() + 1) * 4;
                StreamFlags = payloadReader.ReadBytes(2);
            }
            byte[] magic = _reader.ReadBytes(2);
            if (!magic.SequenceEqual(_magicBytes))
            {
                throw new InvalidDataException("Magic footer missing");
            }
        }
    }
}

View File

@@ -0,0 +1,55 @@
using System;
using System.IO;
using System.Linq;
using System.Text;
using SharpCompress.IO;
namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Parser for the 12-byte XZ stream header: the magic bytes, the stream
    /// flags (which carry the block-check type), and their CRC32.
    /// </summary>
    public class XZHeader
    {
        private readonly BinaryReader _reader;
        private readonly byte[] MagicHeader = { 0xFD, 0x37, 0x7A, 0x58, 0x5a, 0x00 };
        /// <summary>Integrity check type used for every block in this stream.</summary>
        public CheckType BlockCheckType { get; private set; }
        /// <summary>Size in bytes of each block's stored check value.</summary>
        public int BlockCheckSize => ((((int)BlockCheckType) + 2) / 3) * 4;
        public XZHeader(BinaryReader reader)
        {
            _reader = reader;
        }
        /// <summary>Reads and validates a stream header starting at the stream's current position.</summary>
        public static XZHeader FromStream(Stream stream)
        {
            var header = new XZHeader(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8));
            header.Process();
            return header;
        }
        public void Process()
        {
            CheckMagicBytes(_reader.ReadBytes(6));
            ProcessStreamFlags();
        }
        private void ProcessStreamFlags()
        {
            byte[] streamFlags = _reader.ReadBytes(2);
            UInt32 storedCrc = _reader.ReadLittleEndianUInt32();
            if (Crc32.Compute(streamFlags) != storedCrc)
            {
                throw new InvalidDataException("Stream header corrupt");
            }
            // The low nibble of the second flag byte selects the check type; all
            // other flag bits are reserved and must be zero.
            BlockCheckType = (CheckType)(streamFlags[1] & 0x0F);
            if ((streamFlags[1] & 0xF0) != 0 || streamFlags[0] != 0)
            {
                throw new InvalidDataException("Unknown XZ Stream Version");
            }
        }
        private void CheckMagicBytes(byte[] header)
        {
            if (!header.SequenceEqual(MagicHeader))
            {
                throw new InvalidDataException("Invalid XZ Stream");
            }
        }
    }
}

View File

@@ -0,0 +1,73 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using SharpCompress.IO;
namespace SharpCompress.Compressors.Xz
{
/// <summary>
/// Parser for the XZ index section: a record count followed by one
/// (unpadded size, uncompressed size) record per block, zero padding to a
/// 4-byte boundary, and a CRC32.
/// </summary>
[CLSCompliant(false)]
public class XZIndex
{
private readonly BinaryReader _reader;
/// <summary>Stream position of the index start, including the marker byte.</summary>
public long StreamStartPosition { get; private set; }
public ulong NumberOfRecords { get; private set; }
public List<XZIndexRecord> Records { get; } = new List<XZIndexRecord>();
// True when the caller has already consumed the leading zero marker byte.
private bool _indexMarkerAlreadyVerified;
public XZIndex(BinaryReader reader, bool indexMarkerAlreadyVerified)
{
_reader = reader;
_indexMarkerAlreadyVerified = indexMarkerAlreadyVerified;
StreamStartPosition = reader.BaseStream.Position;
// Back up over the already-consumed marker byte so padding is computed
// from the true start of the index.
if (indexMarkerAlreadyVerified)
StreamStartPosition--;
}
public static XZIndex FromStream(Stream stream, bool indexMarkerAlreadyVerified)
{
var index = new XZIndex(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8), indexMarkerAlreadyVerified);
index.Process();
return index;
}
/// <summary>Reads the whole index; throws <see cref="InvalidDataException"/> on malformed data.</summary>
public void Process()
{
if (!_indexMarkerAlreadyVerified)
VerifyIndexMarker();
NumberOfRecords = _reader.ReadXZInteger();
for (ulong i = 0; i < NumberOfRecords; i++)
{
Records.Add(XZIndexRecord.FromBinaryReader(_reader));
}
SkipPadding();
VerifyCrc32();
}
private void VerifyIndexMarker()
{
// The index begins with a single zero byte where a block-header size
// byte would otherwise be.
byte marker = _reader.ReadByte();
if (marker != 0)
throw new InvalidDataException("Not an index block");
}
private void SkipPadding()
{
// The index is zero-padded to a 4-byte boundary relative to its start.
int bytes = (int)(_reader.BaseStream.Position - StreamStartPosition) % 4;
if (bytes > 0)
{
byte[] paddingBytes = _reader.ReadBytes(4 - bytes);
if (paddingBytes.Any(b => b != 0))
throw new InvalidDataException("Padding bytes were non-null");
}
}
private void VerifyCrc32()
{
// The CRC is consumed but not yet validated.
uint crc = _reader.ReadLittleEndianUInt32();
// TODO verify this matches the CRC of the index body
}
}
}

View File

@@ -0,0 +1,8 @@
using System;
namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Thrown when a block read encounters the index marker (a zero header-size
    /// byte), signalling the end of the block section. Used as control flow by
    /// the XZ stream reader rather than reporting an error condition.
    /// </summary>
    public class XZIndexMarkerReachedException : Exception
    {
        public XZIndexMarkerReachedException()
        {
        }
        // Standard exception constructors (CA1032) so callers can attach context.
        public XZIndexMarkerReachedException(string message)
            : base(message)
        {
        }
        public XZIndexMarkerReachedException(string message, Exception innerException)
            : base(message, innerException)
        {
        }
    }
}

View File

@@ -0,0 +1,22 @@
using System;
using System.IO;
namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// One record of the XZ index: a block's unpadded (stored) size and its
    /// uncompressed size.
    /// </summary>
    [CLSCompliant(false)]
    public class XZIndexRecord
    {
        public ulong UnpaddedSize { get; private set; }
        public ulong UncompressedSize { get; private set; }
        protected XZIndexRecord() { }
        /// <summary>Reads one index record from <paramref name="br"/>.</summary>
        public static XZIndexRecord FromBinaryReader(BinaryReader br)
        {
            // On-disk order: unpadded size precedes uncompressed size; object
            // initializers evaluate in source order, so the reads happen in order.
            return new XZIndexRecord
            {
                UnpaddedSize = br.ReadXZInteger(),
                UncompressedSize = br.ReadXZInteger()
            };
        }
    }
}

View File

@@ -0,0 +1,14 @@
using System.IO;
namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Read-only stream base for XZ components; validates at construction that
    /// the wrapped stream is readable.
    /// </summary>
    public abstract class XZReadOnlyStream : ReadOnlyStream
    {
        // protected: abstract classes should not expose public constructors (CA1012);
        // only derived types can call this anyway.
        protected XZReadOnlyStream(Stream stream)
        {
            BaseStream = stream;
            if (!BaseStream.CanRead)
            {
                throw new InvalidDataException("Must be able to read from stream");
            }
        }
    }
}

View File

@@ -0,0 +1,116 @@
using System;
using System.IO;
namespace SharpCompress.Compressors.Xz
{
/// <summary>
/// Forward-only decompression stream for the XZ container format: reads the
/// stream header, then decodes consecutive blocks until the index marker is
/// reached, and finally parses the index and footer.
/// </summary>
[CLSCompliant(false)]
public sealed class XZStream : XZReadOnlyStream
{
/// <summary>
/// Returns true if a valid XZ stream header can be read from
/// <paramref name="stream"/>; consumes the header bytes in the process.
/// </summary>
public static bool IsXZStream(Stream stream)
{
try
{
return null != XZHeader.FromStream(stream);
}
catch (Exception)
{
// Any parse failure just means "not an XZ stream".
return false;
}
}
// Rejects check types this decoder cannot handle before any block is read.
private void AssertBlockCheckTypeIsSupported()
{
switch (Header.BlockCheckType)
{
case CheckType.NONE:
break;
case CheckType.CRC32:
break;
case CheckType.CRC64:
break;
case CheckType.SHA256:
throw new NotImplementedException();
default:
throw new NotSupportedException("Check Type unknown to this version of decoder.");
}
}
public XZHeader Header { get; private set; }
public XZIndex Index { get; private set; }
public XZFooter Footer { get; private set; }
public bool HeaderIsRead { get; private set; }
private XZBlock _currentBlock;
bool _endOfStream;
public XZStream(Stream stream) : base(stream)
{
}
public override int Read(byte[] buffer, int offset, int count)
{
int bytesRead = 0;
if (_endOfStream)
return bytesRead;
if (!HeaderIsRead)
ReadHeader();
bytesRead = ReadBlocks(buffer, offset, count);
// A short read means the last block is done: consume the trailing index
// and footer so the underlying stream is fully drained.
if (bytesRead < count)
{
_endOfStream = true;
ReadIndex();
ReadFooter();
}
return bytesRead;
}
private void ReadHeader()
{
Header = XZHeader.FromStream(BaseStream);
AssertBlockCheckTypeIsSupported();
HeaderIsRead = true;
}
private void ReadIndex()
{
// The index marker byte was already consumed by the block reader.
Index = XZIndex.FromStream(BaseStream, true);
// TODO verify the index against the blocks actually read
}
private void ReadFooter()
{
Footer = XZFooter.FromStream(BaseStream);
// TODO verify footer
}
private int ReadBlocks(byte[] buffer, int offset, int count)
{
int bytesRead = 0;
if (_currentBlock == null)
NextBlock();
// Keep pulling from consecutive blocks until the caller's buffer is full
// or the index marker ends the block section.
for (;;)
{
try
{
if (bytesRead >= count)
break;
int remaining = count - bytesRead;
int newOffset = offset + bytesRead;
int justRead = _currentBlock.Read(buffer, newOffset, remaining);
if (justRead < remaining)
NextBlock();
bytesRead += justRead;
}
catch (XZIndexMarkerReachedException)
{
// Raised by XZBlock when the next "block" is actually the index.
break;
}
}
return bytesRead;
}
private void NextBlock()
{
_currentBlock = new XZBlock(BaseStream, Header.BlockCheckType, Header.BlockCheckSize);
}
}
}

View File

@@ -156,7 +156,7 @@ namespace SharpCompress.Converters
{
if (dest == null)
{
throw new ArgumentNullException("dest");
throw new ArgumentNullException(nameof(dest));
}
if (destIdx < 0 || destIdx > dest.Length - size)
{
@@ -170,7 +170,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -195,7 +195,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -221,7 +221,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -247,7 +247,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -273,7 +273,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -299,7 +299,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -325,7 +325,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 2)
{
@@ -351,7 +351,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 2)
{
@@ -468,7 +468,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -494,7 +494,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -520,7 +520,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 8)
{
@@ -546,7 +546,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -572,7 +572,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -598,7 +598,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 4)
{
@@ -624,7 +624,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 2)
{
@@ -650,7 +650,7 @@ namespace SharpCompress.Converters
{
if (data == null)
{
throw new ArgumentNullException("data");
throw new ArgumentNullException(nameof(data));
}
if (data.Length - index < 2)
{

View File

@@ -0,0 +1,106 @@
using System;
using System.Collections.Generic;
using System.IO;
namespace SharpCompress.Crypto
{
/// <summary>
/// Write-only stream decorator that forwards all written bytes to a wrapped
/// stream while incrementally computing a CRC-32 checksum over them.
/// Reading and seeking are not supported.
/// </summary>
internal sealed class Crc32Stream : Stream
{
    /// <summary>Reversed representation of the IEEE 802.3 CRC-32 polynomial.</summary>
    public const uint DefaultPolynomial = 0xedb88320u;

    /// <summary>Standard CRC-32 initial value (all bits set).</summary>
    public const uint DefaultSeed = 0xffffffffu;

    // Cached lookup table for the default polynomial, shared across instances.
    // A benign race on first use is possible; concurrent initializers would
    // compute identical tables, so the last writer wins harmlessly.
    private static uint[] defaultTable;

    private readonly uint[] table;
    private uint hash;
    private readonly Stream stream;

    /// <summary>
    /// Wraps <paramref name="stream"/> using the default polynomial and seed.
    /// </summary>
    public Crc32Stream(Stream stream)
        : this(stream, DefaultPolynomial, DefaultSeed)
    {
    }

    /// <summary>
    /// Wraps <paramref name="stream"/> using the given CRC polynomial and seed.
    /// </summary>
    /// <param name="stream">Destination stream that receives all written bytes.</param>
    /// <param name="polynomial">Reversed CRC-32 polynomial used to build the table.</param>
    /// <param name="seed">Initial CRC register value.</param>
    public Crc32Stream(Stream stream, uint polynomial, uint seed)
    {
        this.stream = stream;
        table = InitializeTable(polynomial);
        hash = seed;
    }

    /// <summary>The underlying stream that receives every written byte.</summary>
    public Stream WrappedStream => stream;

    public override void Flush()
    {
        stream.Flush();
    }

    public override int Read(byte[] buffer, int offset, int count) => throw new NotSupportedException();

    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();

    public override void SetLength(long value) => throw new NotSupportedException();

    /// <summary>
    /// Writes to the wrapped stream and folds the same bytes into the running CRC.
    /// </summary>
    public override void Write(byte[] buffer, int offset, int count)
    {
        stream.Write(buffer, offset, count);
        hash = CalculateCrc(table, hash, buffer, offset, count);
    }

    // FIX: Read() always throws NotSupportedException, so the Stream contract
    // requires CanRead to report false. It previously mirrored stream.CanRead,
    // falsely advertising read support (contrast with CanSeek, which correctly
    // matches the throwing Seek/Position members).
    public override bool CanRead => false;

    public override bool CanSeek => false;

    public override bool CanWrite => stream.CanWrite;

    public override long Length => throw new NotSupportedException();

    public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }

    /// <summary>Finalized CRC-32 of all bytes written so far (hash inverted per the CRC-32 spec).</summary>
    public uint Crc => ~hash;

    /// <summary>Computes the CRC-32 of <paramref name="buffer"/> with default polynomial and seed.</summary>
    public static uint Compute(byte[] buffer)
    {
        return Compute(DefaultSeed, buffer);
    }

    /// <summary>Computes the CRC-32 of <paramref name="buffer"/> with the default polynomial and the given seed.</summary>
    public static uint Compute(uint seed, byte[] buffer)
    {
        return Compute(DefaultPolynomial, seed, buffer);
    }

    /// <summary>Computes the CRC-32 of <paramref name="buffer"/> with the given polynomial and seed.</summary>
    /// <exception cref="ArgumentNullException"><paramref name="buffer"/> is null.</exception>
    public static uint Compute(uint polynomial, uint seed, byte[] buffer)
    {
        if (buffer == null)
        {
            // Fail with the argument name rather than a NullReferenceException
            // on buffer.Length; matches the nameof() convention used elsewhere.
            throw new ArgumentNullException(nameof(buffer));
        }
        return ~CalculateCrc(InitializeTable(polynomial), seed, buffer, 0, buffer.Length);
    }

    // Builds (or returns the cached) 256-entry table of CRC remainders for
    // every possible byte value under the given reversed polynomial.
    private static uint[] InitializeTable(uint polynomial)
    {
        if (polynomial == DefaultPolynomial && defaultTable != null)
        {
            return defaultTable;
        }
        var createTable = new uint[256];
        for (var i = 0; i < 256; i++)
        {
            var entry = (uint)i;
            for (var j = 0; j < 8; j++)
            {
                if ((entry & 1) == 1)
                {
                    entry = (entry >> 1) ^ polynomial;
                }
                else
                {
                    entry = entry >> 1;
                }
            }
            createTable[i] = entry;
        }
        if (polynomial == DefaultPolynomial)
        {
            defaultTable = createTable;
        }
        return createTable;
    }

    // Standard table-driven, byte-at-a-time CRC update over buffer[offset..offset+count).
    private static uint CalculateCrc(uint[] table, uint crc, byte[] buffer, int offset, int count)
    {
        unchecked
        {
            for (int i = offset, end = offset + count; i < end; i++)
            {
                crc = (crc >> 8) ^ table[(crc ^ buffer[i]) & 0xFF];
            }
        }
        return crc;
    }
}
}

View File

@@ -12,7 +12,7 @@ namespace Org.BouncyCastle.Crypto.Parameters
{
if (key == null)
{
throw new ArgumentNullException("key");
throw new ArgumentNullException(nameof(key));
}
this.key = (byte[])key.Clone();
@@ -25,15 +25,15 @@ namespace Org.BouncyCastle.Crypto.Parameters
{
if (key == null)
{
throw new ArgumentNullException("key");
throw new ArgumentNullException(nameof(key));
}
if (keyOff < 0 || keyOff > key.Length)
{
throw new ArgumentOutOfRangeException("keyOff");
throw new ArgumentOutOfRangeException(nameof(keyOff));
}
if (keyLen < 0 || (keyOff + keyLen) > key.Length)
{
throw new ArgumentOutOfRangeException("keyLen");
throw new ArgumentOutOfRangeException(nameof(keyLen));
}
this.key = new byte[keyLen];

View File

@@ -41,7 +41,7 @@ namespace SharpCompress.IO
throw new NotSupportedException();
}
public override long Length => throw new NotSupportedException();
public override long Length => BytesLeftToRead;
public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }

View File

@@ -22,6 +22,7 @@ namespace SharpCompress.IO
public override void Flush()
{
writableStream.Flush();
}
public override long Length => throw new NotSupportedException();

View File

@@ -1,5 +1,6 @@
using System;
using System.IO;
using SharpCompress.Compressors.Filters;
namespace SharpCompress.IO
{
@@ -46,8 +47,13 @@ namespace SharpCompress.IO
}
else
{
bufferStream.TransferTo(buffer);
bufferStream = buffer;
//create new memorystream to allow proper resizing as memorystream could be a user provided buffer
//https://github.com/adamhathcock/sharpcompress/issues/306
bufferStream = new MemoryStream();
buffer.Position = 0;
buffer.TransferTo(bufferStream);
bufferStream.Position = 0;
}
isRewound = true;
@@ -105,6 +111,12 @@ namespace SharpCompress.IO
public override int Read(byte[] buffer, int offset, int count)
{
//don't actually read if we don't really want to read anything
//currently a network stream bug on Windows for .NET Core
if (count == 0)
{
return 0;
}
int read;
if (isRewound && bufferStream.Position != bufferStream.Length)
{

View File

@@ -139,30 +139,28 @@ namespace SharpCompress.Readers
}
}
private readonly byte[] skipBuffer = new byte[4096];
private void Skip()
{
if (!Entry.IsSolid)
if (ArchiveType != ArchiveType.Rar
&& !Entry.IsSolid
&& Entry.CompressedSize > 0)
{
var rawStream = Entry.Parts.First().GetRawStream();
//not solid and has a known compressed size then we can skip raw bytes.
var part = Entry.Parts.First();
var rawStream = part.GetRawStream();
if (rawStream != null)
{
var bytesToAdvance = Entry.CompressedSize;
for (var i = 0; i < bytesToAdvance / skipBuffer.Length; i++)
{
rawStream.Read(skipBuffer, 0, skipBuffer.Length);
}
rawStream.Read(skipBuffer, 0, (int)(bytesToAdvance % skipBuffer.Length));
rawStream.Skip(bytesToAdvance);
part.Skipped = true;
return;
}
}
//don't know the size so we have to try to decompress to skip
using (var s = OpenEntryStream())
{
while (s.Read(skipBuffer, 0, skipBuffer.Length) > 0)
{
}
s.Skip();
}
}

View File

@@ -29,11 +29,11 @@ namespace SharpCompress.Readers.GZip
return new GZipReader(stream, options ?? new ReaderOptions());
}
#endregion
#endregion Open
internal override IEnumerable<GZipEntry> GetEntries(Stream stream)
{
return GZipEntry.GetEntries(stream);
return GZipEntry.GetEntries(stream, Options);
}
}
}

Some files were not shown because too many files have changed in this diff Show More