mirror of
https://github.com/adamhathcock/sharpcompress.git
synced 2026-02-08 13:34:57 +00:00
Compare commits
134 Commits
copilot/fi
...
copilot/ad
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ea02d31096 | ||
|
|
d04830ba90 | ||
|
|
8533b09091 | ||
|
|
44b7955d85 | ||
|
|
038b9f18c6 | ||
|
|
5667595587 | ||
|
|
6e0e20ba6e | ||
|
|
ec31cb9987 | ||
|
|
32d5b61c4a | ||
|
|
128c9e639f | ||
|
|
5e3f01dc03 | ||
|
|
39a0b4ce78 | ||
|
|
af719707bf | ||
|
|
8415a19912 | ||
|
|
1607d2768e | ||
|
|
c97c05a3a7 | ||
|
|
b2beea9c4e | ||
|
|
41fbaa1c28 | ||
|
|
d9274cf794 | ||
|
|
583b048046 | ||
|
|
ead5916eae | ||
|
|
d15ab92da3 | ||
|
|
1ab30f2af5 | ||
|
|
4dbe0b91f1 | ||
|
|
a972d3784e | ||
|
|
6991900eb0 | ||
|
|
d614beb9eb | ||
|
|
253a46d458 | ||
|
|
32b1ec32c6 | ||
|
|
eb2cba09b2 | ||
|
|
e79dceb67e | ||
|
|
87c38d6dab | ||
|
|
9e98d9c45c | ||
|
|
0e9a4b0511 | ||
|
|
eae25aff64 | ||
|
|
b8c06ff36e | ||
|
|
6cf2e054bf | ||
|
|
95749234f5 | ||
|
|
b976961434 | ||
|
|
e1aa727513 | ||
|
|
1f71ce1be2 | ||
|
|
cf13de6ac1 | ||
|
|
c2e01798f8 | ||
|
|
8fc8295a89 | ||
|
|
d392991764 | ||
|
|
e57e87090f | ||
|
|
c701bbbee3 | ||
|
|
2f0eb0bd4b | ||
|
|
17bde8da8a | ||
|
|
99d355e6ca | ||
|
|
c790fd21a4 | ||
|
|
bee51af48b | ||
|
|
ca743eae22 | ||
|
|
93504cf82f | ||
|
|
6d3e4e842b | ||
|
|
54b64a8c3b | ||
|
|
0e59bf39f4 | ||
|
|
8b95e0a76d | ||
|
|
48a2ad7b57 | ||
|
|
cfc6651fff | ||
|
|
b23827a8db | ||
|
|
3f9986c13c | ||
|
|
224989f19b | ||
|
|
c7010b75c1 | ||
|
|
00cfeee56e | ||
|
|
aaa97e2ce2 | ||
|
|
1d52618137 | ||
|
|
34309f17f4 | ||
|
|
220ba67faa | ||
|
|
230f96e8e8 | ||
|
|
930c8899d2 | ||
|
|
7c0cef7dd8 | ||
|
|
951ebb3fa2 | ||
|
|
2a4d098b41 | ||
|
|
5839b87f98 | ||
|
|
425a2bd680 | ||
|
|
939c2497c8 | ||
|
|
8995ba56b8 | ||
|
|
e941ab60ca | ||
|
|
48860f1349 | ||
|
|
94b2c5c593 | ||
|
|
f1d8ae5a22 | ||
|
|
e44d2093e5 | ||
|
|
9fa1201a4c | ||
|
|
7800808648 | ||
|
|
2789e86d21 | ||
|
|
afd1e39b88 | ||
|
|
aec4c738ef | ||
|
|
63ecc8c842 | ||
|
|
5f6d583521 | ||
|
|
0341984f10 | ||
|
|
fe757486ae | ||
|
|
46d480c9a1 | ||
|
|
8fe3cba7a8 | ||
|
|
aa19f4da8b | ||
|
|
a08f95326c | ||
|
|
b4c5437c92 | ||
|
|
8680e3b39e | ||
|
|
1b3002c8df | ||
|
|
394fd2e7db | ||
|
|
d83af56d28 | ||
|
|
28c93d6841 | ||
|
|
5f52fc2176 | ||
|
|
8fba579e3a | ||
|
|
40b1aadeb2 | ||
|
|
40e72ad199 | ||
|
|
618b4bbb83 | ||
|
|
1eaf3e6294 | ||
|
|
fd453e946d | ||
|
|
c294071015 | ||
|
|
c2f6055e33 | ||
|
|
5161f4df33 | ||
|
|
3396f8fe00 | ||
|
|
9291f58091 | ||
|
|
85f3b17c42 | ||
|
|
14d432e22d | ||
|
|
0fdf9c74a8 | ||
|
|
e2df7894f9 | ||
|
|
7af029b5de | ||
|
|
8fc5ca5a71 | ||
|
|
aa0356de9f | ||
|
|
3a6d24b1d9 | ||
|
|
b9b159be4c | ||
|
|
40212083a5 | ||
|
|
d3428b066e | ||
|
|
94c64b2a45 | ||
|
|
0d671a0bb2 | ||
|
|
0f374b27cf | ||
|
|
0d487df61b | ||
|
|
c082d4203b | ||
|
|
d4380b6bb6 | ||
|
|
fb76bd82f2 | ||
|
|
3bdaba46a9 | ||
|
|
7c3c94ed7f |
@@ -3,11 +3,11 @@
|
||||
"isRoot": true,
|
||||
"tools": {
|
||||
"csharpier": {
|
||||
"version": "1.2.1",
|
||||
"version": "1.2.4",
|
||||
"commands": [
|
||||
"csharpier"
|
||||
],
|
||||
"rollForward": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
15
.github/COPILOT_AGENT_README.md
vendored
15
.github/COPILOT_AGENT_README.md
vendored
@@ -1,15 +0,0 @@
|
||||
# Copilot Coding Agent Configuration
|
||||
|
||||
This repository includes a minimal opt-in configuration and CI workflow to allow the GitHub Copilot coding agent to open and validate PRs.
|
||||
|
||||
- .copilot-agent.yml: opt-in config for automated agents
|
||||
- .github/agents/copilot-agent.yml: detailed agent policy configuration
|
||||
- .github/workflows/dotnetcore.yml: CI runs on PRs touching the solution, source, or tests to validate changes
|
||||
- AGENTS.md: general instructions for Copilot coding agent with project-specific guidelines
|
||||
|
||||
Maintainers can adjust the allowed paths or disable the agent by editing or removing .copilot-agent.yml.
|
||||
|
||||
Notes:
|
||||
- The agent can create, modify, and delete files within the allowed paths (src, tests, README.md, AGENTS.md)
|
||||
- All changes require review before merge
|
||||
- If build/test paths are different, update the workflow accordingly; this workflow targets SharpCompress.sln and the SharpCompress.Test test project.
|
||||
25
.github/prompts/plan-async.prompt.md
vendored
Normal file
25
.github/prompts/plan-async.prompt.md
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
# Plan: Implement Missing Async Functionality in SharpCompress
|
||||
|
||||
SharpCompress has async support for low-level stream operations and Reader/Writer APIs, but critical entry points (Archive.Open, factory methods, initialization) remain synchronous. This plan adds async overloads for all user-facing I/O operations and fixes existing async bugs, enabling full end-to-end async workflows.
|
||||
|
||||
## Steps
|
||||
|
||||
1. **Add async factory methods** to [ArchiveFactory.cs](src/SharpCompress/Factories/ArchiveFactory.cs), [ReaderFactory.cs](src/SharpCompress/Factories/ReaderFactory.cs), and [WriterFactory.cs](src/SharpCompress/Factories/WriterFactory.cs) with `OpenAsync` and `CreateAsync` overloads accepting `CancellationToken`
|
||||
|
||||
2. **Implement async Open methods** on concrete archive types ([ZipArchive.cs](src/SharpCompress/Archives/Zip/ZipArchive.cs), [TarArchive.cs](src/SharpCompress/Archives/Tar/TarArchive.cs), [RarArchive.cs](src/SharpCompress/Archives/Rar/RarArchive.cs), [GZipArchive.cs](src/SharpCompress/Archives/GZip/GZipArchive.cs), [SevenZipArchive.cs](src/SharpCompress/Archives/SevenZip/SevenZipArchive.cs)) and reader types ([ZipReader.cs](src/SharpCompress/Readers/Zip/ZipReader.cs), [TarReader.cs](src/SharpCompress/Readers/Tar/TarReader.cs), etc.)
|
||||
|
||||
3. **Convert archive initialization logic to async** including header reading, volume loading, and format signature detection across archive constructors and internal initialization methods
|
||||
|
||||
4. **Fix LZMA decoder async bugs** in [LzmaStream.cs](src/SharpCompress/Compressors/LZMA/LzmaStream.cs), [Decoder.cs](src/SharpCompress/Compressors/LZMA/Decoder.cs), and [OutWindow.cs](src/SharpCompress/Compressors/LZMA/OutWindow.cs) to enable true async 7Zip support and remove `NonDisposingStream` workaround
|
||||
|
||||
5. **Complete Rar async implementation** by converting `UnpackV2017` methods to async in [UnpackV2017.cs](src/SharpCompress/Compressors/Rar/UnpackV2017.cs) and updating Rar20 decompression
|
||||
|
||||
6. **Add comprehensive async tests** covering all new async entry points, cancellation scenarios, and concurrent operations across all archive formats in test files
|
||||
|
||||
## Further Considerations
|
||||
|
||||
1. **Breaking changes** - Should new async methods be added alongside existing sync methods (non-breaking), or should sync methods eventually be deprecated? Recommend additive approach for backward compatibility.
|
||||
|
||||
2. **Performance impact** - Header parsing for formats like Zip/Tar is often small; consider whether truly async parsing adds value vs sync parsing wrapped in Task, or make it conditional based on stream type (network vs file).
|
||||
|
||||
3. **7Zip complexity** - The LZMA async bug fix (Step 4) may be challenging due to state management in the decoder; consider whether to scope it separately or implement a simpler workaround that maintains correctness.
|
||||
123
.github/prompts/plan-for-next.prompt.md
vendored
Normal file
123
.github/prompts/plan-for-next.prompt.md
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
# Plan: Modernize SharpCompress Public API
|
||||
|
||||
Based on comprehensive analysis, the API has several inconsistencies around factory patterns, async support, format capabilities, and options classes. Most improvements can be done incrementally without breaking changes.
|
||||
|
||||
## Steps
|
||||
|
||||
1. **Standardize factory patterns** by deprecating format-specific static `Open` methods in [Archives/Zip/ZipArchive.cs](src/SharpCompress/Archives/Zip/ZipArchive.cs), [Archives/Tar/TarArchive.cs](src/SharpCompress/Archives/Tar/TarArchive.cs), etc. in favor of centralized [Factories/ArchiveFactory.cs](src/SharpCompress/Factories/ArchiveFactory.cs)
|
||||
|
||||
2. **Complete async implementation** in [Writers/Zip/ZipWriter.cs](src/SharpCompress/Writers/Zip/ZipWriter.cs) and other writers that currently use sync-over-async, implementing true async I/O throughout the writer hierarchy
|
||||
|
||||
3. **Unify options classes** by making [Common/ExtractionOptions.cs](src/SharpCompress/Common/ExtractionOptions.cs) inherit from `OptionsBase` and adding progress reporting to extraction methods consistently
|
||||
|
||||
4. **Clarify GZip semantics** in [Archives/GZip/GZipArchive.cs](src/SharpCompress/Archives/GZip/GZipArchive.cs) by adding XML documentation explaining single-entry limitation and relationship to GZip compression used in Tar.gz
|
||||
|
||||
## Further Considerations
|
||||
|
||||
1. **Breaking changes roadmap** - Should we plan a major version (2.0) to remove deprecated factory methods, clean up `ArchiveType` enum (remove Arc/Arj or add full support), and consolidate naming patterns?
|
||||
|
||||
2. **Progress reporting consistency** - Should `IProgress<ArchiveExtractionProgress<IEntry>>` be added to all extraction extension methods or consolidated into options classes?
|
||||
|
||||
## Detailed Analysis
|
||||
|
||||
### Factory Pattern Issues
|
||||
|
||||
Three different factory patterns exist with overlapping functionality:
|
||||
|
||||
1. **Static Factories**: ArchiveFactory, ReaderFactory, WriterFactory
|
||||
2. **Instance Factories**: IArchiveFactory, IReaderFactory, IWriterFactory
|
||||
3. **Format-specific static methods**: Each archive class has static `Open` methods
|
||||
|
||||
**Example confusion:**
|
||||
```csharp
|
||||
// Three ways to open a Zip archive - which is recommended?
|
||||
var archive1 = ArchiveFactory.Open("file.zip");
|
||||
var archive2 = ZipArchive.Open("file.zip");
|
||||
var archive3 = ArchiveFactory.AutoFactory.Open(fileInfo, options);
|
||||
```
|
||||
|
||||
### Async Support Gaps
|
||||
|
||||
Base `IWriter` interface has async methods, but writer implementations provide minimal async support. Most writers just call synchronous methods:
|
||||
|
||||
```csharp
|
||||
public virtual async Task WriteAsync(...)
|
||||
{
|
||||
// Default implementation calls synchronous version
|
||||
Write(filename, source, modificationTime);
|
||||
await Task.CompletedTask.ConfigureAwait(false);
|
||||
}
|
||||
```
|
||||
|
||||
Real async implementations only in:
|
||||
- `TarWriter` - Proper async implementation
|
||||
- Most other writers use sync-over-async
|
||||
|
||||
### GZip Archive Special Case
|
||||
|
||||
GZip is treated as both a compression format and an archive format, but only supports single-entry archives:
|
||||
|
||||
```csharp
|
||||
protected override GZipArchiveEntry CreateEntryInternal(...)
|
||||
{
|
||||
if (Entries.Any())
|
||||
{
|
||||
throw new InvalidFormatException("Only one entry is allowed in a GZip Archive");
|
||||
}
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
### Options Class Hierarchy
|
||||
|
||||
```
|
||||
OptionsBase (LeaveStreamOpen, ArchiveEncoding)
|
||||
├─ ReaderOptions (LookForHeader, Password, DisableCheckIncomplete, BufferSize, ExtensionHint, Progress)
|
||||
├─ WriterOptions (CompressionType, CompressionLevel, Progress)
|
||||
│ ├─ ZipWriterOptions (ArchiveComment, UseZip64)
|
||||
│ ├─ TarWriterOptions (FinalizeArchiveOnClose, HeaderFormat)
|
||||
│ └─ GZipWriterOptions (no additional properties)
|
||||
└─ ExtractionOptions (standalone - Overwrite, ExtractFullPath, PreserveFileTime, PreserveAttributes)
|
||||
```
|
||||
|
||||
**Issues:**
|
||||
- `ExtractionOptions` doesn't inherit from `OptionsBase` - no encoding support during extraction
|
||||
- Progress reporting inconsistency between readers and extraction
|
||||
- Obsolete properties (`ChecksumIsValid`, `Version`) with unclear migration path
|
||||
|
||||
### Implementation Priorities
|
||||
|
||||
**High Priority (Non-Breaking):**
|
||||
1. Add API usage guide (Archive vs Reader, factory recommendations, async best practices)
|
||||
2. Fix progress reporting consistency
|
||||
3. Complete async implementation in writers
|
||||
|
||||
**Medium Priority (Next Major Version):**
|
||||
1. Unify factory pattern - deprecate format-specific static `Open` methods
|
||||
2. Clean up options classes - make `ExtractionOptions` inherit from `OptionsBase`
|
||||
3. Clarify archive types - remove Arc/Arj from `ArchiveType` enum or add full support
|
||||
4. Standardize naming across archive types
|
||||
|
||||
**Low Priority:**
|
||||
1. Add BZip2 archive support similar to GZipArchive
|
||||
2. Complete obsolete property cleanup with migration guide
|
||||
|
||||
### Backward Compatibility Strategy
|
||||
|
||||
**Safe (Non-Breaking) Changes:**
|
||||
- Add new methods to interfaces (use default implementations)
|
||||
- Add new options properties (with defaults)
|
||||
- Add new factory methods
|
||||
- Improve async implementations
|
||||
- Add progress reporting support
|
||||
|
||||
**Breaking Changes to Avoid:**
|
||||
- ❌ Removing format-specific `Open` methods (deprecate instead)
|
||||
- ❌ Changing `LeaveStreamOpen` default (currently `true`)
|
||||
- ❌ Removing obsolete properties before major version bump
|
||||
- ❌ Changing return types or signatures of existing methods
|
||||
|
||||
**Deprecation Pattern:**
|
||||
- Use `[Obsolete]` for one major version
|
||||
- Use `[EditorBrowsable(EditorBrowsableState.Never)]` in next major version
|
||||
- Remove in following major version
|
||||
2
.github/workflows/dotnetcore.yml
vendored
2
.github/workflows/dotnetcore.yml
vendored
@@ -19,7 +19,7 @@ jobs:
|
||||
with:
|
||||
dotnet-version: 10.0.x
|
||||
- run: dotnet run --project build/build.csproj
|
||||
- uses: actions/upload-artifact@v5
|
||||
- uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: ${{ matrix.os }}-sharpcompress.nupkg
|
||||
path: artifacts/*
|
||||
|
||||
32
AGENTS.md
32
AGENTS.md
@@ -28,14 +28,38 @@ SharpCompress is a pure C# compression library supporting multiple archive forma
|
||||
|
||||
## Code Formatting
|
||||
|
||||
**Copilot agents: You MUST run the `format` task after making code changes to ensure consistency.**
|
||||
|
||||
- Use CSharpier for code formatting to ensure consistent style across the project
|
||||
- CSharpier is configured as a local tool in `.config/dotnet-tools.json`
|
||||
- Restore tools with: `dotnet tool restore`
|
||||
- Format files from the project root with: `dotnet csharpier .`
|
||||
- **Run `dotnet csharpier .` from the project root after making code changes before committing**
|
||||
- Configure your IDE to format on save using CSharpier for the best experience
|
||||
|
||||
### Commands
|
||||
|
||||
1. **Restore tools** (first time only):
|
||||
```bash
|
||||
dotnet tool restore
|
||||
```
|
||||
|
||||
2. **Check if files are formatted correctly** (doesn't modify files):
|
||||
```bash
|
||||
dotnet csharpier check .
|
||||
```
|
||||
- Exit code 0: All files are properly formatted
|
||||
- Exit code 1: Some files need formatting (will show which files and differences)
|
||||
|
||||
3. **Format files** (modifies files):
|
||||
```bash
|
||||
dotnet csharpier format .
|
||||
```
|
||||
- Formats all files in the project to match CSharpier style
|
||||
- Run from project root directory
|
||||
|
||||
4. **Configure your IDE** to format on save using CSharpier for the best experience
|
||||
|
||||
### Additional Notes
|
||||
- The project also uses `.editorconfig` for editor settings (indentation, encoding, etc.)
|
||||
- Let CSharpier handle code style while `.editorconfig` handles editor behavior
|
||||
- Always run `dotnet csharpier check .` before committing to verify formatting
|
||||
|
||||
## Project Setup and Structure
|
||||
|
||||
|
||||
@@ -1,19 +1,18 @@
|
||||
<Project>
|
||||
<ItemGroup>
|
||||
<PackageVersion Include="Bullseye" Version="6.0.0" />
|
||||
<PackageVersion Include="Bullseye" Version="6.1.0" />
|
||||
<PackageVersion Include="AwesomeAssertions" Version="9.3.0" />
|
||||
<PackageVersion Include="Glob" Version="1.1.9" />
|
||||
<PackageVersion Include="JetBrains.Profiler.SelfApi" Version="2.5.14" />
|
||||
<PackageVersion Include="JetBrains.Profiler.SelfApi" Version="2.5.15" />
|
||||
<PackageVersion Include="Microsoft.Bcl.AsyncInterfaces" Version="10.0.0" />
|
||||
<PackageVersion Include="Microsoft.NET.Test.Sdk" Version="18.0.1" />
|
||||
<PackageVersion Include="Mono.Posix.NETStandard" Version="1.0.0" />
|
||||
<PackageVersion Include="SimpleExec" Version="12.0.0" />
|
||||
<PackageVersion Include="SimpleExec" Version="12.1.0" />
|
||||
<PackageVersion Include="System.Text.Encoding.CodePages" Version="10.0.0" />
|
||||
<PackageVersion Include="System.Buffers" Version="4.6.1" />
|
||||
<PackageVersion Include="System.Memory" Version="4.6.3" />
|
||||
<PackageVersion Include="System.Text.Encoding.CodePages" Version="10.0.0" />
|
||||
<PackageVersion Include="xunit" Version="2.9.3" />
|
||||
<PackageVersion Include="xunit.runner.visualstudio" Version="3.1.5" />
|
||||
<PackageVersion Include="ZstdSharp.Port" Version="0.8.6" />
|
||||
<PackageVersion Include="Microsoft.NET.ILLink.Tasks" Version="10.0.0" />
|
||||
<PackageVersion Include="Microsoft.SourceLink.GitHub" Version="8.0.0" />
|
||||
<PackageVersion Include="Microsoft.NETFramework.ReferenceAssemblies" Version="1.0.3" />
|
||||
|
||||
14
FORMATS.md
14
FORMATS.md
@@ -24,7 +24,7 @@
|
||||
1. SOLID Rars are only supported in the RarReader API.
|
||||
2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported but only with seekable streams as the Zip spec doesn't support Zip64 data in post data descriptors. Deflate64 is only supported for reading. See [Zip Format Notes](#zip-format-notes) for details on multi-volume archives and streaming behavior.
|
||||
3. The Tar format requires a file size in the header. If no size is specified to the TarWriter and the stream is not seekable, then an exception will be thrown.
|
||||
4. The 7Zip format doesn't allow for reading as a forward-only stream so 7Zip is only supported through the Archive API
|
||||
4. The 7Zip format doesn't allow for reading as a forward-only stream so 7Zip is only supported through the Archive API. See [7Zip Format Notes](#7zip-format-notes) for details on async extraction behavior.
|
||||
5. LZip has no support for extra data like the file name or timestamp. There is a default filename used when looking at the entry Key on the archive.
|
||||
|
||||
### Zip Format Notes
|
||||
@@ -32,6 +32,18 @@
|
||||
- Multi-volume/split ZIP archives require ZipArchive (seekable streams) as ZipReader cannot seek across volume files.
|
||||
- ZipReader processes entries from LocalEntry headers (which include directory entries ending with `/`) and intentionally skips DirectoryEntry headers from the central directory, as they are redundant in streaming mode - all entry data comes from LocalEntry headers which ZipReader has already processed.
|
||||
|
||||
### 7Zip Format Notes
|
||||
|
||||
- **Async Extraction Performance**: When using async extraction methods (e.g., `ExtractAllEntries()` with `MoveToNextEntryAsync()`), each file creates its own decompression stream to avoid state corruption in the LZMA decoder. This is less efficient than synchronous extraction, which can reuse a single decompression stream for multiple files in the same folder.
|
||||
|
||||
**Performance Impact**: For archives with many small files in the same compression folder, async extraction will be slower than synchronous extraction because it must:
|
||||
1. Create a new LZMA decoder for each file
|
||||
2. Skip through the decompressed data to reach each file's starting position
|
||||
|
||||
**Recommendation**: For best performance with 7Zip archives, use synchronous extraction methods (`MoveToNextEntry()` and `WriteEntryToDirectory()`) when possible. Use async methods only when you need to avoid blocking the thread (e.g., in UI applications or async-only contexts).
|
||||
|
||||
**Technical Details**: 7Zip archives group files into "folders" (compression units), where all files in a folder share one continuous LZMA-compressed stream. The LZMA decoder maintains internal state (dictionary window, decoder positions) that assumes sequential, non-interruptible processing. Async operations can yield control during awaits, which would corrupt this shared state. To avoid this, async extraction creates a fresh decoder stream for each file.
|
||||
|
||||
## Compression Streams
|
||||
|
||||
For those who want to directly compress/decompress bits. The single file formats are represented here as well. However, BZip2, LZip and XZ have no metadata (GZip has a little) so using them without something like a Tar file makes little sense.
|
||||
|
||||
64
USAGE.md
64
USAGE.md
@@ -87,20 +87,17 @@ memoryStream.Position = 0;
|
||||
### Extract all files from a rar file to a directory using RarArchive
|
||||
|
||||
Note: Extracting a solid rar or 7z file needs to be done in sequential order to get acceptable decompression speed.
|
||||
It is explicitly recommended to use `ExtractAllEntries` when extracting an entire `IArchive` instead of iterating over all its `Entries`.
|
||||
Alternatively, use `IArchive.WriteToDirectory`.
|
||||
`ExtractAllEntries` is primarily intended for solid archives (like solid Rar) or 7Zip archives, where sequential extraction provides the best performance. For general/simple extraction with any supported archive type, use `archive.WriteToDirectory()` instead.
|
||||
|
||||
```C#
|
||||
using (var archive = RarArchive.Open("Test.rar"))
|
||||
{
|
||||
using (var reader = archive.ExtractAllEntries())
|
||||
// Simple extraction with RarArchive; this WriteToDirectory pattern works for all archive types
|
||||
archive.WriteToDirectory(@"D:\temp", new ExtractionOptions()
|
||||
{
|
||||
reader.WriteAllToDirectory(@"D:\temp", new ExtractionOptions()
|
||||
{
|
||||
ExtractFullPath = true,
|
||||
Overwrite = true
|
||||
});
|
||||
}
|
||||
ExtractFullPath = true,
|
||||
Overwrite = true
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
@@ -116,6 +113,41 @@ using (var archive = RarArchive.Open("Test.rar"))
|
||||
}
|
||||
```
|
||||
|
||||
### Extract solid Rar or 7Zip archives with manual progress reporting
|
||||
|
||||
`ExtractAllEntries` only works for solid archives (Rar) or 7Zip archives. For optimal performance with these archive types, use this method:
|
||||
|
||||
```C#
|
||||
using (var archive = RarArchive.Open("archive.rar")) // Must be solid Rar or 7Zip
|
||||
{
|
||||
if (archive.IsSolid || archive.Type == ArchiveType.SevenZip)
|
||||
{
|
||||
// Calculate total size for progress reporting
|
||||
double totalSize = archive.Entries.Where(e => !e.IsDirectory).Sum(e => e.Size);
|
||||
long completed = 0;
|
||||
|
||||
using (var reader = archive.ExtractAllEntries())
|
||||
{
|
||||
while (reader.MoveToNextEntry())
|
||||
{
|
||||
if (!reader.Entry.IsDirectory)
|
||||
{
|
||||
reader.WriteEntryToDirectory(@"D:\output", new ExtractionOptions()
|
||||
{
|
||||
ExtractFullPath = true,
|
||||
Overwrite = true
|
||||
});
|
||||
|
||||
completed += reader.Entry.Size;
|
||||
double progress = completed / totalSize;
|
||||
Console.WriteLine($"Progress: {progress:P}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Use ReaderFactory to autodetect archive type and Open the entry stream
|
||||
|
||||
```C#
|
||||
@@ -298,14 +330,12 @@ using (var writer = WriterFactory.Open(stream, ArchiveType.Zip, CompressionType.
|
||||
```C#
|
||||
using (var archive = ZipArchive.Open("archive.zip"))
|
||||
{
|
||||
using (var reader = archive.ExtractAllEntries())
|
||||
{
|
||||
await reader.WriteAllToDirectoryAsync(
|
||||
@"C:\output",
|
||||
new ExtractionOptions() { ExtractFullPath = true, Overwrite = true },
|
||||
cancellationToken
|
||||
);
|
||||
}
|
||||
// Simple async extraction - works for all archive types
|
||||
await archive.WriteToDirectoryAsync(
|
||||
@"C:\output",
|
||||
new ExtractionOptions() { ExtractFullPath = true, Overwrite = true },
|
||||
cancellationToken
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ const string Restore = "restore";
|
||||
const string Build = "build";
|
||||
const string Test = "test";
|
||||
const string Format = "format";
|
||||
const string CheckFormat = "check-format";
|
||||
const string Publish = "publish";
|
||||
|
||||
Target(
|
||||
@@ -42,12 +43,20 @@ Target(
|
||||
Target(
|
||||
Format,
|
||||
() =>
|
||||
{
|
||||
Run("dotnet", "tool restore");
|
||||
Run("dotnet", "csharpier format .");
|
||||
}
|
||||
);
|
||||
Target(
|
||||
CheckFormat,
|
||||
() =>
|
||||
{
|
||||
Run("dotnet", "tool restore");
|
||||
Run("dotnet", "csharpier check .");
|
||||
}
|
||||
);
|
||||
Target(Restore, [Format], () => Run("dotnet", "restore"));
|
||||
Target(Restore, [CheckFormat], () => Run("dotnet", "restore"));
|
||||
|
||||
Target(
|
||||
Build,
|
||||
@@ -61,7 +70,7 @@ Target(
|
||||
Target(
|
||||
Test,
|
||||
[Build],
|
||||
["net8.0", "net48"],
|
||||
["net10.0", "net48"],
|
||||
framework =>
|
||||
{
|
||||
IEnumerable<string> GetFiles(string d)
|
||||
|
||||
@@ -4,9 +4,9 @@
|
||||
"net10.0": {
|
||||
"Bullseye": {
|
||||
"type": "Direct",
|
||||
"requested": "[6.0.0, )",
|
||||
"resolved": "6.0.0",
|
||||
"contentHash": "vgwwXfzs7jJrskWH7saHRMgPzziq/e86QZNWY1MnMxd7e+De7E7EX4K3C7yrvaK9y02SJoLxNxcLG/q5qUAghw=="
|
||||
"requested": "[6.1.0, )",
|
||||
"resolved": "6.1.0",
|
||||
"contentHash": "fltnAJDe0BEX5eymXGUq+il2rSUA0pHqUonNDRH2TrvRu8SkU17mYG0IVpdmG2ibtfhdjNrv4CuTCxHOwcozCA=="
|
||||
},
|
||||
"Glob": {
|
||||
"type": "Direct",
|
||||
@@ -16,9 +16,9 @@
|
||||
},
|
||||
"SimpleExec": {
|
||||
"type": "Direct",
|
||||
"requested": "[12.0.0, )",
|
||||
"resolved": "12.0.0",
|
||||
"contentHash": "ptxlWtxC8vM6Y6e3h9ZTxBBkOWnWrm/Sa1HT+2i1xcXY3Hx2hmKDZP5RShPf8Xr9D+ivlrXNy57ktzyH8kyt+Q=="
|
||||
"requested": "[12.1.0, )",
|
||||
"resolved": "12.1.0",
|
||||
"contentHash": "PcCSAlMcKr5yTd571MgEMoGmoSr+omwziq2crB47lKP740lrmjuBocAUXHj+Q6LR6aUDFyhszot2wbtFJTClkA=="
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ using SharpCompress.Readers;
|
||||
|
||||
namespace SharpCompress.Archives;
|
||||
|
||||
public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtractionListener
|
||||
public abstract class AbstractArchive<TEntry, TVolume> : IArchive
|
||||
where TEntry : IArchiveEntry
|
||||
where TVolume : IVolume
|
||||
{
|
||||
@@ -17,11 +17,6 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
|
||||
private bool _disposed;
|
||||
private readonly SourceStream? _sourceStream;
|
||||
|
||||
public event EventHandler<ArchiveExtractionEventArgs<IArchiveEntry>>? EntryExtractionBegin;
|
||||
public event EventHandler<ArchiveExtractionEventArgs<IArchiveEntry>>? EntryExtractionEnd;
|
||||
|
||||
public event EventHandler<CompressedBytesReadEventArgs>? CompressedBytesRead;
|
||||
public event EventHandler<FilePartExtractionBeginEventArgs>? FilePartExtractionBegin;
|
||||
protected ReaderOptions ReaderOptions { get; }
|
||||
|
||||
internal AbstractArchive(ArchiveType type, SourceStream sourceStream)
|
||||
@@ -43,12 +38,6 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
|
||||
|
||||
public ArchiveType Type { get; }
|
||||
|
||||
void IArchiveExtractionListener.FireEntryExtractionBegin(IArchiveEntry entry) =>
|
||||
EntryExtractionBegin?.Invoke(this, new ArchiveExtractionEventArgs<IArchiveEntry>(entry));
|
||||
|
||||
void IArchiveExtractionListener.FireEntryExtractionEnd(IArchiveEntry entry) =>
|
||||
EntryExtractionEnd?.Invoke(this, new ArchiveExtractionEventArgs<IArchiveEntry>(entry));
|
||||
|
||||
private static Stream CheckStreams(Stream stream)
|
||||
{
|
||||
if (!stream.CanSeek || !stream.CanRead)
|
||||
@@ -99,38 +88,12 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
|
||||
}
|
||||
}
|
||||
|
||||
void IArchiveExtractionListener.EnsureEntriesLoaded()
|
||||
private void EnsureEntriesLoaded()
|
||||
{
|
||||
_lazyEntries.EnsureFullyLoaded();
|
||||
_lazyVolumes.EnsureFullyLoaded();
|
||||
}
|
||||
|
||||
void IExtractionListener.FireCompressedBytesRead(
|
||||
long currentPartCompressedBytes,
|
||||
long compressedReadBytes
|
||||
) =>
|
||||
CompressedBytesRead?.Invoke(
|
||||
this,
|
||||
new CompressedBytesReadEventArgs(
|
||||
currentFilePartCompressedBytesRead: currentPartCompressedBytes,
|
||||
compressedBytesRead: compressedReadBytes
|
||||
)
|
||||
);
|
||||
|
||||
void IExtractionListener.FireFilePartExtractionBegin(
|
||||
string name,
|
||||
long size,
|
||||
long compressedSize
|
||||
) =>
|
||||
FilePartExtractionBegin?.Invoke(
|
||||
this,
|
||||
new FilePartExtractionBeginEventArgs(
|
||||
compressedSize: compressedSize,
|
||||
size: size,
|
||||
name: name
|
||||
)
|
||||
);
|
||||
|
||||
/// <summary>
|
||||
/// Use this method to extract all entries in an archive in order.
|
||||
/// This is primarily for SOLID Rar Archives or 7Zip Archives as they need to be
|
||||
@@ -146,11 +109,11 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
|
||||
{
|
||||
if (!IsSolid && Type != ArchiveType.SevenZip)
|
||||
{
|
||||
throw new InvalidOperationException(
|
||||
throw new SharpCompressException(
|
||||
"ExtractAllEntries can only be used on solid archives or 7Zip archives (which require random access)."
|
||||
);
|
||||
}
|
||||
((IArchiveExtractionListener)this).EnsureEntriesLoaded();
|
||||
EnsureEntriesLoaded();
|
||||
return CreateReaderForSolidExtraction();
|
||||
}
|
||||
|
||||
@@ -173,7 +136,7 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
|
||||
{
|
||||
get
|
||||
{
|
||||
((IArchiveExtractionListener)this).EnsureEntriesLoaded();
|
||||
EnsureEntriesLoaded();
|
||||
return Entries.All(x => x.IsComplete);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@ using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Factories;
|
||||
using SharpCompress.IO;
|
||||
@@ -24,6 +26,27 @@ public static class ArchiveFactory
|
||||
return FindFactory<IArchiveFactory>(stream).Open(stream, readerOptions);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens an Archive for random access asynchronously
|
||||
/// </summary>
|
||||
/// <param name="stream"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
/// <returns></returns>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
Stream stream,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
readerOptions ??= new ReaderOptions();
|
||||
stream = SharpCompressStream.Create(stream, bufferSize: readerOptions.BufferSize);
|
||||
var factory = FindFactory<IArchiveFactory>(stream);
|
||||
return await factory
|
||||
.OpenAsync(stream, readerOptions, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public static IWritableArchive Create(ArchiveType type)
|
||||
{
|
||||
var factory = Factory
|
||||
@@ -49,6 +72,22 @@ public static class ArchiveFactory
|
||||
return Open(new FileInfo(filePath), options);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens an Archive from a filepath asynchronously.
|
||||
/// </summary>
|
||||
/// <param name="filePath"></param>
|
||||
/// <param name="options"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static Task<IArchive> OpenAsync(
|
||||
string filePath,
|
||||
ReaderOptions? options = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
return OpenAsync(new FileInfo(filePath), options, cancellationToken);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
@@ -61,6 +100,24 @@ public static class ArchiveFactory
|
||||
return FindFactory<IArchiveFactory>(fileInfo).Open(fileInfo, options);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens an Archive from a FileInfo object asynchronously.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="options"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
FileInfo fileInfo,
|
||||
ReaderOptions? options = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
options ??= new ReaderOptions { LeaveStreamOpen = false };
|
||||
|
||||
var factory = FindFactory<IArchiveFactory>(fileInfo);
|
||||
return await factory.OpenAsync(fileInfo, options, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with IEnumerable FileInfo objects, multi and split support.
|
||||
/// </summary>
|
||||
@@ -87,6 +144,40 @@ public static class ArchiveFactory
|
||||
return FindFactory<IMultiArchiveFactory>(fileInfo).Open(filesArray, options);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a multi-part archive from files asynchronously.
|
||||
/// </summary>
|
||||
/// <param name="fileInfos"></param>
|
||||
/// <param name="options"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IEnumerable<FileInfo> fileInfos,
|
||||
ReaderOptions? options = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
var filesArray = fileInfos.ToArray();
|
||||
if (filesArray.Length == 0)
|
||||
{
|
||||
throw new InvalidOperationException("No files to open");
|
||||
}
|
||||
|
||||
var fileInfo = filesArray[0];
|
||||
if (filesArray.Length == 1)
|
||||
{
|
||||
return await OpenAsync(fileInfo, options, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
options ??= new ReaderOptions { LeaveStreamOpen = false };
|
||||
|
||||
var factory = FindFactory<IMultiArchiveFactory>(fileInfo);
|
||||
return await factory
|
||||
.OpenAsync(filesArray, options, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with IEnumerable FileInfo objects, multi and split support.
|
||||
/// </summary>
|
||||
@@ -113,6 +204,41 @@ public static class ArchiveFactory
|
||||
return FindFactory<IMultiArchiveFactory>(firstStream).Open(streamsArray, options);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a multi-part archive from streams asynchronously.
|
||||
/// </summary>
|
||||
/// <param name="streams"></param>
|
||||
/// <param name="options"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IEnumerable<Stream> streams,
|
||||
ReaderOptions? options = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
streams.NotNull(nameof(streams));
|
||||
var streamsArray = streams.ToArray();
|
||||
if (streamsArray.Length == 0)
|
||||
{
|
||||
throw new InvalidOperationException("No streams");
|
||||
}
|
||||
|
||||
var firstStream = streamsArray[0];
|
||||
if (streamsArray.Length == 1)
|
||||
{
|
||||
return await OpenAsync(firstStream, options, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
firstStream.NotNull(nameof(firstStream));
|
||||
options ??= new ReaderOptions();
|
||||
|
||||
var factory = FindFactory<IMultiArchiveFactory>(firstStream);
|
||||
return await factory
|
||||
.OpenAsync(streamsArray, options, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Extract to specific directory, retaining filename
|
||||
/// </summary>
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
@@ -20,11 +22,30 @@ class AutoArchiveFactory : IArchiveFactory
|
||||
int bufferSize = ReaderOptions.DefaultBufferSize
|
||||
) => throw new NotSupportedException();
|
||||
|
||||
public Task<bool> IsArchiveAsync(
|
||||
Stream stream,
|
||||
string? password = null,
|
||||
int bufferSize = ReaderOptions.DefaultBufferSize,
|
||||
CancellationToken cancellationToken = default
|
||||
) => throw new NotSupportedException();
|
||||
|
||||
public FileInfo? GetFilePart(int index, FileInfo part1) => throw new NotSupportedException();
|
||||
|
||||
public IArchive Open(Stream stream, ReaderOptions? readerOptions = null) =>
|
||||
ArchiveFactory.Open(stream, readerOptions);
|
||||
|
||||
public Task<IArchive> OpenAsync(
|
||||
Stream stream,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
) => ArchiveFactory.OpenAsync(stream, readerOptions, cancellationToken);
|
||||
|
||||
public IArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null) =>
|
||||
ArchiveFactory.Open(fileInfo, readerOptions);
|
||||
|
||||
public Task<IArchive> OpenAsync(
|
||||
FileInfo fileInfo,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
) => ArchiveFactory.OpenAsync(fileInfo, readerOptions, cancellationToken);
|
||||
}
|
||||
|
||||
@@ -102,6 +102,70 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a GZipArchive asynchronously from a stream.
|
||||
/// </summary>
|
||||
/// <param name="stream"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
Stream stream,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(stream, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a GZipArchive asynchronously from a FileInfo.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
FileInfo fileInfo,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(fileInfo, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a GZipArchive asynchronously from multiple streams.
|
||||
/// </summary>
|
||||
/// <param name="streams"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<Stream> streams,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(streams, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a GZipArchive asynchronously from multiple FileInfo objects.
|
||||
/// </summary>
|
||||
/// <param name="fileInfos"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<FileInfo> fileInfos,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(fileInfos, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public static GZipArchive Create() => new();
|
||||
|
||||
/// <summary>
|
||||
@@ -167,6 +231,28 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
return true;
|
||||
}
|
||||
|
||||
public static async Task<bool> IsGZipFileAsync(
|
||||
Stream stream,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
// read the header on the first read
|
||||
byte[] header = new byte[10];
|
||||
|
||||
// workitem 8501: handle edge case (decompress empty stream)
|
||||
if (!await stream.ReadFullyAsync(header, cancellationToken).ConfigureAwait(false))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (header[0] != 0x1F || header[1] != 0x8B || header[2] != 8)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
internal GZipArchive()
|
||||
: base(ArchiveType.GZip) { }
|
||||
|
||||
|
||||
@@ -7,12 +7,6 @@ namespace SharpCompress.Archives;
|
||||
|
||||
public interface IArchive : IDisposable
|
||||
{
|
||||
event EventHandler<ArchiveExtractionEventArgs<IArchiveEntry>> EntryExtractionBegin;
|
||||
event EventHandler<ArchiveExtractionEventArgs<IArchiveEntry>> EntryExtractionEnd;
|
||||
|
||||
event EventHandler<CompressedBytesReadEventArgs> CompressedBytesRead;
|
||||
event EventHandler<FilePartExtractionBeginEventArgs> FilePartExtractionBegin;
|
||||
|
||||
IEnumerable<IArchiveEntry> Entries { get; }
|
||||
IEnumerable<IVolume> Volumes { get; }
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
@@ -8,127 +9,153 @@ namespace SharpCompress.Archives;
|
||||
|
||||
public static class IArchiveEntryExtensions
|
||||
{
|
||||
public static void WriteTo(this IArchiveEntry archiveEntry, Stream streamToWriteTo)
|
||||
private const int BufferSize = 81920;
|
||||
|
||||
/// <param name="archiveEntry">The archive entry to extract.</param>
|
||||
extension(IArchiveEntry archiveEntry)
|
||||
{
|
||||
if (archiveEntry.IsDirectory)
|
||||
/// <summary>
|
||||
/// Extract entry to the specified stream.
|
||||
/// </summary>
|
||||
/// <param name="streamToWriteTo">The stream to write the entry content to.</param>
|
||||
/// <param name="progress">Optional progress reporter for tracking extraction progress.</param>
|
||||
public void WriteTo(Stream streamToWriteTo, IProgress<ProgressReport>? progress = null)
|
||||
{
|
||||
throw new ExtractionException("Entry is a file directory and cannot be extracted.");
|
||||
if (archiveEntry.IsDirectory)
|
||||
{
|
||||
throw new ExtractionException("Entry is a file directory and cannot be extracted.");
|
||||
}
|
||||
|
||||
using var entryStream = archiveEntry.OpenEntryStream();
|
||||
var sourceStream = WrapWithProgress(entryStream, archiveEntry, progress);
|
||||
sourceStream.CopyTo(streamToWriteTo, BufferSize);
|
||||
}
|
||||
|
||||
var streamListener = (IArchiveExtractionListener)archiveEntry.Archive;
|
||||
streamListener.EnsureEntriesLoaded();
|
||||
streamListener.FireEntryExtractionBegin(archiveEntry);
|
||||
streamListener.FireFilePartExtractionBegin(
|
||||
archiveEntry.Key ?? "Key",
|
||||
archiveEntry.Size,
|
||||
archiveEntry.CompressedSize
|
||||
);
|
||||
var entryStream = archiveEntry.OpenEntryStream();
|
||||
using (entryStream)
|
||||
/// <summary>
|
||||
/// Extract entry to the specified stream asynchronously.
|
||||
/// </summary>
|
||||
/// <param name="streamToWriteTo">The stream to write the entry content to.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
/// <param name="progress">Optional progress reporter for tracking extraction progress.</param>
|
||||
public async Task WriteToAsync(
|
||||
Stream streamToWriteTo,
|
||||
IProgress<ProgressReport>? progress = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
using Stream s = new ListeningStream(streamListener, entryStream);
|
||||
s.CopyTo(streamToWriteTo);
|
||||
if (archiveEntry.IsDirectory)
|
||||
{
|
||||
throw new ExtractionException("Entry is a file directory and cannot be extracted.");
|
||||
}
|
||||
|
||||
using var entryStream = await archiveEntry.OpenEntryStreamAsync(cancellationToken);
|
||||
var sourceStream = WrapWithProgress(entryStream, archiveEntry, progress);
|
||||
await sourceStream
|
||||
.CopyToAsync(streamToWriteTo, BufferSize, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
streamListener.FireEntryExtractionEnd(archiveEntry);
|
||||
}
|
||||
|
||||
public static async Task WriteToAsync(
|
||||
this IArchiveEntry archiveEntry,
|
||||
Stream streamToWriteTo,
|
||||
CancellationToken cancellationToken = default
|
||||
private static Stream WrapWithProgress(
|
||||
Stream source,
|
||||
IArchiveEntry entry,
|
||||
IProgress<ProgressReport>? progress
|
||||
)
|
||||
{
|
||||
if (archiveEntry.IsDirectory)
|
||||
if (progress is null)
|
||||
{
|
||||
throw new ExtractionException("Entry is a file directory and cannot be extracted.");
|
||||
return source;
|
||||
}
|
||||
|
||||
var streamListener = (IArchiveExtractionListener)archiveEntry.Archive;
|
||||
streamListener.EnsureEntriesLoaded();
|
||||
streamListener.FireEntryExtractionBegin(archiveEntry);
|
||||
streamListener.FireFilePartExtractionBegin(
|
||||
archiveEntry.Key ?? "Key",
|
||||
archiveEntry.Size,
|
||||
archiveEntry.CompressedSize
|
||||
var entryPath = entry.Key ?? string.Empty;
|
||||
var totalBytes = GetEntrySizeSafe(entry);
|
||||
return new ProgressReportingStream(
|
||||
source,
|
||||
progress,
|
||||
entryPath,
|
||||
totalBytes,
|
||||
leaveOpen: true
|
||||
);
|
||||
var entryStream = archiveEntry.OpenEntryStream();
|
||||
using (entryStream)
|
||||
{
|
||||
using Stream s = new ListeningStream(streamListener, entryStream);
|
||||
await s.CopyToAsync(streamToWriteTo, 81920, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
streamListener.FireEntryExtractionEnd(archiveEntry);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Extract to specific directory, retaining filename
|
||||
/// </summary>
|
||||
public static void WriteToDirectory(
|
||||
this IArchiveEntry entry,
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options = null
|
||||
) =>
|
||||
ExtractionMethods.WriteEntryToDirectory(
|
||||
entry,
|
||||
destinationDirectory,
|
||||
options,
|
||||
entry.WriteToFile
|
||||
);
|
||||
private static long? GetEntrySizeSafe(IArchiveEntry entry)
|
||||
{
|
||||
try
|
||||
{
|
||||
var size = entry.Size;
|
||||
return size >= 0 ? size : null;
|
||||
}
|
||||
catch (NotImplementedException)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Extract to specific directory asynchronously, retaining filename
|
||||
/// </summary>
|
||||
public static Task WriteToDirectoryAsync(
|
||||
this IArchiveEntry entry,
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options = null,
|
||||
CancellationToken cancellationToken = default
|
||||
) =>
|
||||
ExtractionMethods.WriteEntryToDirectoryAsync(
|
||||
entry,
|
||||
destinationDirectory,
|
||||
options,
|
||||
(x, opt) => entry.WriteToFileAsync(x, opt, cancellationToken),
|
||||
cancellationToken
|
||||
);
|
||||
extension(IArchiveEntry entry)
|
||||
{
|
||||
/// <summary>
|
||||
/// Extract to specific directory, retaining filename
|
||||
/// </summary>
|
||||
public void WriteToDirectory(
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options = null
|
||||
) =>
|
||||
ExtractionMethods.WriteEntryToDirectory(
|
||||
entry,
|
||||
destinationDirectory,
|
||||
options,
|
||||
entry.WriteToFile
|
||||
);
|
||||
|
||||
/// <summary>
|
||||
/// Extract to specific file
|
||||
/// </summary>
|
||||
public static void WriteToFile(
|
||||
this IArchiveEntry entry,
|
||||
string destinationFileName,
|
||||
ExtractionOptions? options = null
|
||||
) =>
|
||||
ExtractionMethods.WriteEntryToFile(
|
||||
entry,
|
||||
destinationFileName,
|
||||
options,
|
||||
(x, fm) =>
|
||||
{
|
||||
using var fs = File.Open(destinationFileName, fm);
|
||||
entry.WriteTo(fs);
|
||||
}
|
||||
);
|
||||
/// <summary>
|
||||
/// Extract to specific directory asynchronously, retaining filename
|
||||
/// </summary>
|
||||
public Task WriteToDirectoryAsync(
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options = null,
|
||||
CancellationToken cancellationToken = default
|
||||
) =>
|
||||
ExtractionMethods.WriteEntryToDirectoryAsync(
|
||||
entry,
|
||||
destinationDirectory,
|
||||
options,
|
||||
entry.WriteToFileAsync,
|
||||
cancellationToken
|
||||
);
|
||||
|
||||
/// <summary>
|
||||
/// Extract to specific file asynchronously
|
||||
/// </summary>
|
||||
public static Task WriteToFileAsync(
|
||||
this IArchiveEntry entry,
|
||||
string destinationFileName,
|
||||
ExtractionOptions? options = null,
|
||||
CancellationToken cancellationToken = default
|
||||
) =>
|
||||
ExtractionMethods.WriteEntryToFileAsync(
|
||||
entry,
|
||||
destinationFileName,
|
||||
options,
|
||||
async (x, fm) =>
|
||||
{
|
||||
using var fs = File.Open(destinationFileName, fm);
|
||||
await entry.WriteToAsync(fs, cancellationToken).ConfigureAwait(false);
|
||||
},
|
||||
cancellationToken
|
||||
);
|
||||
/// <summary>
|
||||
/// Extract to specific file
|
||||
/// </summary>
|
||||
public void WriteToFile(string destinationFileName, ExtractionOptions? options = null) =>
|
||||
ExtractionMethods.WriteEntryToFile(
|
||||
entry,
|
||||
destinationFileName,
|
||||
options,
|
||||
(x, fm) =>
|
||||
{
|
||||
using var fs = File.Open(destinationFileName, fm);
|
||||
entry.WriteTo(fs);
|
||||
}
|
||||
);
|
||||
|
||||
/// <summary>
|
||||
/// Extract to specific file asynchronously
|
||||
/// </summary>
|
||||
public Task WriteToFileAsync(
|
||||
string destinationFileName,
|
||||
ExtractionOptions? options = null,
|
||||
CancellationToken cancellationToken = default
|
||||
) =>
|
||||
ExtractionMethods.WriteEntryToFileAsync(
|
||||
entry,
|
||||
destinationFileName,
|
||||
options,
|
||||
async (x, fm, ct) =>
|
||||
{
|
||||
using var fs = File.Open(destinationFileName, fm);
|
||||
await entry.WriteToAsync(fs, null, ct).ConfigureAwait(false);
|
||||
},
|
||||
cancellationToken
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
@@ -10,76 +10,159 @@ namespace SharpCompress.Archives;
|
||||
|
||||
public static class IArchiveExtensions
|
||||
{
|
||||
/// <summary>
|
||||
/// Extract to specific directory, retaining filename
|
||||
/// </summary>
|
||||
public static void WriteToDirectory(
|
||||
this IArchive archive,
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options = null
|
||||
)
|
||||
{
|
||||
using var reader = archive.ExtractAllEntries();
|
||||
reader.WriteAllToDirectory(destinationDirectory, options);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Extracts the archive to the destination directory. Directories will be created as needed.
|
||||
/// </summary>
|
||||
/// <param name="archive">The archive to extract.</param>
|
||||
/// <param name="destination">The folder to extract into.</param>
|
||||
/// <param name="progressReport">Optional progress report callback.</param>
|
||||
/// <param name="cancellationToken">Optional cancellation token.</param>
|
||||
public static void ExtractToDirectory(
|
||||
this IArchive archive,
|
||||
string destination,
|
||||
Action<double>? progressReport = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
extension(IArchive archive)
|
||||
{
|
||||
// Prepare for progress reporting
|
||||
var totalBytes = archive.TotalUncompressSize;
|
||||
var bytesRead = 0L;
|
||||
|
||||
// Tracking for created directories.
|
||||
var seenDirectories = new HashSet<string>();
|
||||
|
||||
// Extract
|
||||
foreach (var entry in archive.Entries)
|
||||
/// <summary>
|
||||
/// Extract to specific directory with progress reporting
|
||||
/// </summary>
|
||||
/// <param name="destinationDirectory">The folder to extract into.</param>
|
||||
/// <param name="options">Extraction options.</param>
|
||||
/// <param name="progress">Optional progress reporter for tracking extraction progress.</param>
|
||||
public void WriteToDirectory(
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options = null,
|
||||
IProgress<ProgressReport>? progress = null
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
|
||||
if (entry.IsDirectory)
|
||||
// For solid archives (Rar, 7Zip), use the optimized reader-based approach
|
||||
if (archive.IsSolid || archive.Type == ArchiveType.SevenZip)
|
||||
{
|
||||
var dirPath = Path.Combine(destination, entry.Key.NotNull("Entry Key is null"));
|
||||
if (
|
||||
Path.GetDirectoryName(dirPath + "/") is { } emptyDirectory
|
||||
&& seenDirectories.Add(dirPath)
|
||||
)
|
||||
{
|
||||
Directory.CreateDirectory(emptyDirectory);
|
||||
}
|
||||
continue;
|
||||
using var reader = archive.ExtractAllEntries();
|
||||
reader.WriteAllToDirectory(destinationDirectory, options);
|
||||
}
|
||||
|
||||
// Create each directory if not already created
|
||||
var path = Path.Combine(destination, entry.Key.NotNull("Entry Key is null"));
|
||||
if (Path.GetDirectoryName(path) is { } directory)
|
||||
else
|
||||
{
|
||||
if (!Directory.Exists(directory) && !seenDirectories.Contains(directory))
|
||||
{
|
||||
Directory.CreateDirectory(directory);
|
||||
seenDirectories.Add(directory);
|
||||
}
|
||||
// For non-solid archives, extract entries directly
|
||||
archive.WriteToDirectoryInternal(destinationDirectory, options, progress);
|
||||
}
|
||||
}
|
||||
|
||||
// Write file
|
||||
using var fs = File.OpenWrite(path);
|
||||
entry.WriteTo(fs);
|
||||
private void WriteToDirectoryInternal(
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options,
|
||||
IProgress<ProgressReport>? progress
|
||||
)
|
||||
{
|
||||
// Prepare for progress reporting
|
||||
var totalBytes = archive.TotalUncompressSize;
|
||||
var bytesRead = 0L;
|
||||
|
||||
// Update progress
|
||||
bytesRead += entry.Size;
|
||||
progressReport?.Invoke(bytesRead / (double)totalBytes);
|
||||
// Tracking for created directories.
|
||||
var seenDirectories = new HashSet<string>();
|
||||
|
||||
// Extract
|
||||
foreach (var entry in archive.Entries)
|
||||
{
|
||||
if (entry.IsDirectory)
|
||||
{
|
||||
var dirPath = Path.Combine(
|
||||
destinationDirectory,
|
||||
entry.Key.NotNull("Entry Key is null")
|
||||
);
|
||||
if (
|
||||
Path.GetDirectoryName(dirPath + "/") is { } parentDirectory
|
||||
&& seenDirectories.Add(dirPath)
|
||||
)
|
||||
{
|
||||
Directory.CreateDirectory(parentDirectory);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Use the entry's WriteToDirectory method which respects ExtractionOptions
|
||||
entry.WriteToDirectory(destinationDirectory, options);
|
||||
|
||||
// Update progress
|
||||
bytesRead += entry.Size;
|
||||
progress?.Report(
|
||||
new ProgressReport(entry.Key ?? string.Empty, bytesRead, totalBytes)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Extract to specific directory asynchronously with progress reporting and cancellation support
|
||||
/// </summary>
|
||||
/// <param name="destinationDirectory">The folder to extract into.</param>
|
||||
/// <param name="options">Extraction options.</param>
|
||||
/// <param name="progress">Optional progress reporter for tracking extraction progress.</param>
|
||||
/// <param name="cancellationToken">Optional cancellation token.</param>
|
||||
public async Task WriteToDirectoryAsync(
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options = null,
|
||||
IProgress<ProgressReport>? progress = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
// For solid archives (Rar, 7Zip), use the optimized reader-based approach
|
||||
if (archive.IsSolid || archive.Type == ArchiveType.SevenZip)
|
||||
{
|
||||
using var reader = archive.ExtractAllEntries();
|
||||
await reader.WriteAllToDirectoryAsync(
|
||||
destinationDirectory,
|
||||
options,
|
||||
cancellationToken
|
||||
);
|
||||
}
|
||||
else
|
||||
{
|
||||
// For non-solid archives, extract entries directly
|
||||
await archive.WriteToDirectoryAsyncInternal(
|
||||
destinationDirectory,
|
||||
options,
|
||||
progress,
|
||||
cancellationToken
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private async Task WriteToDirectoryAsyncInternal(
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options,
|
||||
IProgress<ProgressReport>? progress,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
// Prepare for progress reporting
|
||||
var totalBytes = archive.TotalUncompressSize;
|
||||
var bytesRead = 0L;
|
||||
|
||||
// Tracking for created directories.
|
||||
var seenDirectories = new HashSet<string>();
|
||||
|
||||
// Extract
|
||||
foreach (var entry in archive.Entries)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
|
||||
if (entry.IsDirectory)
|
||||
{
|
||||
var dirPath = Path.Combine(
|
||||
destinationDirectory,
|
||||
entry.Key.NotNull("Entry Key is null")
|
||||
);
|
||||
if (
|
||||
Path.GetDirectoryName(dirPath + "/") is { } parentDirectory
|
||||
&& seenDirectories.Add(dirPath)
|
||||
)
|
||||
{
|
||||
Directory.CreateDirectory(parentDirectory);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Use the entry's WriteToDirectoryAsync method which respects ExtractionOptions
|
||||
await entry
|
||||
.WriteToDirectoryAsync(destinationDirectory, options, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
// Update progress
|
||||
bytesRead += entry.Size;
|
||||
progress?.Report(
|
||||
new ProgressReport(entry.Key ?? string.Empty, bytesRead, totalBytes)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
using SharpCompress.Common;
|
||||
|
||||
namespace SharpCompress.Archives;
|
||||
|
||||
internal interface IArchiveExtractionListener : IExtractionListener
|
||||
{
|
||||
void EnsureEntriesLoaded();
|
||||
void FireEntryExtractionBegin(IArchiveEntry entry);
|
||||
void FireEntryExtractionEnd(IArchiveEntry entry);
|
||||
}
|
||||
@@ -1,4 +1,6 @@
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Factories;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
@@ -26,10 +28,34 @@ public interface IArchiveFactory : IFactory
|
||||
/// <param name="readerOptions">reading options.</param>
|
||||
IArchive Open(Stream stream, ReaderOptions? readerOptions = null);
|
||||
|
||||
/// <summary>
|
||||
/// Opens an Archive for random access asynchronously.
|
||||
/// </summary>
|
||||
/// <param name="stream">An open, readable and seekable stream.</param>
|
||||
/// <param name="readerOptions">reading options.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
Task<IArchive> OpenAsync(
|
||||
Stream stream,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
);
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo">the file to open.</param>
|
||||
/// <param name="readerOptions">reading options.</param>
|
||||
IArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null);
|
||||
|
||||
/// <summary>
|
||||
/// Opens an Archive from a FileInfo object asynchronously.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo">the file to open.</param>
|
||||
/// <param name="readerOptions">reading options.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
Task<IArchive> OpenAsync(
|
||||
FileInfo fileInfo,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Factories;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
@@ -27,10 +29,34 @@ public interface IMultiArchiveFactory : IFactory
|
||||
/// <param name="readerOptions">reading options.</param>
|
||||
IArchive Open(IReadOnlyList<Stream> streams, ReaderOptions? readerOptions = null);
|
||||
|
||||
/// <summary>
|
||||
/// Opens a multi-part archive from streams asynchronously.
|
||||
/// </summary>
|
||||
/// <param name="streams"></param>
|
||||
/// <param name="readerOptions">reading options.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<Stream> streams,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
);
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with IEnumerable Stream objects, multi and split support.
|
||||
/// </summary>
|
||||
/// <param name="fileInfos"></param>
|
||||
/// <param name="readerOptions">reading options.</param>
|
||||
IArchive Open(IReadOnlyList<FileInfo> fileInfos, ReaderOptions? readerOptions = null);
|
||||
|
||||
/// <summary>
|
||||
/// Opens a multi-part archive from files asynchronously.
|
||||
/// </summary>
|
||||
/// <param name="fileInfos"></param>
|
||||
/// <param name="readerOptions">reading options.</param>
|
||||
/// <param name="cancellationToken">Cancellation token.</param>
|
||||
Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<FileInfo> fileInfos,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
);
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@ using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Rar;
|
||||
using SharpCompress.Common.Rar.Headers;
|
||||
@@ -181,6 +183,70 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a RarArchive asynchronously from a stream.
|
||||
/// </summary>
|
||||
/// <param name="stream"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
Stream stream,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(stream, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a RarArchive asynchronously from a FileInfo.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
FileInfo fileInfo,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(fileInfo, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a RarArchive asynchronously from multiple streams.
|
||||
/// </summary>
|
||||
/// <param name="streams"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<Stream> streams,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(streams, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a RarArchive asynchronously from multiple FileInfo objects.
|
||||
/// </summary>
|
||||
/// <param name="fileInfos"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<FileInfo> fileInfos,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(fileInfos, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public static bool IsRarFile(string filePath) => IsRarFile(new FileInfo(filePath));
|
||||
|
||||
public static bool IsRarFile(FileInfo fileInfo)
|
||||
|
||||
@@ -76,7 +76,7 @@ public class RarArchiveEntry : RarEntry, IArchiveEntry
|
||||
stream = new RarStream(
|
||||
archive.UnpackV1.Value,
|
||||
FileHeader,
|
||||
new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>(), archive)
|
||||
new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>())
|
||||
);
|
||||
}
|
||||
else
|
||||
@@ -84,7 +84,7 @@ public class RarArchiveEntry : RarEntry, IArchiveEntry
|
||||
stream = new RarStream(
|
||||
archive.UnpackV2017.Value,
|
||||
FileHeader,
|
||||
new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>(), archive)
|
||||
new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>())
|
||||
);
|
||||
}
|
||||
|
||||
@@ -100,7 +100,7 @@ public class RarArchiveEntry : RarEntry, IArchiveEntry
|
||||
stream = new RarStream(
|
||||
archive.UnpackV1.Value,
|
||||
FileHeader,
|
||||
new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>(), archive)
|
||||
new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>())
|
||||
);
|
||||
}
|
||||
else
|
||||
@@ -108,7 +108,7 @@ public class RarArchiveEntry : RarEntry, IArchiveEntry
|
||||
stream = new RarStream(
|
||||
archive.UnpackV2017.Value,
|
||||
FileHeader,
|
||||
new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>(), archive)
|
||||
new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>())
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,8 @@ using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.SevenZip;
|
||||
using SharpCompress.Compressors.LZMA.Utilites;
|
||||
@@ -103,6 +105,70 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a SevenZipArchive asynchronously from a stream.
|
||||
/// </summary>
|
||||
/// <param name="stream"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
Stream stream,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(stream, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a SevenZipArchive asynchronously from a FileInfo.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
FileInfo fileInfo,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(fileInfo, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a SevenZipArchive asynchronously from multiple streams.
|
||||
/// </summary>
|
||||
/// <param name="streams"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<Stream> streams,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(streams, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a SevenZipArchive asynchronously from multiple FileInfo objects.
|
||||
/// </summary>
|
||||
/// <param name="fileInfos"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<FileInfo> fileInfos,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(fileInfos, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with a SourceStream able to handle FileInfo and Streams.
|
||||
/// </summary>
|
||||
@@ -213,9 +279,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
private sealed class SevenZipReader : AbstractReader<SevenZipEntry, SevenZipVolume>
|
||||
{
|
||||
private readonly SevenZipArchive _archive;
|
||||
private CFolder? _currentFolder;
|
||||
private Stream? _currentStream;
|
||||
private CFileItem? _currentItem;
|
||||
private SevenZipEntry? _currentEntry;
|
||||
|
||||
internal SevenZipReader(ReaderOptions readerOptions, SevenZipArchive archive)
|
||||
: base(readerOptions, ArchiveType.SevenZip) => this._archive = archive;
|
||||
@@ -228,40 +292,135 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
stream.Position = 0;
|
||||
foreach (var dir in entries.Where(x => x.IsDirectory))
|
||||
{
|
||||
_currentEntry = dir;
|
||||
yield return dir;
|
||||
}
|
||||
foreach (
|
||||
var group in entries.Where(x => !x.IsDirectory).GroupBy(x => x.FilePart.Folder)
|
||||
)
|
||||
// For non-directory entries, yield them without creating shared streams
|
||||
// Each call to GetEntryStream() will create a fresh decompression stream
|
||||
// to avoid state corruption issues with async operations
|
||||
foreach (var entry in entries.Where(x => !x.IsDirectory))
|
||||
{
|
||||
_currentFolder = group.Key;
|
||||
if (group.Key is null)
|
||||
{
|
||||
_currentStream = Stream.Null;
|
||||
}
|
||||
else
|
||||
{
|
||||
_currentStream = _archive._database?.GetFolderStream(
|
||||
stream,
|
||||
_currentFolder,
|
||||
new PasswordProvider(Options.Password)
|
||||
);
|
||||
}
|
||||
foreach (var entry in group)
|
||||
{
|
||||
_currentItem = entry.FilePart.Header;
|
||||
yield return entry;
|
||||
}
|
||||
_currentEntry = entry;
|
||||
yield return entry;
|
||||
}
|
||||
}
|
||||
|
||||
protected override EntryStream GetEntryStream() =>
|
||||
CreateEntryStream(
|
||||
new ReadOnlySubStream(
|
||||
_currentStream.NotNull("currentStream is not null"),
|
||||
_currentItem?.Size ?? 0
|
||||
)
|
||||
);
|
||||
protected override EntryStream GetEntryStream()
|
||||
{
|
||||
// Create a fresh decompression stream for each file (no state sharing).
|
||||
// However, the LZMA decoder has bugs in its async implementation that cause
|
||||
// state corruption even on fresh streams. The SyncOnlyStream wrapper
|
||||
// works around these bugs by forcing async operations to use sync equivalents.
|
||||
//
|
||||
// TODO: Fix the LZMA decoder async bugs (in LzmaStream, Decoder, OutWindow)
|
||||
// so this wrapper is no longer necessary.
|
||||
var entry = _currentEntry.NotNull("currentEntry is not null");
|
||||
if (entry.IsDirectory)
|
||||
{
|
||||
return CreateEntryStream(Stream.Null);
|
||||
}
|
||||
return CreateEntryStream(new SyncOnlyStream(entry.FilePart.GetCompressedStream()));
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// WORKAROUND: Forces async operations to use synchronous equivalents.
|
||||
/// This is necessary because the LZMA decoder has bugs in its async implementation
|
||||
/// that cause state corruption (IndexOutOfRangeException, DataErrorException).
|
||||
///
|
||||
/// The proper fix would be to repair the LZMA decoder's async methods
|
||||
/// (LzmaStream.ReadAsync, Decoder.CodeAsync, OutWindow async operations),
|
||||
/// but that requires deep changes to the decoder state machine.
|
||||
/// </summary>
|
||||
private sealed class SyncOnlyStream : Stream
|
||||
{
|
||||
private readonly Stream _baseStream;
|
||||
|
||||
public SyncOnlyStream(Stream baseStream) => _baseStream = baseStream;
|
||||
|
||||
public override bool CanRead => _baseStream.CanRead;
|
||||
public override bool CanSeek => _baseStream.CanSeek;
|
||||
public override bool CanWrite => _baseStream.CanWrite;
|
||||
public override long Length => _baseStream.Length;
|
||||
public override long Position
|
||||
{
|
||||
get => _baseStream.Position;
|
||||
set => _baseStream.Position = value;
|
||||
}
|
||||
|
||||
public override void Flush() => _baseStream.Flush();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count) =>
|
||||
_baseStream.Read(buffer, offset, count);
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) =>
|
||||
_baseStream.Seek(offset, origin);
|
||||
|
||||
public override void SetLength(long value) => _baseStream.SetLength(value);
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
_baseStream.Write(buffer, offset, count);
|
||||
|
||||
// Force async operations to use sync equivalents to avoid LZMA decoder bugs
|
||||
public override Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return Task.FromResult(_baseStream.Read(buffer, offset, count));
|
||||
}
|
||||
|
||||
public override Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
_baseStream.Write(buffer, offset, count);
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
|
||||
public override Task FlushAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
_baseStream.Flush();
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override ValueTask<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return new ValueTask<int>(_baseStream.Read(buffer.Span));
|
||||
}
|
||||
|
||||
public override ValueTask WriteAsync(
|
||||
ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
_baseStream.Write(buffer.Span);
|
||||
return ValueTask.CompletedTask;
|
||||
}
|
||||
#endif
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (disposing)
|
||||
{
|
||||
_baseStream.Dispose();
|
||||
}
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
}
|
||||
|
||||
private class PasswordProvider : IPasswordProvider
|
||||
|
||||
@@ -103,6 +103,70 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a TarArchive asynchronously from a stream.
|
||||
/// </summary>
|
||||
/// <param name="stream"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
Stream stream,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(stream, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a TarArchive asynchronously from a FileInfo.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
FileInfo fileInfo,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(fileInfo, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a TarArchive asynchronously from multiple streams.
|
||||
/// </summary>
|
||||
/// <param name="streams"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<Stream> streams,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(streams, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a TarArchive asynchronously from multiple FileInfo objects.
|
||||
/// </summary>
|
||||
/// <param name="fileInfos"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<FileInfo> fileInfos,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(fileInfos, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public static bool IsTarFile(string filePath) => IsTarFile(new FileInfo(filePath));
|
||||
|
||||
public static bool IsTarFile(FileInfo fileInfo)
|
||||
|
||||
@@ -124,6 +124,70 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a ZipArchive asynchronously from a stream.
|
||||
/// </summary>
|
||||
/// <param name="stream"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
Stream stream,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(stream, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a ZipArchive asynchronously from a FileInfo.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
FileInfo fileInfo,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(fileInfo, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a ZipArchive asynchronously from multiple streams.
|
||||
/// </summary>
|
||||
/// <param name="streams"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<Stream> streams,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(streams, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a ZipArchive asynchronously from multiple FileInfo objects.
|
||||
/// </summary>
|
||||
/// <param name="fileInfos"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
/// <param name="cancellationToken"></param>
|
||||
public static async Task<IArchive> OpenAsync(
|
||||
IReadOnlyList<FileInfo> fileInfos,
|
||||
ReaderOptions? readerOptions = null,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
return await Task.FromResult(Open(fileInfos, readerOptions)).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public static bool IsZipFile(
|
||||
string filePath,
|
||||
string? password = null,
|
||||
@@ -199,7 +263,93 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
if (stream.CanSeek) //could be multipart. Test for central directory - might not be z64 safe
|
||||
{
|
||||
var z = new SeekableZipHeaderFactory(password, new ArchiveEncoding());
|
||||
var x = z.ReadSeekableHeader(stream).FirstOrDefault();
|
||||
var x = z.ReadSeekableHeader(stream, useSync: true).FirstOrDefault();
|
||||
return x?.ZipHeaderType == ZipHeaderType.DirectoryEntry;
|
||||
}
|
||||
else
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return Enum.IsDefined(typeof(ZipHeaderType), header.ZipHeaderType);
|
||||
}
|
||||
catch (CryptographicException)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
catch
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public static async Task<bool> IsZipFileAsync(
|
||||
Stream stream,
|
||||
string? password = null,
|
||||
int bufferSize = ReaderOptions.DefaultBufferSize,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
var headerFactory = new StreamingZipHeaderFactory(password, new ArchiveEncoding(), null);
|
||||
try
|
||||
{
|
||||
if (stream is not SharpCompressStream)
|
||||
{
|
||||
stream = new SharpCompressStream(stream, bufferSize: bufferSize);
|
||||
}
|
||||
|
||||
var header = headerFactory
|
||||
.ReadStreamHeader(stream)
|
||||
.FirstOrDefault(x => x.ZipHeaderType != ZipHeaderType.Split);
|
||||
if (header is null)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
return Enum.IsDefined(typeof(ZipHeaderType), header.ZipHeaderType);
|
||||
}
|
||||
catch (CryptographicException)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
catch
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public static async Task<bool> IsZipMultiAsync(
|
||||
Stream stream,
|
||||
string? password = null,
|
||||
int bufferSize = ReaderOptions.DefaultBufferSize,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
var headerFactory = new StreamingZipHeaderFactory(password, new ArchiveEncoding(), null);
|
||||
try
|
||||
{
|
||||
if (stream is not SharpCompressStream)
|
||||
{
|
||||
stream = new SharpCompressStream(stream, bufferSize: bufferSize);
|
||||
}
|
||||
|
||||
var header = headerFactory
|
||||
.ReadStreamHeader(stream)
|
||||
.FirstOrDefault(x => x.ZipHeaderType != ZipHeaderType.Split);
|
||||
if (header is null)
|
||||
{
|
||||
if (stream.CanSeek) //could be multipart. Test for central directory - might not be z64 safe
|
||||
{
|
||||
var z = new SeekableZipHeaderFactory(password, new ArchiveEncoding());
|
||||
ZipHeader? x = null;
|
||||
await foreach (
|
||||
var h in z.ReadSeekableHeader(stream).WithCancellation(cancellationToken)
|
||||
)
|
||||
{
|
||||
x = h;
|
||||
break;
|
||||
}
|
||||
return x?.ZipHeaderType == ZipHeaderType.DirectoryEntry;
|
||||
}
|
||||
else
|
||||
@@ -254,7 +404,9 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
protected override IEnumerable<ZipArchiveEntry> LoadEntries(IEnumerable<ZipVolume> volumes)
|
||||
{
|
||||
var vols = volumes.ToArray();
|
||||
foreach (var h in headerFactory.NotNull().ReadSeekableHeader(vols.Last().Stream))
|
||||
foreach (
|
||||
var h in headerFactory.NotNull().ReadSeekableHeader(vols.Last().Stream, useSync: true)
|
||||
)
|
||||
{
|
||||
if (h != null)
|
||||
{
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
[assembly: CLSCompliant(true)]
|
||||
// CLSCompliant(false) is required because ZStandard integration uses unsafe code
|
||||
[assembly: CLSCompliant(false)]
|
||||
[assembly: InternalsVisibleTo(
|
||||
"SharpCompress.Test,PublicKey=0024000004800000940000000602000000240000525341310004000001000100158bebf1433f76dffc356733c138babea7a47536c65ed8009b16372c6f4edbb20554db74a62687f56b97c20a6ce8c4b123280279e33c894e7b3aa93ab3c573656fde4db576cfe07dba09619ead26375b25d2c4a8e43f7be257d712b0dd2eb546f67adb09281338618a58ac834fc038dd7e2740a7ab3591826252e4f4516306dc"
|
||||
)]
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Common;
|
||||
|
||||
public class ArchiveExtractionEventArgs<T> : EventArgs
|
||||
{
|
||||
internal ArchiveExtractionEventArgs(T entry) => Item = entry;
|
||||
|
||||
public T Item { get; }
|
||||
}
|
||||
117
src/SharpCompress/Common/AsyncBinaryReader.cs
Normal file
117
src/SharpCompress/Common/AsyncBinaryReader.cs
Normal file
@@ -0,0 +1,117 @@
|
||||
using System;
|
||||
using System.Buffers.Binary;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common
|
||||
{
|
||||
public sealed class AsyncBinaryReader : IDisposable
|
||||
{
|
||||
private readonly Stream _stream;
|
||||
private readonly Stream _originalStream;
|
||||
private readonly bool _leaveOpen;
|
||||
private readonly byte[] _buffer = new byte[8];
|
||||
private bool _disposed;
|
||||
|
||||
public AsyncBinaryReader(Stream stream, bool leaveOpen = false, int bufferSize = 4096)
|
||||
{
|
||||
_originalStream = stream ?? throw new ArgumentNullException(nameof(stream));
|
||||
_leaveOpen = leaveOpen;
|
||||
|
||||
// Use the stream directly without wrapping in BufferedStream
|
||||
// BufferedStream uses synchronous Read internally which doesn't work with async-only streams
|
||||
// SharpCompress uses SharpCompressStream for buffering which supports true async reads
|
||||
_stream = stream;
|
||||
}
|
||||
|
||||
public Stream BaseStream => _stream;
|
||||
|
||||
public async ValueTask<byte> ReadByteAsync(CancellationToken ct = default)
|
||||
{
|
||||
await ReadExactAsync(_buffer, 0, 1, ct).ConfigureAwait(false);
|
||||
return _buffer[0];
|
||||
}
|
||||
|
||||
public async ValueTask<ushort> ReadUInt16Async(CancellationToken ct = default)
|
||||
{
|
||||
await ReadExactAsync(_buffer, 0, 2, ct).ConfigureAwait(false);
|
||||
return BinaryPrimitives.ReadUInt16LittleEndian(_buffer);
|
||||
}
|
||||
|
||||
public async ValueTask<uint> ReadUInt32Async(CancellationToken ct = default)
|
||||
{
|
||||
await ReadExactAsync(_buffer, 0, 4, ct).ConfigureAwait(false);
|
||||
return BinaryPrimitives.ReadUInt32LittleEndian(_buffer);
|
||||
}
|
||||
|
||||
public async ValueTask<ulong> ReadUInt64Async(CancellationToken ct = default)
|
||||
{
|
||||
await ReadExactAsync(_buffer, 0, 8, ct).ConfigureAwait(false);
|
||||
return BinaryPrimitives.ReadUInt64LittleEndian(_buffer);
|
||||
}
|
||||
|
||||
public async ValueTask<byte[]> ReadBytesAsync(int count, CancellationToken ct = default)
|
||||
{
|
||||
var result = new byte[count];
|
||||
await ReadExactAsync(result, 0, count, ct).ConfigureAwait(false);
|
||||
return result;
|
||||
}
|
||||
|
||||
private async ValueTask ReadExactAsync(
|
||||
byte[] destination,
|
||||
int offset,
|
||||
int length,
|
||||
CancellationToken ct
|
||||
)
|
||||
{
|
||||
var read = 0;
|
||||
while (read < length)
|
||||
{
|
||||
var n = await _stream
|
||||
.ReadAsync(destination, offset + read, length - read, ct)
|
||||
.ConfigureAwait(false);
|
||||
if (n == 0)
|
||||
{
|
||||
throw new EndOfStreamException();
|
||||
}
|
||||
|
||||
read += n;
|
||||
}
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
_disposed = true;
|
||||
|
||||
// Dispose the original stream if we own it
|
||||
if (!_leaveOpen)
|
||||
{
|
||||
_originalStream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
#if NET6_0_OR_GREATER
|
||||
public async ValueTask DisposeAsync()
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
_disposed = true;
|
||||
|
||||
// Dispose the original stream if we own it
|
||||
if (!_leaveOpen)
|
||||
{
|
||||
await _originalStream.DisposeAsync().ConfigureAwait(false);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Common;
|
||||
|
||||
public sealed class CompressedBytesReadEventArgs : EventArgs
|
||||
{
|
||||
public CompressedBytesReadEventArgs(
|
||||
long compressedBytesRead,
|
||||
long currentFilePartCompressedBytesRead
|
||||
)
|
||||
{
|
||||
CompressedBytesRead = compressedBytesRead;
|
||||
CurrentFilePartCompressedBytesRead = currentFilePartCompressedBytesRead;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Compressed bytes read for the current entry
|
||||
/// </summary>
|
||||
public long CompressedBytesRead { get; }
|
||||
|
||||
/// <summary>
|
||||
/// Current file part read for Multipart files (e.g. Rar)
|
||||
/// </summary>
|
||||
public long CurrentFilePartCompressedBytesRead { get; }
|
||||
}
|
||||
@@ -128,7 +128,7 @@ internal static class ExtractionMethods
|
||||
IEntry entry,
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options,
|
||||
Func<string, ExtractionOptions?, Task> writeAsync,
|
||||
Func<string, ExtractionOptions?, CancellationToken, Task> writeAsync,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
@@ -189,7 +189,7 @@ internal static class ExtractionMethods
|
||||
"Entry is trying to write a file outside of the destination directory."
|
||||
);
|
||||
}
|
||||
await writeAsync(destinationFileName, options).ConfigureAwait(false);
|
||||
await writeAsync(destinationFileName, options, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
else if (options.ExtractFullPath && !Directory.Exists(destinationFileName))
|
||||
{
|
||||
@@ -201,7 +201,7 @@ internal static class ExtractionMethods
|
||||
IEntry entry,
|
||||
string destinationFileName,
|
||||
ExtractionOptions? options,
|
||||
Func<string, FileMode, Task> openAndWriteAsync,
|
||||
Func<string, FileMode, CancellationToken, Task> openAndWriteAsync,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
@@ -225,7 +225,8 @@ internal static class ExtractionMethods
|
||||
fm = FileMode.CreateNew;
|
||||
}
|
||||
|
||||
await openAndWriteAsync(destinationFileName, fm).ConfigureAwait(false);
|
||||
await openAndWriteAsync(destinationFileName, fm, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
entry.PreserveExtractionOptions(destinationFileName, options);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common;
|
||||
|
||||
@@ -14,4 +16,8 @@ public abstract class FilePart
|
||||
internal abstract Stream? GetCompressedStream();
|
||||
internal abstract Stream? GetRawStream();
|
||||
internal bool Skipped { get; set; }
|
||||
|
||||
internal virtual Task<Stream?> GetCompressedStreamAsync(
|
||||
CancellationToken cancellationToken = default
|
||||
) => Task.FromResult(GetCompressedStream());
|
||||
}
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Common;
|
||||
|
||||
public sealed class FilePartExtractionBeginEventArgs : EventArgs
|
||||
{
|
||||
public FilePartExtractionBeginEventArgs(string name, long size, long compressedSize)
|
||||
{
|
||||
Name = name;
|
||||
Size = size;
|
||||
CompressedSize = compressedSize;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// File name for the part for the current entry
|
||||
/// </summary>
|
||||
public string Name { get; }
|
||||
|
||||
/// <summary>
|
||||
/// Uncompressed size of the current entry in the part
|
||||
/// </summary>
|
||||
public long Size { get; }
|
||||
|
||||
/// <summary>
|
||||
/// Compressed size of the current entry in the part
|
||||
/// </summary>
|
||||
public long CompressedSize { get; }
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
namespace SharpCompress.Common;
|
||||
|
||||
public interface IExtractionListener
|
||||
{
|
||||
void FireFilePartExtractionBegin(string name, long size, long compressedSize);
|
||||
void FireCompressedBytesRead(long currentPartCompressedBytes, long compressedReadBytes);
|
||||
}
|
||||
43
src/SharpCompress/Common/ProgressReport.cs
Normal file
43
src/SharpCompress/Common/ProgressReport.cs
Normal file
@@ -0,0 +1,43 @@
|
||||
namespace SharpCompress.Common;
|
||||
|
||||
/// <summary>
|
||||
/// Represents progress information for compression or extraction operations.
|
||||
/// </summary>
|
||||
public sealed class ProgressReport
|
||||
{
|
||||
/// <summary>
|
||||
/// Initializes a new instance of the <see cref="ProgressReport"/> class.
|
||||
/// </summary>
|
||||
/// <param name="entryPath">The path of the entry being processed.</param>
|
||||
/// <param name="bytesTransferred">Number of bytes transferred so far.</param>
|
||||
/// <param name="totalBytes">Total bytes to be transferred, or null if unknown.</param>
|
||||
public ProgressReport(string entryPath, long bytesTransferred, long? totalBytes)
|
||||
{
|
||||
EntryPath = entryPath;
|
||||
BytesTransferred = bytesTransferred;
|
||||
TotalBytes = totalBytes;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Gets the path of the entry being processed.
|
||||
/// </summary>
|
||||
public string EntryPath { get; }
|
||||
|
||||
/// <summary>
|
||||
/// Gets the number of bytes transferred so far.
|
||||
/// </summary>
|
||||
public long BytesTransferred { get; }
|
||||
|
||||
/// <summary>
|
||||
/// Gets the total number of bytes to be transferred, or null if unknown.
|
||||
/// </summary>
|
||||
public long? TotalBytes { get; }
|
||||
|
||||
/// <summary>
|
||||
/// Gets the progress percentage (0-100), or null if total bytes is unknown.
|
||||
/// </summary>
|
||||
public double? PercentComplete =>
|
||||
TotalBytes.HasValue && TotalBytes.Value > 0
|
||||
? (double)BytesTransferred / TotalBytes.Value * 100
|
||||
: null;
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
using System;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
namespace SharpCompress.Common;
|
||||
|
||||
public sealed class ReaderExtractionEventArgs<T> : EventArgs
|
||||
{
|
||||
internal ReaderExtractionEventArgs(T entry, ReaderProgress? readerProgress = null)
|
||||
{
|
||||
Item = entry;
|
||||
ReaderProgress = readerProgress;
|
||||
}
|
||||
|
||||
public T Item { get; }
|
||||
|
||||
public ReaderProgress? ReaderProgress { get; }
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
using System;
|
||||
using System.Buffers.Binary;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
@@ -9,8 +10,16 @@ internal sealed class TarHeader
|
||||
{
|
||||
internal static readonly DateTime EPOCH = new(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
|
||||
public TarHeader(ArchiveEncoding archiveEncoding) => ArchiveEncoding = archiveEncoding;
|
||||
public TarHeader(
|
||||
ArchiveEncoding archiveEncoding,
|
||||
TarHeaderWriteFormat writeFormat = TarHeaderWriteFormat.GNU_TAR_LONG_LINK
|
||||
)
|
||||
{
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
WriteFormat = writeFormat;
|
||||
}
|
||||
|
||||
internal TarHeaderWriteFormat WriteFormat { get; set; }
|
||||
internal string? Name { get; set; }
|
||||
internal string? LinkName { get; set; }
|
||||
|
||||
@@ -30,6 +39,114 @@ internal sealed class TarHeader
|
||||
private const int MAX_LONG_NAME_SIZE = 32768;
|
||||
|
||||
internal void Write(Stream output)
|
||||
{
|
||||
switch (WriteFormat)
|
||||
{
|
||||
case TarHeaderWriteFormat.GNU_TAR_LONG_LINK:
|
||||
WriteGnuTarLongLink(output);
|
||||
break;
|
||||
case TarHeaderWriteFormat.USTAR:
|
||||
WriteUstar(output);
|
||||
break;
|
||||
default:
|
||||
throw new Exception("This should be impossible...");
|
||||
}
|
||||
}
|
||||
|
||||
internal void WriteUstar(Stream output)
|
||||
{
|
||||
var buffer = new byte[BLOCK_SIZE];
|
||||
|
||||
WriteOctalBytes(511, buffer, 100, 8); // file mode
|
||||
WriteOctalBytes(0, buffer, 108, 8); // owner ID
|
||||
WriteOctalBytes(0, buffer, 116, 8); // group ID
|
||||
|
||||
//ArchiveEncoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
|
||||
var nameByteCount = ArchiveEncoding
|
||||
.GetEncoding()
|
||||
.GetByteCount(Name.NotNull("Name is null"));
|
||||
|
||||
if (nameByteCount > 100)
|
||||
{
|
||||
// if name is longer, try to split it into name and namePrefix
|
||||
|
||||
string fullName = Name.NotNull("Name is null");
|
||||
|
||||
// find all directory separators
|
||||
List<int> dirSeps = new List<int>();
|
||||
for (int i = 0; i < fullName.Length; i++)
|
||||
{
|
||||
if (fullName[i] == Path.DirectorySeparatorChar)
|
||||
{
|
||||
dirSeps.Add(i);
|
||||
}
|
||||
}
|
||||
|
||||
// find the right place to split the name
|
||||
int splitIndex = -1;
|
||||
for (int i = 0; i < dirSeps.Count; i++)
|
||||
{
|
||||
int count = ArchiveEncoding
|
||||
.GetEncoding()
|
||||
.GetByteCount(fullName.Substring(0, dirSeps[i]));
|
||||
if (count < 155)
|
||||
{
|
||||
splitIndex = dirSeps[i];
|
||||
}
|
||||
else
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (splitIndex == -1)
|
||||
{
|
||||
throw new Exception(
|
||||
$"Tar header USTAR format can not fit file name \"{fullName}\" of length {nameByteCount}! Directory separator not found! Try using GNU Tar format instead!"
|
||||
);
|
||||
}
|
||||
|
||||
string namePrefix = fullName.Substring(0, splitIndex);
|
||||
string name = fullName.Substring(splitIndex + 1);
|
||||
|
||||
if (this.ArchiveEncoding.GetEncoding().GetByteCount(namePrefix) >= 155)
|
||||
throw new Exception(
|
||||
$"Tar header USTAR format can not fit file name \"{fullName}\" of length {nameByteCount}! Try using GNU Tar format instead!"
|
||||
);
|
||||
|
||||
if (this.ArchiveEncoding.GetEncoding().GetByteCount(name) >= 100)
|
||||
throw new Exception(
|
||||
$"Tar header USTAR format can not fit file name \"{fullName}\" of length {nameByteCount}! Try using GNU Tar format instead!"
|
||||
);
|
||||
|
||||
// write name prefix
|
||||
WriteStringBytes(ArchiveEncoding.Encode(namePrefix), buffer, 345, 100);
|
||||
// write partial name
|
||||
WriteStringBytes(ArchiveEncoding.Encode(name), buffer, 100);
|
||||
}
|
||||
else
|
||||
{
|
||||
WriteStringBytes(ArchiveEncoding.Encode(Name.NotNull("Name is null")), buffer, 100);
|
||||
}
|
||||
|
||||
WriteOctalBytes(Size, buffer, 124, 12);
|
||||
var time = (long)(LastModifiedTime.ToUniversalTime() - EPOCH).TotalSeconds;
|
||||
WriteOctalBytes(time, buffer, 136, 12);
|
||||
buffer[156] = (byte)EntryType;
|
||||
|
||||
// write ustar magic field
|
||||
WriteStringBytes(Encoding.ASCII.GetBytes("ustar"), buffer, 257, 6);
|
||||
// write ustar version "00"
|
||||
buffer[263] = 0x30;
|
||||
buffer[264] = 0x30;
|
||||
|
||||
var crc = RecalculateChecksum(buffer);
|
||||
WriteOctalBytes(crc, buffer, 148, 8);
|
||||
|
||||
output.Write(buffer, 0, buffer.Length);
|
||||
}
|
||||
|
||||
internal void WriteGnuTarLongLink(Stream output)
|
||||
{
|
||||
var buffer = new byte[BLOCK_SIZE];
|
||||
|
||||
@@ -85,7 +202,7 @@ internal sealed class TarHeader
|
||||
0,
|
||||
100 - ArchiveEncoding.GetEncoding().GetMaxByteCount(1)
|
||||
);
|
||||
Write(output);
|
||||
WriteGnuTarLongLink(output);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -241,6 +358,18 @@ internal sealed class TarHeader
|
||||
buffer.Slice(i, length - i).Clear();
|
||||
}
|
||||
|
||||
private static void WriteStringBytes(
|
||||
ReadOnlySpan<byte> name,
|
||||
Span<byte> buffer,
|
||||
int offset,
|
||||
int length
|
||||
)
|
||||
{
|
||||
name.CopyTo(buffer.Slice(offset));
|
||||
var i = Math.Min(length, name.Length);
|
||||
buffer.Slice(offset + i, length - i).Clear();
|
||||
}
|
||||
|
||||
private static void WriteStringBytes(string name, byte[] buffer, int offset, int length)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -0,0 +1,7 @@
|
||||
namespace SharpCompress.Common.Tar.Headers;
|
||||
|
||||
public enum TarHeaderWriteFormat
|
||||
{
|
||||
GNU_TAR_LONG_LINK,
|
||||
USTAR,
|
||||
}
|
||||
@@ -1,4 +1,5 @@
|
||||
using System.IO;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers;
|
||||
|
||||
@@ -19,6 +20,18 @@ internal class DirectoryEndHeader : ZipHeader
|
||||
Comment = reader.ReadBytes(CommentLength);
|
||||
}
|
||||
|
||||
internal override async ValueTask Read(AsyncBinaryReader reader)
|
||||
{
|
||||
VolumeNumber = await reader.ReadUInt16Async();
|
||||
FirstVolumeWithDirectory = await reader.ReadUInt16Async();
|
||||
TotalNumberOfEntriesInDisk = await reader.ReadUInt16Async();
|
||||
TotalNumberOfEntries = await reader.ReadUInt16Async();
|
||||
DirectorySize = await reader.ReadUInt32Async();
|
||||
DirectoryStartOffsetRelativeToDisk = await reader.ReadUInt32Async();
|
||||
CommentLength = await reader.ReadUInt16Async();
|
||||
Comment = await reader.ReadBytesAsync(CommentLength);
|
||||
}
|
||||
|
||||
public ushort VolumeNumber { get; private set; }
|
||||
|
||||
public ushort FirstVolumeWithDirectory { get; private set; }
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers;
|
||||
|
||||
@@ -31,7 +32,37 @@ internal class DirectoryEntryHeader : ZipFileEntry
|
||||
var extra = reader.ReadBytes(extraLength);
|
||||
var comment = reader.ReadBytes(commentLength);
|
||||
|
||||
// According to .ZIP File Format Specification
|
||||
ProcessReadData(name, extra, comment);
|
||||
}
|
||||
|
||||
internal override async ValueTask Read(AsyncBinaryReader reader)
|
||||
{
|
||||
Version = await reader.ReadUInt16Async();
|
||||
VersionNeededToExtract = await reader.ReadUInt16Async();
|
||||
Flags = (HeaderFlags)await reader.ReadUInt16Async();
|
||||
CompressionMethod = (ZipCompressionMethod)await reader.ReadUInt16Async();
|
||||
OriginalLastModifiedTime = LastModifiedTime = await reader.ReadUInt16Async();
|
||||
OriginalLastModifiedDate = LastModifiedDate = await reader.ReadUInt16Async();
|
||||
Crc = await reader.ReadUInt32Async();
|
||||
CompressedSize = await reader.ReadUInt32Async();
|
||||
UncompressedSize = await reader.ReadUInt32Async();
|
||||
var nameLength = await reader.ReadUInt16Async();
|
||||
var extraLength = await reader.ReadUInt16Async();
|
||||
var commentLength = await reader.ReadUInt16Async();
|
||||
DiskNumberStart = await reader.ReadUInt16Async();
|
||||
InternalFileAttributes = await reader.ReadUInt16Async();
|
||||
ExternalFileAttributes = await reader.ReadUInt32Async();
|
||||
RelativeOffsetOfEntryHeader = await reader.ReadUInt32Async();
|
||||
|
||||
var name = await reader.ReadBytesAsync(nameLength);
|
||||
var extra = await reader.ReadBytesAsync(extraLength);
|
||||
var comment = await reader.ReadBytesAsync(commentLength);
|
||||
|
||||
ProcessReadData(name, extra, comment);
|
||||
}
|
||||
|
||||
private void ProcessReadData(byte[] name, byte[] extra, byte[] comment)
|
||||
{
|
||||
//
|
||||
// For example: https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
|
||||
//
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
using System.IO;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers;
|
||||
|
||||
@@ -8,4 +9,6 @@ internal class IgnoreHeader : ZipHeader
|
||||
: base(type) { }
|
||||
|
||||
internal override void Read(BinaryReader reader) { }
|
||||
|
||||
internal override ValueTask Read(AsyncBinaryReader reader) => default;
|
||||
}
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers;
|
||||
|
||||
internal class LocalEntryHeader : ZipFileEntry
|
||||
internal class LocalEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
: ZipFileEntry(ZipHeaderType.LocalEntry, archiveEncoding)
|
||||
{
|
||||
public LocalEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
: base(ZipHeaderType.LocalEntry, archiveEncoding) { }
|
||||
|
||||
internal override void Read(BinaryReader reader)
|
||||
{
|
||||
Version = reader.ReadUInt16();
|
||||
@@ -23,7 +22,29 @@ internal class LocalEntryHeader : ZipFileEntry
|
||||
var name = reader.ReadBytes(nameLength);
|
||||
var extra = reader.ReadBytes(extraLength);
|
||||
|
||||
// According to .ZIP File Format Specification
|
||||
ProcessReadData(name, extra);
|
||||
}
|
||||
|
||||
internal override async ValueTask Read(AsyncBinaryReader reader)
|
||||
{
|
||||
Version = await reader.ReadUInt16Async();
|
||||
Flags = (HeaderFlags)await reader.ReadUInt16Async();
|
||||
CompressionMethod = (ZipCompressionMethod)await reader.ReadUInt16Async();
|
||||
OriginalLastModifiedTime = LastModifiedTime = await reader.ReadUInt16Async();
|
||||
OriginalLastModifiedDate = LastModifiedDate = await reader.ReadUInt16Async();
|
||||
Crc = await reader.ReadUInt32Async();
|
||||
CompressedSize = await reader.ReadUInt32Async();
|
||||
UncompressedSize = await reader.ReadUInt32Async();
|
||||
var nameLength = await reader.ReadUInt16Async();
|
||||
var extraLength = await reader.ReadUInt16Async();
|
||||
var name = await reader.ReadBytesAsync(nameLength);
|
||||
var extra = await reader.ReadBytesAsync(extraLength);
|
||||
|
||||
ProcessReadData(name, extra);
|
||||
}
|
||||
|
||||
private void ProcessReadData(byte[] name, byte[] extra)
|
||||
{
|
||||
//
|
||||
// For example: https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
|
||||
//
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers;
|
||||
|
||||
@@ -9,4 +10,7 @@ internal class SplitHeader : ZipHeader
|
||||
: base(ZipHeaderType.Split) { }
|
||||
|
||||
internal override void Read(BinaryReader reader) => throw new NotImplementedException();
|
||||
|
||||
internal override ValueTask Read(AsyncBinaryReader reader) =>
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
using System.IO;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers;
|
||||
|
||||
@@ -26,6 +27,25 @@ internal class Zip64DirectoryEndHeader : ZipHeader
|
||||
);
|
||||
}
|
||||
|
||||
internal override async ValueTask Read(AsyncBinaryReader reader)
|
||||
{
|
||||
SizeOfDirectoryEndRecord = (long)await reader.ReadUInt64Async();
|
||||
VersionMadeBy = await reader.ReadUInt16Async();
|
||||
VersionNeededToExtract = await reader.ReadUInt16Async();
|
||||
VolumeNumber = await reader.ReadUInt32Async();
|
||||
FirstVolumeWithDirectory = await reader.ReadUInt32Async();
|
||||
TotalNumberOfEntriesInDisk = (long)await reader.ReadUInt64Async();
|
||||
TotalNumberOfEntries = (long)await reader.ReadUInt64Async();
|
||||
DirectorySize = (long)await reader.ReadUInt64Async();
|
||||
DirectoryStartOffsetRelativeToDisk = (long)await reader.ReadUInt64Async();
|
||||
DataSector = await reader.ReadBytesAsync(
|
||||
(int)(
|
||||
SizeOfDirectoryEndRecord
|
||||
- SIZE_OF_FIXED_HEADER_DATA_EXCEPT_SIGNATURE_AND_SIZE_FIELDS
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
private const int SIZE_OF_FIXED_HEADER_DATA_EXCEPT_SIGNATURE_AND_SIZE_FIELDS = 44;
|
||||
|
||||
public long SizeOfDirectoryEndRecord { get; private set; }
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
using System.IO;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers;
|
||||
|
||||
internal class Zip64DirectoryEndLocatorHeader : ZipHeader
|
||||
internal class Zip64DirectoryEndLocatorHeader() : ZipHeader(ZipHeaderType.Zip64DirectoryEndLocator)
|
||||
{
|
||||
public Zip64DirectoryEndLocatorHeader()
|
||||
: base(ZipHeaderType.Zip64DirectoryEndLocator) { }
|
||||
|
||||
internal override void Read(BinaryReader reader)
|
||||
{
|
||||
FirstVolumeWithDirectory = reader.ReadUInt32();
|
||||
@@ -14,6 +12,13 @@ internal class Zip64DirectoryEndLocatorHeader : ZipHeader
|
||||
TotalNumberOfVolumes = reader.ReadUInt32();
|
||||
}
|
||||
|
||||
internal override async ValueTask Read(AsyncBinaryReader reader)
|
||||
{
|
||||
FirstVolumeWithDirectory = await reader.ReadUInt32Async();
|
||||
RelativeOffsetOfTheEndOfDirectoryRecord = (long)await reader.ReadUInt64Async();
|
||||
TotalNumberOfVolumes = await reader.ReadUInt32Async();
|
||||
}
|
||||
|
||||
public uint FirstVolumeWithDirectory { get; private set; }
|
||||
|
||||
public long RelativeOffsetOfTheEndOfDirectoryRecord { get; private set; }
|
||||
|
||||
@@ -2,18 +2,14 @@ using System;
|
||||
using System.Buffers.Binary;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers;
|
||||
|
||||
internal abstract class ZipFileEntry : ZipHeader
|
||||
internal abstract class ZipFileEntry(ZipHeaderType type, ArchiveEncoding archiveEncoding)
|
||||
: ZipHeader(type)
|
||||
{
|
||||
protected ZipFileEntry(ZipHeaderType type, ArchiveEncoding archiveEncoding)
|
||||
: base(type)
|
||||
{
|
||||
Extra = new List<ExtraData>();
|
||||
ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
internal bool IsDirectory
|
||||
{
|
||||
get
|
||||
@@ -30,7 +26,7 @@ internal abstract class ZipFileEntry : ZipHeader
|
||||
|
||||
internal Stream? PackedStream { get; set; }
|
||||
|
||||
internal ArchiveEncoding ArchiveEncoding { get; }
|
||||
internal ArchiveEncoding ArchiveEncoding { get; } = archiveEncoding;
|
||||
|
||||
internal string? Name { get; set; }
|
||||
|
||||
@@ -44,7 +40,7 @@ internal abstract class ZipFileEntry : ZipHeader
|
||||
|
||||
internal long UncompressedSize { get; set; }
|
||||
|
||||
internal List<ExtraData> Extra { get; set; }
|
||||
internal List<ExtraData> Extra { get; set; } = new();
|
||||
|
||||
public string? Password { get; set; }
|
||||
|
||||
@@ -63,6 +59,24 @@ internal abstract class ZipFileEntry : ZipHeader
|
||||
return encryptionData;
|
||||
}
|
||||
|
||||
internal async Task<PkwareTraditionalEncryptionData> ComposeEncryptionDataAsync(
|
||||
Stream archiveStream,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (archiveStream is null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(archiveStream));
|
||||
}
|
||||
|
||||
var buffer = new byte[12];
|
||||
await archiveStream.ReadFullyAsync(buffer, 0, 12, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
var encryptionData = PkwareTraditionalEncryptionData.ForRead(Password!, this, buffer);
|
||||
|
||||
return encryptionData;
|
||||
}
|
||||
|
||||
internal WinzipAesEncryptionData? WinzipAesEncryptionData { get; set; }
|
||||
|
||||
/// <summary>
|
||||
|
||||
@@ -1,18 +1,14 @@
|
||||
using System.IO;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common.Zip.Headers;
|
||||
|
||||
internal abstract class ZipHeader
|
||||
internal abstract class ZipHeader(ZipHeaderType type)
|
||||
{
|
||||
protected ZipHeader(ZipHeaderType type)
|
||||
{
|
||||
ZipHeaderType = type;
|
||||
HasData = true;
|
||||
}
|
||||
|
||||
internal ZipHeaderType ZipHeaderType { get; }
|
||||
internal ZipHeaderType ZipHeaderType { get; } = type;
|
||||
|
||||
internal abstract void Read(BinaryReader reader);
|
||||
internal abstract ValueTask Read(AsyncBinaryReader reader);
|
||||
|
||||
internal bool HasData { get; set; }
|
||||
internal bool HasData { get; set; } = true;
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
|
||||
@@ -18,7 +19,74 @@ internal sealed class SeekableZipHeaderFactory : ZipHeaderFactory
|
||||
internal SeekableZipHeaderFactory(string? password, ArchiveEncoding archiveEncoding)
|
||||
: base(StreamingMode.Seekable, password, archiveEncoding) { }
|
||||
|
||||
internal IEnumerable<ZipHeader> ReadSeekableHeader(Stream stream)
|
||||
internal async IAsyncEnumerable<ZipHeader> ReadSeekableHeader(Stream stream)
|
||||
{
|
||||
var reader = new AsyncBinaryReader(stream);
|
||||
|
||||
await SeekBackToHeader(stream, reader);
|
||||
|
||||
var eocd_location = stream.Position;
|
||||
var entry = new DirectoryEndHeader();
|
||||
await entry.Read(reader);
|
||||
|
||||
if (entry.IsZip64)
|
||||
{
|
||||
_zip64 = true;
|
||||
|
||||
// ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR should be before the EOCD
|
||||
stream.Seek(eocd_location - ZIP64_EOCD_LENGTH - 4, SeekOrigin.Begin);
|
||||
uint zip64_locator = await reader.ReadUInt32Async();
|
||||
if (zip64_locator != ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR)
|
||||
{
|
||||
throw new ArchiveException("Failed to locate the Zip64 Directory Locator");
|
||||
}
|
||||
|
||||
var zip64Locator = new Zip64DirectoryEndLocatorHeader();
|
||||
await zip64Locator.Read(reader);
|
||||
|
||||
stream.Seek(zip64Locator.RelativeOffsetOfTheEndOfDirectoryRecord, SeekOrigin.Begin);
|
||||
var zip64Signature = await reader.ReadUInt32Async();
|
||||
if (zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
|
||||
{
|
||||
throw new ArchiveException("Failed to locate the Zip64 Header");
|
||||
}
|
||||
|
||||
var zip64Entry = new Zip64DirectoryEndHeader();
|
||||
await zip64Entry.Read(reader);
|
||||
stream.Seek(zip64Entry.DirectoryStartOffsetRelativeToDisk, SeekOrigin.Begin);
|
||||
}
|
||||
else
|
||||
{
|
||||
stream.Seek(entry.DirectoryStartOffsetRelativeToDisk, SeekOrigin.Begin);
|
||||
}
|
||||
|
||||
var position = stream.Position;
|
||||
while (true)
|
||||
{
|
||||
stream.Position = position;
|
||||
var signature = await reader.ReadUInt32Async();
|
||||
var nextHeader = await ReadHeader(signature, reader, _zip64);
|
||||
position = stream.Position;
|
||||
|
||||
if (nextHeader is null)
|
||||
{
|
||||
yield break;
|
||||
}
|
||||
|
||||
if (nextHeader is DirectoryEntryHeader entryHeader)
|
||||
{
|
||||
//entry could be zero bytes so we need to know that.
|
||||
entryHeader.HasData = entryHeader.CompressedSize != 0;
|
||||
yield return entryHeader;
|
||||
}
|
||||
else if (nextHeader is DirectoryEndHeader endHeader)
|
||||
{
|
||||
yield return endHeader;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
internal IEnumerable<ZipHeader> ReadSeekableHeader(Stream stream, bool useSync)
|
||||
{
|
||||
var reader = new BinaryReader(stream);
|
||||
|
||||
@@ -98,6 +166,45 @@ internal sealed class SeekableZipHeaderFactory : ZipHeaderFactory
|
||||
return true;
|
||||
}
|
||||
|
||||
private static async ValueTask SeekBackToHeader(Stream stream, AsyncBinaryReader reader)
|
||||
{
|
||||
// Minimum EOCD length
|
||||
if (stream.Length < MINIMUM_EOCD_LENGTH)
|
||||
{
|
||||
throw new ArchiveException(
|
||||
"Could not find Zip file Directory at the end of the file. File may be corrupted."
|
||||
);
|
||||
}
|
||||
|
||||
var len =
|
||||
stream.Length < MAX_SEARCH_LENGTH_FOR_EOCD
|
||||
? (int)stream.Length
|
||||
: MAX_SEARCH_LENGTH_FOR_EOCD;
|
||||
// We search for marker in reverse to find the first occurance
|
||||
byte[] needle = { 0x06, 0x05, 0x4b, 0x50 };
|
||||
|
||||
stream.Seek(-len, SeekOrigin.End);
|
||||
|
||||
var seek = await reader.ReadBytesAsync(len);
|
||||
|
||||
// Search in reverse
|
||||
Array.Reverse(seek);
|
||||
|
||||
// don't exclude the minimum eocd region, otherwise you fail to locate the header in empty zip files
|
||||
var max_search_area = len; // - MINIMUM_EOCD_LENGTH;
|
||||
|
||||
for (var pos_from_end = 0; pos_from_end < max_search_area; ++pos_from_end)
|
||||
{
|
||||
if (IsMatch(seek, pos_from_end, needle))
|
||||
{
|
||||
stream.Seek(-pos_from_end, SeekOrigin.End);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
throw new ArchiveException("Failed to locate the Zip Header");
|
||||
}
|
||||
|
||||
private static void SeekBackToHeader(Stream stream, BinaryReader reader)
|
||||
{
|
||||
// Minimum EOCD length
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.Compressors.Deflate;
|
||||
using SharpCompress.IO;
|
||||
@@ -31,6 +33,28 @@ internal sealed class StreamingZipFilePart : ZipFilePart
|
||||
return _decompressionStream;
|
||||
}
|
||||
|
||||
internal override async Task<Stream?> GetCompressedStreamAsync(
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (!Header.HasData)
|
||||
{
|
||||
return Stream.Null;
|
||||
}
|
||||
_decompressionStream = await CreateDecompressionStreamAsync(
|
||||
await GetCryptoStreamAsync(CreateBaseStream(), cancellationToken)
|
||||
.ConfigureAwait(false),
|
||||
Header.CompressionMethod,
|
||||
cancellationToken
|
||||
)
|
||||
.ConfigureAwait(false);
|
||||
if (LeaveStreamOpen)
|
||||
{
|
||||
return SharpCompressStream.Create(_decompressionStream, leaveOpen: true);
|
||||
}
|
||||
return _decompressionStream;
|
||||
}
|
||||
|
||||
internal BinaryReader FixStreamedFileLocation(ref SharpCompressStream rewindableStream)
|
||||
{
|
||||
if (Header.IsDirectory)
|
||||
|
||||
@@ -2,6 +2,8 @@ using System;
|
||||
using System.Buffers.Binary;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.Compressors;
|
||||
using SharpCompress.Compressors.BZip2;
|
||||
@@ -13,8 +15,8 @@ using SharpCompress.Compressors.PPMd;
|
||||
using SharpCompress.Compressors.Reduce;
|
||||
using SharpCompress.Compressors.Shrink;
|
||||
using SharpCompress.Compressors.Xz;
|
||||
using SharpCompress.Compressors.ZStandard;
|
||||
using SharpCompress.IO;
|
||||
using ZstdSharp;
|
||||
|
||||
namespace SharpCompress.Common.Zip;
|
||||
|
||||
@@ -264,4 +266,220 @@ internal abstract class ZipFilePart : FilePart
|
||||
}
|
||||
return plainStream;
|
||||
}
|
||||
|
||||
internal override async Task<Stream?> GetCompressedStreamAsync(
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (!Header.HasData)
|
||||
{
|
||||
return Stream.Null;
|
||||
}
|
||||
var decompressionStream = await CreateDecompressionStreamAsync(
|
||||
await GetCryptoStreamAsync(CreateBaseStream(), cancellationToken)
|
||||
.ConfigureAwait(false),
|
||||
Header.CompressionMethod,
|
||||
cancellationToken
|
||||
)
|
||||
.ConfigureAwait(false);
|
||||
if (LeaveStreamOpen)
|
||||
{
|
||||
return SharpCompressStream.Create(decompressionStream, leaveOpen: true);
|
||||
}
|
||||
return decompressionStream;
|
||||
}
|
||||
|
||||
protected async Task<Stream> GetCryptoStreamAsync(
|
||||
Stream plainStream,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
var isFileEncrypted = FlagUtility.HasFlag(Header.Flags, HeaderFlags.Encrypted);
|
||||
|
||||
if (Header.CompressedSize == 0 && isFileEncrypted)
|
||||
{
|
||||
throw new NotSupportedException("Cannot encrypt file with unknown size at start.");
|
||||
}
|
||||
|
||||
if (
|
||||
(
|
||||
Header.CompressedSize == 0
|
||||
&& FlagUtility.HasFlag(Header.Flags, HeaderFlags.UsePostDataDescriptor)
|
||||
) || Header.IsZip64
|
||||
)
|
||||
{
|
||||
plainStream = SharpCompressStream.Create(plainStream, leaveOpen: true); //make sure AES doesn't close
|
||||
}
|
||||
else
|
||||
{
|
||||
plainStream = new ReadOnlySubStream(plainStream, Header.CompressedSize); //make sure AES doesn't close
|
||||
}
|
||||
|
||||
if (isFileEncrypted)
|
||||
{
|
||||
switch (Header.CompressionMethod)
|
||||
{
|
||||
case ZipCompressionMethod.None:
|
||||
case ZipCompressionMethod.Shrink:
|
||||
case ZipCompressionMethod.Reduce1:
|
||||
case ZipCompressionMethod.Reduce2:
|
||||
case ZipCompressionMethod.Reduce3:
|
||||
case ZipCompressionMethod.Reduce4:
|
||||
case ZipCompressionMethod.Deflate:
|
||||
case ZipCompressionMethod.Deflate64:
|
||||
case ZipCompressionMethod.BZip2:
|
||||
case ZipCompressionMethod.LZMA:
|
||||
case ZipCompressionMethod.PPMd:
|
||||
{
|
||||
return new PkwareTraditionalCryptoStream(
|
||||
plainStream,
|
||||
await Header
|
||||
.ComposeEncryptionDataAsync(plainStream, cancellationToken)
|
||||
.ConfigureAwait(false),
|
||||
CryptoMode.Decrypt
|
||||
);
|
||||
}
|
||||
|
||||
case ZipCompressionMethod.WinzipAes:
|
||||
{
|
||||
if (Header.WinzipAesEncryptionData != null)
|
||||
{
|
||||
return new WinzipAesCryptoStream(
|
||||
plainStream,
|
||||
Header.WinzipAesEncryptionData,
|
||||
Header.CompressedSize - 10
|
||||
);
|
||||
}
|
||||
return plainStream;
|
||||
}
|
||||
|
||||
default:
|
||||
{
|
||||
throw new InvalidOperationException("Header.CompressionMethod is invalid");
|
||||
}
|
||||
}
|
||||
}
|
||||
return plainStream;
|
||||
}
|
||||
|
||||
protected async Task<Stream> CreateDecompressionStreamAsync(
|
||||
Stream stream,
|
||||
ZipCompressionMethod method,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
switch (method)
|
||||
{
|
||||
case ZipCompressionMethod.None:
|
||||
{
|
||||
if (Header.CompressedSize is 0)
|
||||
{
|
||||
return new DataDescriptorStream(stream);
|
||||
}
|
||||
|
||||
return stream;
|
||||
}
|
||||
case ZipCompressionMethod.Shrink:
|
||||
{
|
||||
return new ShrinkStream(
|
||||
stream,
|
||||
CompressionMode.Decompress,
|
||||
Header.CompressedSize,
|
||||
Header.UncompressedSize
|
||||
);
|
||||
}
|
||||
case ZipCompressionMethod.Reduce1:
|
||||
{
|
||||
return new ReduceStream(stream, Header.CompressedSize, Header.UncompressedSize, 1);
|
||||
}
|
||||
case ZipCompressionMethod.Reduce2:
|
||||
{
|
||||
return new ReduceStream(stream, Header.CompressedSize, Header.UncompressedSize, 2);
|
||||
}
|
||||
case ZipCompressionMethod.Reduce3:
|
||||
{
|
||||
return new ReduceStream(stream, Header.CompressedSize, Header.UncompressedSize, 3);
|
||||
}
|
||||
case ZipCompressionMethod.Reduce4:
|
||||
{
|
||||
return new ReduceStream(stream, Header.CompressedSize, Header.UncompressedSize, 4);
|
||||
}
|
||||
case ZipCompressionMethod.Explode:
|
||||
{
|
||||
return new ExplodeStream(
|
||||
stream,
|
||||
Header.CompressedSize,
|
||||
Header.UncompressedSize,
|
||||
Header.Flags
|
||||
);
|
||||
}
|
||||
|
||||
case ZipCompressionMethod.Deflate:
|
||||
{
|
||||
return new DeflateStream(stream, CompressionMode.Decompress);
|
||||
}
|
||||
case ZipCompressionMethod.Deflate64:
|
||||
{
|
||||
return new Deflate64Stream(stream, CompressionMode.Decompress);
|
||||
}
|
||||
case ZipCompressionMethod.BZip2:
|
||||
{
|
||||
return new BZip2Stream(stream, CompressionMode.Decompress, false);
|
||||
}
|
||||
case ZipCompressionMethod.LZMA:
|
||||
{
|
||||
if (FlagUtility.HasFlag(Header.Flags, HeaderFlags.Encrypted))
|
||||
{
|
||||
throw new NotSupportedException("LZMA with pkware encryption.");
|
||||
}
|
||||
var buffer = new byte[4];
|
||||
await stream.ReadFullyAsync(buffer, 0, 4, cancellationToken).ConfigureAwait(false);
|
||||
var version = BinaryPrimitives.ReadUInt16LittleEndian(buffer.AsSpan(0, 2));
|
||||
var propsSize = BinaryPrimitives.ReadUInt16LittleEndian(buffer.AsSpan(2, 2));
|
||||
var props = new byte[propsSize];
|
||||
await stream
|
||||
.ReadFullyAsync(props, 0, propsSize, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
return new LzmaStream(
|
||||
props,
|
||||
stream,
|
||||
Header.CompressedSize > 0 ? Header.CompressedSize - 4 - props.Length : -1,
|
||||
FlagUtility.HasFlag(Header.Flags, HeaderFlags.Bit1)
|
||||
? -1
|
||||
: Header.UncompressedSize
|
||||
);
|
||||
}
|
||||
case ZipCompressionMethod.Xz:
|
||||
{
|
||||
return new XZStream(stream);
|
||||
}
|
||||
case ZipCompressionMethod.ZStandard:
|
||||
{
|
||||
return new DecompressionStream(stream);
|
||||
}
|
||||
case ZipCompressionMethod.PPMd:
|
||||
{
|
||||
var props = new byte[2];
|
||||
await stream.ReadFullyAsync(props, 0, 2, cancellationToken).ConfigureAwait(false);
|
||||
return new PpmdStream(new PpmdProperties(props), stream, false);
|
||||
}
|
||||
case ZipCompressionMethod.WinzipAes:
|
||||
{
|
||||
var data = Header.Extra.SingleOrDefault(x => x.Type == ExtraDataType.WinZipAes);
|
||||
if (data is null)
|
||||
{
|
||||
throw new InvalidFormatException("No Winzip AES extra data found.");
|
||||
}
|
||||
if (data.Length != 7)
|
||||
{
|
||||
throw new InvalidFormatException("Winzip data length is not 7.");
|
||||
}
|
||||
throw new NotSupportedException("WinzipAes isn't supported for streaming");
|
||||
}
|
||||
default:
|
||||
{
|
||||
throw new NotSupportedException("CompressionMethod: " + Header.CompressionMethod);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common.Zip.Headers;
|
||||
using SharpCompress.IO;
|
||||
|
||||
@@ -34,6 +35,82 @@ internal class ZipHeaderFactory
|
||||
_archiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
protected async ValueTask<ZipHeader?> ReadHeader(
|
||||
uint headerBytes,
|
||||
AsyncBinaryReader reader,
|
||||
bool zip64 = false
|
||||
)
|
||||
{
|
||||
switch (headerBytes)
|
||||
{
|
||||
case ENTRY_HEADER_BYTES:
|
||||
{
|
||||
var entryHeader = new LocalEntryHeader(_archiveEncoding);
|
||||
await entryHeader.Read(reader);
|
||||
LoadHeader(entryHeader, reader.BaseStream);
|
||||
|
||||
_lastEntryHeader = entryHeader;
|
||||
return entryHeader;
|
||||
}
|
||||
case DIRECTORY_START_HEADER_BYTES:
|
||||
{
|
||||
var entry = new DirectoryEntryHeader(_archiveEncoding);
|
||||
await entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case POST_DATA_DESCRIPTOR:
|
||||
{
|
||||
if (
|
||||
_lastEntryHeader != null
|
||||
&& FlagUtility.HasFlag(
|
||||
_lastEntryHeader.NotNull().Flags,
|
||||
HeaderFlags.UsePostDataDescriptor
|
||||
)
|
||||
)
|
||||
{
|
||||
_lastEntryHeader.Crc = await reader.ReadUInt32Async();
|
||||
_lastEntryHeader.CompressedSize = zip64
|
||||
? (long)await reader.ReadUInt64Async()
|
||||
: await reader.ReadUInt32Async();
|
||||
_lastEntryHeader.UncompressedSize = zip64
|
||||
? (long)await reader.ReadUInt64Async()
|
||||
: await reader.ReadUInt32Async();
|
||||
}
|
||||
else
|
||||
{
|
||||
await reader.ReadBytesAsync(zip64 ? 20 : 12);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
case DIGITAL_SIGNATURE:
|
||||
return null;
|
||||
case DIRECTORY_END_HEADER_BYTES:
|
||||
{
|
||||
var entry = new DirectoryEndHeader();
|
||||
await entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case SPLIT_ARCHIVE_HEADER_BYTES:
|
||||
{
|
||||
return new SplitHeader();
|
||||
}
|
||||
case ZIP64_END_OF_CENTRAL_DIRECTORY:
|
||||
{
|
||||
var entry = new Zip64DirectoryEndHeader();
|
||||
await entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
case ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR:
|
||||
{
|
||||
var entry = new Zip64DirectoryEndLocatorHeader();
|
||||
await entry.Read(reader);
|
||||
return entry;
|
||||
}
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
protected ZipHeader? ReadHeader(uint headerBytes, BinaryReader reader, bool zip64 = false)
|
||||
{
|
||||
switch (headerBytes)
|
||||
|
||||
@@ -428,7 +428,9 @@ public class LzmaStream : Stream, IStreamStack
|
||||
private async Task DecodeChunkHeaderAsync(CancellationToken cancellationToken = default)
|
||||
{
|
||||
var controlBuffer = new byte[1];
|
||||
await _inputStream.ReadAsync(controlBuffer, 0, 1, cancellationToken).ConfigureAwait(false);
|
||||
await _inputStream
|
||||
.ReadExactlyAsync(controlBuffer, 0, 1, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
var control = controlBuffer[0];
|
||||
_inputPosition++;
|
||||
|
||||
@@ -455,11 +457,15 @@ public class LzmaStream : Stream, IStreamStack
|
||||
|
||||
_availableBytes = (control & 0x1F) << 16;
|
||||
var buffer = new byte[2];
|
||||
await _inputStream.ReadAsync(buffer, 0, 2, cancellationToken).ConfigureAwait(false);
|
||||
await _inputStream
|
||||
.ReadExactlyAsync(buffer, 0, 2, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
_availableBytes += (buffer[0] << 8) + buffer[1] + 1;
|
||||
_inputPosition += 2;
|
||||
|
||||
await _inputStream.ReadAsync(buffer, 0, 2, cancellationToken).ConfigureAwait(false);
|
||||
await _inputStream
|
||||
.ReadExactlyAsync(buffer, 0, 2, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
_rangeDecoderLimit = (buffer[0] << 8) + buffer[1] + 1;
|
||||
_inputPosition += 2;
|
||||
|
||||
@@ -467,7 +473,7 @@ public class LzmaStream : Stream, IStreamStack
|
||||
{
|
||||
_needProps = false;
|
||||
await _inputStream
|
||||
.ReadAsync(controlBuffer, 0, 1, cancellationToken)
|
||||
.ReadExactlyAsync(controlBuffer, 0, 1, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
Properties[0] = controlBuffer[0];
|
||||
_inputPosition++;
|
||||
@@ -495,7 +501,9 @@ public class LzmaStream : Stream, IStreamStack
|
||||
{
|
||||
_uncompressedChunk = true;
|
||||
var buffer = new byte[2];
|
||||
await _inputStream.ReadAsync(buffer, 0, 2, cancellationToken).ConfigureAwait(false);
|
||||
await _inputStream
|
||||
.ReadExactlyAsync(buffer, 0, 2, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
_availableBytes = (buffer[0] << 8) + buffer[1] + 1;
|
||||
_inputPosition += 2;
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ using SharpCompress.Compressors.Deflate;
|
||||
using SharpCompress.Compressors.Filters;
|
||||
using SharpCompress.Compressors.LZMA.Utilites;
|
||||
using SharpCompress.Compressors.PPMd;
|
||||
using ZstdSharp;
|
||||
using SharpCompress.Compressors.ZStandard;
|
||||
|
||||
namespace SharpCompress.Compressors.LZMA;
|
||||
|
||||
|
||||
@@ -37,18 +37,8 @@ internal sealed class MultiVolumeReadOnlyStream : Stream, IStreamStack
|
||||
private IEnumerator<RarFilePart> filePartEnumerator;
|
||||
private Stream currentStream;
|
||||
|
||||
private readonly IExtractionListener streamListener;
|
||||
|
||||
private long currentPartTotalReadBytes;
|
||||
private long currentEntryTotalReadBytes;
|
||||
|
||||
internal MultiVolumeReadOnlyStream(
|
||||
IEnumerable<RarFilePart> parts,
|
||||
IExtractionListener streamListener
|
||||
)
|
||||
internal MultiVolumeReadOnlyStream(IEnumerable<RarFilePart> parts)
|
||||
{
|
||||
this.streamListener = streamListener;
|
||||
|
||||
filePartEnumerator = parts.GetEnumerator();
|
||||
filePartEnumerator.MoveNext();
|
||||
InitializeNextFilePart();
|
||||
@@ -81,15 +71,7 @@ internal sealed class MultiVolumeReadOnlyStream : Stream, IStreamStack
|
||||
currentPosition = 0;
|
||||
currentStream = filePartEnumerator.Current.GetCompressedStream();
|
||||
|
||||
currentPartTotalReadBytes = 0;
|
||||
|
||||
CurrentCrc = filePartEnumerator.Current.FileHeader.FileCrc;
|
||||
|
||||
streamListener.FireFilePartExtractionBegin(
|
||||
filePartEnumerator.Current.FilePartName,
|
||||
filePartEnumerator.Current.FileHeader.CompressedSize,
|
||||
filePartEnumerator.Current.FileHeader.UncompressedSize
|
||||
);
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
@@ -141,12 +123,6 @@ internal sealed class MultiVolumeReadOnlyStream : Stream, IStreamStack
|
||||
break;
|
||||
}
|
||||
}
|
||||
currentPartTotalReadBytes += totalRead;
|
||||
currentEntryTotalReadBytes += totalRead;
|
||||
streamListener.FireCompressedBytesRead(
|
||||
currentPartTotalReadBytes,
|
||||
currentEntryTotalReadBytes
|
||||
);
|
||||
return totalRead;
|
||||
}
|
||||
|
||||
@@ -206,12 +182,6 @@ internal sealed class MultiVolumeReadOnlyStream : Stream, IStreamStack
|
||||
break;
|
||||
}
|
||||
}
|
||||
currentPartTotalReadBytes += totalRead;
|
||||
currentEntryTotalReadBytes += totalRead;
|
||||
streamListener.FireCompressedBytesRead(
|
||||
currentPartTotalReadBytes,
|
||||
currentEntryTotalReadBytes
|
||||
);
|
||||
return totalRead;
|
||||
}
|
||||
|
||||
@@ -270,12 +240,6 @@ internal sealed class MultiVolumeReadOnlyStream : Stream, IStreamStack
|
||||
break;
|
||||
}
|
||||
}
|
||||
currentPartTotalReadBytes += totalRead;
|
||||
currentEntryTotalReadBytes += totalRead;
|
||||
streamListener.FireCompressedBytesRead(
|
||||
currentPartTotalReadBytes,
|
||||
currentEntryTotalReadBytes
|
||||
);
|
||||
return totalRead;
|
||||
}
|
||||
#endif
|
||||
|
||||
311
src/SharpCompress/Compressors/ZStandard/BitOperations.cs
Normal file
311
src/SharpCompress/Compressors/ZStandard/BitOperations.cs
Normal file
@@ -0,0 +1,311 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
|
||||
#if !NETCOREAPP3_0_OR_GREATER
|
||||
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
// Some routines inspired by the Stanford Bit Twiddling Hacks by Sean Eron Anderson:
|
||||
// http://graphics.stanford.edu/~seander/bithacks.html
|
||||
|
||||
namespace System.Numerics
|
||||
{
|
||||
/// <summary>
|
||||
/// Utility methods for intrinsic bit-twiddling operations.
|
||||
/// The methods use hardware intrinsics when available on the underlying platform,
|
||||
/// otherwise they use optimized software fallbacks.
|
||||
/// </summary>
|
||||
public static unsafe class BitOperations
|
||||
{
|
||||
// hack: should be public because of inline
|
||||
public static readonly byte* TrailingZeroCountDeBruijn = GetArrayPointer(
|
||||
new byte[]
|
||||
{
|
||||
00,
|
||||
01,
|
||||
28,
|
||||
02,
|
||||
29,
|
||||
14,
|
||||
24,
|
||||
03,
|
||||
30,
|
||||
22,
|
||||
20,
|
||||
15,
|
||||
25,
|
||||
17,
|
||||
04,
|
||||
08,
|
||||
31,
|
||||
27,
|
||||
13,
|
||||
23,
|
||||
21,
|
||||
19,
|
||||
16,
|
||||
07,
|
||||
26,
|
||||
12,
|
||||
18,
|
||||
06,
|
||||
11,
|
||||
05,
|
||||
10,
|
||||
09,
|
||||
}
|
||||
);
|
||||
|
||||
// hack: should be public because of inline
|
||||
public static readonly byte* Log2DeBruijn = GetArrayPointer(
|
||||
new byte[]
|
||||
{
|
||||
00,
|
||||
09,
|
||||
01,
|
||||
10,
|
||||
13,
|
||||
21,
|
||||
02,
|
||||
29,
|
||||
11,
|
||||
14,
|
||||
16,
|
||||
18,
|
||||
22,
|
||||
25,
|
||||
03,
|
||||
30,
|
||||
08,
|
||||
12,
|
||||
20,
|
||||
28,
|
||||
15,
|
||||
17,
|
||||
24,
|
||||
07,
|
||||
19,
|
||||
27,
|
||||
23,
|
||||
06,
|
||||
26,
|
||||
05,
|
||||
04,
|
||||
31,
|
||||
}
|
||||
);
|
||||
|
||||
/// <summary>
|
||||
/// Returns the integer (floor) log of the specified value, base 2.
|
||||
/// Note that by convention, input value 0 returns 0 since log(0) is undefined.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int Log2(uint value)
|
||||
{
|
||||
// The 0->0 contract is fulfilled by setting the LSB to 1.
|
||||
// Log(1) is 0, and setting the LSB for values > 1 does not change the log2 result.
|
||||
value |= 1;
|
||||
|
||||
// value lzcnt actual expected
|
||||
// ..0001 31 31-31 0
|
||||
// ..0010 30 31-30 1
|
||||
// 0010.. 2 31-2 29
|
||||
// 0100.. 1 31-1 30
|
||||
// 1000.. 0 31-0 31
|
||||
|
||||
// Fallback contract is 0->0
|
||||
// No AggressiveInlining due to large method size
|
||||
// Has conventional contract 0->0 (Log(0) is undefined)
|
||||
|
||||
// Fill trailing zeros with ones, eg 00010010 becomes 00011111
|
||||
value |= value >> 01;
|
||||
value |= value >> 02;
|
||||
value |= value >> 04;
|
||||
value |= value >> 08;
|
||||
value |= value >> 16;
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return Log2DeBruijn[
|
||||
// Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_1100_0100_1010_1100_1101_1101u
|
||||
(int)((value * 0x07C4ACDDu) >> 27)
|
||||
];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns the integer (floor) log of the specified value, base 2.
|
||||
/// Note that by convention, input value 0 returns 0 since log(0) is undefined.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int Log2(ulong value)
|
||||
{
|
||||
value |= 1;
|
||||
|
||||
uint hi = (uint)(value >> 32);
|
||||
|
||||
if (hi == 0)
|
||||
{
|
||||
return Log2((uint)value);
|
||||
}
|
||||
|
||||
return 32 + Log2(hi);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in an integer value.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(int value) => TrailingZeroCount((uint)value);
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in an integer value.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(uint value)
|
||||
{
|
||||
// Unguarded fallback contract is 0->0, BSF contract is 0->undefined
|
||||
if (value == 0)
|
||||
{
|
||||
return 32;
|
||||
}
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return TrailingZeroCountDeBruijn[
|
||||
// Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_0111_1100_1011_0101_0011_0001u
|
||||
(int)(((value & (uint)-(int)value) * 0x077CB531u) >> 27)
|
||||
]; // Multi-cast mitigates redundant conv.u8
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(long value) => TrailingZeroCount((ulong)value);
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(ulong value)
|
||||
{
|
||||
uint lo = (uint)value;
|
||||
|
||||
if (lo == 0)
|
||||
{
|
||||
return 32 + TrailingZeroCount((uint)(value >> 32));
|
||||
}
|
||||
|
||||
return TrailingZeroCount(lo);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value left by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROL.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..31] is treated as congruent mod 32.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static uint RotateLeft(uint value, int offset) =>
|
||||
(value << offset) | (value >> (32 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value left by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROL.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..63] is treated as congruent mod 64.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static ulong RotateLeft(ulong value, int offset) =>
|
||||
(value << offset) | (value >> (64 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value right by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROR.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..31] is treated as congruent mod 32.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static uint RotateRight(uint value, int offset) =>
|
||||
(value >> offset) | (value << (32 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value right by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROR.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..63] is treated as congruent mod 64.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static ulong RotateRight(ulong value, int offset) =>
|
||||
(value >> offset) | (value << (64 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of leading zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction LZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int LeadingZeroCount(uint value)
|
||||
{
|
||||
// Unguarded fallback contract is 0->31, BSR contract is 0->undefined
|
||||
if (value == 0)
|
||||
{
|
||||
return 32;
|
||||
}
|
||||
|
||||
// No AggressiveInlining due to large method size
|
||||
// Has conventional contract 0->0 (Log(0) is undefined)
|
||||
|
||||
// Fill trailing zeros with ones, eg 00010010 becomes 00011111
|
||||
value |= value >> 01;
|
||||
value |= value >> 02;
|
||||
value |= value >> 04;
|
||||
value |= value >> 08;
|
||||
value |= value >> 16;
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return 31
|
||||
^ Log2DeBruijn[
|
||||
// uint|long -> IntPtr cast on 32-bit platforms does expensive overflow checks not needed here
|
||||
(int)((value * 0x07C4ACDDu) >> 27)
|
||||
];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of leading zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction LZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int LeadingZeroCount(ulong value)
|
||||
{
|
||||
uint hi = (uint)(value >> 32);
|
||||
|
||||
if (hi == 0)
|
||||
{
|
||||
return 32 + LeadingZeroCount((uint)value);
|
||||
}
|
||||
|
||||
return LeadingZeroCount(hi);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
301
src/SharpCompress/Compressors/ZStandard/CompressionStream.cs
Normal file
301
src/SharpCompress/Compressors/ZStandard/CompressionStream.cs
Normal file
@@ -0,0 +1,301 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
public class CompressionStream : Stream
|
||||
{
|
||||
private readonly Stream innerStream;
|
||||
private readonly byte[] outputBuffer;
|
||||
private readonly bool preserveCompressor;
|
||||
private readonly bool leaveOpen;
|
||||
private Compressor? compressor;
|
||||
private ZSTD_outBuffer_s output;
|
||||
|
||||
public CompressionStream(
|
||||
Stream stream,
|
||||
int level = Compressor.DefaultCompressionLevel,
|
||||
int bufferSize = 0,
|
||||
bool leaveOpen = true
|
||||
)
|
||||
: this(stream, new Compressor(level), bufferSize, false, leaveOpen) { }
|
||||
|
||||
public CompressionStream(
|
||||
Stream stream,
|
||||
Compressor compressor,
|
||||
int bufferSize = 0,
|
||||
bool preserveCompressor = true,
|
||||
bool leaveOpen = true
|
||||
)
|
||||
{
|
||||
if (stream == null)
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
|
||||
if (!stream.CanWrite)
|
||||
throw new ArgumentException("Stream is not writable", nameof(stream));
|
||||
|
||||
if (bufferSize < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(bufferSize));
|
||||
|
||||
innerStream = stream;
|
||||
this.compressor = compressor;
|
||||
this.preserveCompressor = preserveCompressor;
|
||||
this.leaveOpen = leaveOpen;
|
||||
|
||||
var outputBufferSize =
|
||||
bufferSize > 0
|
||||
? bufferSize
|
||||
: (int)Unsafe.Methods.ZSTD_CStreamOutSize().EnsureZstdSuccess();
|
||||
outputBuffer = ArrayPool<byte>.Shared.Rent(outputBufferSize);
|
||||
output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)outputBufferSize };
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_cParameter parameter, int value)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.NotNull().SetParameter(parameter, value);
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_cParameter parameter)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
return compressor.NotNull().GetParameter(parameter);
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.NotNull().LoadDictionary(dict);
|
||||
}
|
||||
|
||||
~CompressionStream() => Dispose(false);
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override async ValueTask DisposeAsync()
|
||||
#else
|
||||
public async Task DisposeAsync()
|
||||
#endif
|
||||
{
|
||||
if (compressor == null)
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_end).ConfigureAwait(false);
|
||||
}
|
||||
finally
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (compressor == null)
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
if (disposing)
|
||||
FlushInternal(ZSTD_EndDirective.ZSTD_e_end);
|
||||
}
|
||||
finally
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
}
|
||||
}
|
||||
|
||||
private void ReleaseUnmanagedResources()
|
||||
{
|
||||
if (!preserveCompressor)
|
||||
{
|
||||
compressor.NotNull().Dispose();
|
||||
}
|
||||
compressor = null;
|
||||
|
||||
if (outputBuffer != null)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(outputBuffer);
|
||||
}
|
||||
|
||||
if (!leaveOpen)
|
||||
{
|
||||
innerStream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
public override void Flush() => FlushInternal(ZSTD_EndDirective.ZSTD_e_flush);
|
||||
|
||||
public override async Task FlushAsync(CancellationToken cancellationToken) =>
|
||||
await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_flush, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
private void FlushInternal(ZSTD_EndDirective directive) => WriteInternal(null, directive);
|
||||
|
||||
private async Task FlushInternalAsync(
|
||||
ZSTD_EndDirective directive,
|
||||
CancellationToken cancellationToken = default
|
||||
) => await WriteInternalAsync(null, directive, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
Write(new ReadOnlySpan<byte>(buffer, offset, count));
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override void Write(ReadOnlySpan<byte> buffer) =>
|
||||
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue);
|
||||
#else
|
||||
public void Write(ReadOnlySpan<byte> buffer) =>
|
||||
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue);
|
||||
#endif
|
||||
|
||||
private void WriteInternal(ReadOnlySpan<byte> buffer, ZSTD_EndDirective directive)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
var input = new ZSTD_inBuffer_s
|
||||
{
|
||||
pos = 0,
|
||||
size = buffer != null ? (nuint)buffer.Length : 0,
|
||||
};
|
||||
nuint remaining;
|
||||
do
|
||||
{
|
||||
output.pos = 0;
|
||||
remaining = CompressStream(ref input, buffer, directive);
|
||||
|
||||
var written = (int)output.pos;
|
||||
if (written > 0)
|
||||
innerStream.Write(outputBuffer, 0, written);
|
||||
} while (
|
||||
directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0
|
||||
);
|
||||
}
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
private async ValueTask WriteInternalAsync(
|
||||
ReadOnlyMemory<byte>? buffer,
|
||||
ZSTD_EndDirective directive,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
#else
|
||||
private async Task WriteInternalAsync(
|
||||
ReadOnlyMemory<byte>? buffer,
|
||||
ZSTD_EndDirective directive,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
#endif
|
||||
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
var input = new ZSTD_inBuffer_s
|
||||
{
|
||||
pos = 0,
|
||||
size = buffer.HasValue ? (nuint)buffer.Value.Length : 0,
|
||||
};
|
||||
nuint remaining;
|
||||
do
|
||||
{
|
||||
output.pos = 0;
|
||||
remaining = CompressStream(
|
||||
ref input,
|
||||
buffer.HasValue ? buffer.Value.Span : null,
|
||||
directive
|
||||
);
|
||||
|
||||
var written = (int)output.pos;
|
||||
if (written > 0)
|
||||
await innerStream
|
||||
.WriteAsync(outputBuffer, 0, written, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
} while (
|
||||
directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0
|
||||
);
|
||||
}
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
|
||||
public override Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
) => WriteAsync(new ReadOnlyMemory<byte>(buffer, offset, count), cancellationToken).AsTask();
|
||||
|
||||
public override async ValueTask WriteAsync(
|
||||
ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
) =>
|
||||
await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
#else
|
||||
|
||||
public override Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
) => WriteAsync(new ReadOnlyMemory<byte>(buffer, offset, count), cancellationToken);
|
||||
|
||||
public async Task WriteAsync(
|
||||
ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
) =>
|
||||
await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
#endif
|
||||
|
||||
internal unsafe nuint CompressStream(
|
||||
ref ZSTD_inBuffer_s input,
|
||||
ReadOnlySpan<byte> inputBuffer,
|
||||
ZSTD_EndDirective directive
|
||||
)
|
||||
{
|
||||
fixed (byte* inputBufferPtr = inputBuffer)
|
||||
fixed (byte* outputBufferPtr = outputBuffer)
|
||||
{
|
||||
input.src = inputBufferPtr;
|
||||
output.dst = outputBufferPtr;
|
||||
return compressor
|
||||
.NotNull()
|
||||
.CompressStream(ref input, ref output, directive)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanRead => false;
|
||||
public override bool CanSeek => false;
|
||||
public override bool CanWrite => true;
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => throw new NotSupportedException();
|
||||
set => throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count) =>
|
||||
throw new NotSupportedException();
|
||||
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (compressor == null)
|
||||
throw new ObjectDisposedException(nameof(CompressionStream));
|
||||
}
|
||||
|
||||
public void SetPledgedSrcSize(ulong pledgedSrcSize)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.NotNull().SetPledgedSrcSize(pledgedSrcSize);
|
||||
}
|
||||
}
|
||||
204
src/SharpCompress/Compressors/ZStandard/Compressor.cs
Normal file
204
src/SharpCompress/Compressors/ZStandard/Compressor.cs
Normal file
@@ -0,0 +1,204 @@
|
||||
using System;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
public unsafe class Compressor : IDisposable
|
||||
{
|
||||
/// <summary>
|
||||
/// Minimum negative compression level allowed
|
||||
/// </summary>
|
||||
public static int MinCompressionLevel => Unsafe.Methods.ZSTD_minCLevel();
|
||||
|
||||
/// <summary>
|
||||
/// Maximum compression level available
|
||||
/// </summary>
|
||||
public static int MaxCompressionLevel => Unsafe.Methods.ZSTD_maxCLevel();
|
||||
|
||||
/// <summary>
|
||||
/// Default compression level
|
||||
/// </summary>
|
||||
/// <see cref="Unsafe.Methods.ZSTD_defaultCLevel"/>
|
||||
public const int DefaultCompressionLevel = 3;
|
||||
|
||||
private int level = DefaultCompressionLevel;
|
||||
|
||||
private readonly SafeCctxHandle handle;
|
||||
|
||||
public int Level
|
||||
{
|
||||
get => level;
|
||||
set
|
||||
{
|
||||
if (level != value)
|
||||
{
|
||||
level = value;
|
||||
SetParameter(ZSTD_cParameter.ZSTD_c_compressionLevel, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_cParameter parameter, int value)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
Unsafe.Methods.ZSTD_CCtx_setParameter(cctx, parameter, value).EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_cParameter parameter)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
int value;
|
||||
Unsafe.Methods.ZSTD_CCtx_getParameter(cctx, parameter, &value).EnsureZstdSuccess();
|
||||
return value;
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
var dictReadOnlySpan = new ReadOnlySpan<byte>(dict);
|
||||
LoadDictionary(dictReadOnlySpan);
|
||||
}
|
||||
|
||||
public void LoadDictionary(ReadOnlySpan<byte> dict)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
fixed (byte* dictPtr = dict)
|
||||
Unsafe
|
||||
.Methods.ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public Compressor(int level = DefaultCompressionLevel)
|
||||
{
|
||||
handle = SafeCctxHandle.Create();
|
||||
Level = level;
|
||||
}
|
||||
|
||||
public static int GetCompressBound(int length) =>
|
||||
(int)Unsafe.Methods.ZSTD_compressBound((nuint)length);
|
||||
|
||||
public static ulong GetCompressBoundLong(ulong length) =>
|
||||
Unsafe.Methods.ZSTD_compressBound((nuint)length);
|
||||
|
||||
public Span<byte> Wrap(ReadOnlySpan<byte> src)
|
||||
{
|
||||
var dest = new byte[GetCompressBound(src.Length)];
|
||||
var length = Wrap(src, dest);
|
||||
return new Span<byte>(dest, 0, length);
|
||||
}
|
||||
|
||||
public int Wrap(byte[] src, byte[] dest, int offset) =>
|
||||
Wrap(src, new Span<byte>(dest, offset, dest.Length - offset));
|
||||
|
||||
public int Wrap(ReadOnlySpan<byte> src, Span<byte> dest)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
return (int)
|
||||
Unsafe
|
||||
.Methods.ZSTD_compress2(
|
||||
cctx,
|
||||
destPtr,
|
||||
(nuint)dest.Length,
|
||||
srcPtr,
|
||||
(nuint)src.Length
|
||||
)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public int Wrap(ArraySegment<byte> src, ArraySegment<byte> dest) =>
|
||||
Wrap((ReadOnlySpan<byte>)src, dest);
|
||||
|
||||
public int Wrap(
|
||||
byte[] src,
|
||||
int srcOffset,
|
||||
int srcLength,
|
||||
byte[] dst,
|
||||
int dstOffset,
|
||||
int dstLength
|
||||
) =>
|
||||
Wrap(
|
||||
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
|
||||
new Span<byte>(dst, dstOffset, dstLength)
|
||||
);
|
||||
|
||||
public bool TryWrap(byte[] src, byte[] dest, int offset, out int written) =>
|
||||
TryWrap(src, new Span<byte>(dest, offset, dest.Length - offset), out written);
|
||||
|
||||
public bool TryWrap(ReadOnlySpan<byte> src, Span<byte> dest, out int written)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
{
|
||||
nuint returnValue;
|
||||
using (var cctx = handle.Acquire())
|
||||
{
|
||||
returnValue = Unsafe.Methods.ZSTD_compress2(
|
||||
cctx,
|
||||
destPtr,
|
||||
(nuint)dest.Length,
|
||||
srcPtr,
|
||||
(nuint)src.Length
|
||||
);
|
||||
}
|
||||
|
||||
if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))
|
||||
{
|
||||
written = default;
|
||||
return false;
|
||||
}
|
||||
|
||||
returnValue.EnsureZstdSuccess();
|
||||
written = (int)returnValue;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
public bool TryWrap(ArraySegment<byte> src, ArraySegment<byte> dest, out int written) =>
|
||||
TryWrap((ReadOnlySpan<byte>)src, dest, out written);
|
||||
|
||||
public bool TryWrap(
|
||||
byte[] src,
|
||||
int srcOffset,
|
||||
int srcLength,
|
||||
byte[] dst,
|
||||
int dstOffset,
|
||||
int dstLength,
|
||||
out int written
|
||||
) =>
|
||||
TryWrap(
|
||||
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
|
||||
new Span<byte>(dst, dstOffset, dstLength),
|
||||
out written
|
||||
);
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
handle.Dispose();
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
|
||||
internal nuint CompressStream(
|
||||
ref ZSTD_inBuffer_s input,
|
||||
ref ZSTD_outBuffer_s output,
|
||||
ZSTD_EndDirective directive
|
||||
)
|
||||
{
|
||||
fixed (ZSTD_inBuffer_s* inputPtr = &input)
|
||||
fixed (ZSTD_outBuffer_s* outputPtr = &output)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
return Unsafe
|
||||
.Methods.ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public void SetPledgedSrcSize(ulong pledgedSrcSize)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
Unsafe.Methods.ZSTD_CCtx_setPledgedSrcSize(cctx, pledgedSrcSize).EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
8
src/SharpCompress/Compressors/ZStandard/Constants.cs
Normal file
8
src/SharpCompress/Compressors/ZStandard/Constants.cs
Normal file
@@ -0,0 +1,8 @@
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
internal class Constants
|
||||
{
|
||||
//NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks
|
||||
//NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27
|
||||
public const ulong MaxByteArrayLength = 0x7FFFFFC7;
|
||||
}
|
||||
293
src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs
Normal file
293
src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs
Normal file
@@ -0,0 +1,293 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
public class DecompressionStream : Stream
|
||||
{
|
||||
private readonly Stream innerStream;
|
||||
private readonly byte[] inputBuffer;
|
||||
private readonly int inputBufferSize;
|
||||
private readonly bool preserveDecompressor;
|
||||
private readonly bool leaveOpen;
|
||||
private readonly bool checkEndOfStream;
|
||||
private Decompressor? decompressor;
|
||||
private ZSTD_inBuffer_s input;
|
||||
private nuint lastDecompressResult = 0;
|
||||
private bool contextDrained = true;
|
||||
|
||||
public DecompressionStream(
|
||||
Stream stream,
|
||||
int bufferSize = 0,
|
||||
bool checkEndOfStream = true,
|
||||
bool leaveOpen = true
|
||||
)
|
||||
: this(stream, new Decompressor(), bufferSize, checkEndOfStream, false, leaveOpen) { }
|
||||
|
||||
public DecompressionStream(
|
||||
Stream stream,
|
||||
Decompressor decompressor,
|
||||
int bufferSize = 0,
|
||||
bool checkEndOfStream = true,
|
||||
bool preserveDecompressor = true,
|
||||
bool leaveOpen = true
|
||||
)
|
||||
{
|
||||
if (stream == null)
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
|
||||
if (!stream.CanRead)
|
||||
throw new ArgumentException("Stream is not readable", nameof(stream));
|
||||
|
||||
if (bufferSize < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(bufferSize));
|
||||
|
||||
innerStream = stream;
|
||||
this.decompressor = decompressor;
|
||||
this.preserveDecompressor = preserveDecompressor;
|
||||
this.leaveOpen = leaveOpen;
|
||||
this.checkEndOfStream = checkEndOfStream;
|
||||
|
||||
inputBufferSize =
|
||||
bufferSize > 0
|
||||
? bufferSize
|
||||
: (int)Unsafe.Methods.ZSTD_DStreamInSize().EnsureZstdSuccess();
|
||||
inputBuffer = ArrayPool<byte>.Shared.Rent(inputBufferSize);
|
||||
input = new ZSTD_inBuffer_s { pos = (nuint)inputBufferSize, size = (nuint)inputBufferSize };
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_dParameter parameter, int value)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
decompressor.NotNull().SetParameter(parameter, value);
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_dParameter parameter)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
return decompressor.NotNull().GetParameter(parameter);
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
decompressor.NotNull().LoadDictionary(dict);
|
||||
}
|
||||
|
||||
~DecompressionStream() => Dispose(false);
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (decompressor == null)
|
||||
return;
|
||||
|
||||
if (!preserveDecompressor)
|
||||
{
|
||||
decompressor.Dispose();
|
||||
}
|
||||
decompressor = null;
|
||||
|
||||
if (inputBuffer != null)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(inputBuffer);
|
||||
}
|
||||
|
||||
if (!leaveOpen)
|
||||
{
|
||||
innerStream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count) =>
|
||||
Read(new Span<byte>(buffer, offset, count));
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override int Read(Span<byte> buffer)
|
||||
#else
|
||||
public int Read(Span<byte> buffer)
|
||||
#endif
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
// Guard against infinite loop (output.pos would never become non-zero)
|
||||
if (buffer.Length == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length };
|
||||
while (true)
|
||||
{
|
||||
// If there is still input available, or there might be data buffered in the decompressor context, flush that out
|
||||
while (input.pos < input.size || !contextDrained)
|
||||
{
|
||||
nuint oldInputPos = input.pos;
|
||||
nuint result = DecompressStream(ref output, buffer);
|
||||
if (output.pos > 0 || oldInputPos != input.pos)
|
||||
{
|
||||
// Keep result from last decompress call that made some progress, so we known if we're at end of frame
|
||||
lastDecompressResult = result;
|
||||
}
|
||||
// If decompression filled the output buffer, there might still be data buffered in the decompressor context
|
||||
contextDrained = output.pos < output.size;
|
||||
// If we have data to return, return it immediately, so we won't stall on Read
|
||||
if (output.pos > 0)
|
||||
{
|
||||
return (int)output.pos;
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, read some more input
|
||||
int bytesRead;
|
||||
if ((bytesRead = innerStream.Read(inputBuffer, 0, inputBufferSize)) == 0)
|
||||
{
|
||||
if (checkEndOfStream && lastDecompressResult != 0)
|
||||
{
|
||||
throw new EndOfStreamException("Premature end of stream");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
input.size = (nuint)bytesRead;
|
||||
input.pos = 0;
|
||||
}
|
||||
}
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
) => ReadAsync(new Memory<byte>(buffer, offset, count), cancellationToken).AsTask();
|
||||
|
||||
public override async ValueTask<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
#else
|
||||
|
||||
public override Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
) => ReadAsync(new Memory<byte>(buffer, offset, count), cancellationToken);
|
||||
|
||||
public async Task<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
#endif
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
// Guard against infinite loop (output.pos would never become non-zero)
|
||||
if (buffer.Length == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length };
|
||||
while (true)
|
||||
{
|
||||
// If there is still input available, or there might be data buffered in the decompressor context, flush that out
|
||||
while (input.pos < input.size || !contextDrained)
|
||||
{
|
||||
nuint oldInputPos = input.pos;
|
||||
nuint result = DecompressStream(ref output, buffer.Span);
|
||||
if (output.pos > 0 || oldInputPos != input.pos)
|
||||
{
|
||||
// Keep result from last decompress call that made some progress, so we known if we're at end of frame
|
||||
lastDecompressResult = result;
|
||||
}
|
||||
// If decompression filled the output buffer, there might still be data buffered in the decompressor context
|
||||
contextDrained = output.pos < output.size;
|
||||
// If we have data to return, return it immediately, so we won't stall on Read
|
||||
if (output.pos > 0)
|
||||
{
|
||||
return (int)output.pos;
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, read some more input
|
||||
int bytesRead;
|
||||
if (
|
||||
(
|
||||
bytesRead = await innerStream
|
||||
.ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken)
|
||||
.ConfigureAwait(false)
|
||||
) == 0
|
||||
)
|
||||
{
|
||||
if (checkEndOfStream && lastDecompressResult != 0)
|
||||
{
|
||||
throw new EndOfStreamException("Premature end of stream");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
input.size = (nuint)bytesRead;
|
||||
input.pos = 0;
|
||||
}
|
||||
}
|
||||
|
||||
private unsafe nuint DecompressStream(ref ZSTD_outBuffer_s output, Span<byte> outputBuffer)
|
||||
{
|
||||
fixed (byte* inputBufferPtr = inputBuffer)
|
||||
fixed (byte* outputBufferPtr = outputBuffer)
|
||||
{
|
||||
input.src = inputBufferPtr;
|
||||
output.dst = outputBufferPtr;
|
||||
return decompressor.NotNull().DecompressStream(ref input, ref output);
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanSeek => false;
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => throw new NotSupportedException();
|
||||
set => throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void Flush() => throw new NotSupportedException();
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
throw new NotSupportedException();
|
||||
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (decompressor == null)
|
||||
throw new ObjectDisposedException(nameof(DecompressionStream));
|
||||
}
|
||||
|
||||
#if NETSTANDARD2_0 || NETFRAMEWORK
|
||||
public virtual Task DisposeAsync()
|
||||
{
|
||||
try
|
||||
{
|
||||
Dispose();
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
catch (Exception exc)
|
||||
{
|
||||
return Task.FromException(exc);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
176
src/SharpCompress/Compressors/ZStandard/Decompressor.cs
Normal file
176
src/SharpCompress/Compressors/ZStandard/Decompressor.cs
Normal file
@@ -0,0 +1,176 @@
|
||||
using System;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
public unsafe class Decompressor : IDisposable
|
||||
{
|
||||
private readonly SafeDctxHandle handle;
|
||||
|
||||
public Decompressor()
|
||||
{
|
||||
handle = SafeDctxHandle.Create();
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_dParameter parameter, int value)
|
||||
{
|
||||
using var dctx = handle.Acquire();
|
||||
Unsafe.Methods.ZSTD_DCtx_setParameter(dctx, parameter, value).EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_dParameter parameter)
|
||||
{
|
||||
using var dctx = handle.Acquire();
|
||||
int value;
|
||||
Unsafe.Methods.ZSTD_DCtx_getParameter(dctx, parameter, &value).EnsureZstdSuccess();
|
||||
return value;
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
var dictReadOnlySpan = new ReadOnlySpan<byte>(dict);
|
||||
this.LoadDictionary(dictReadOnlySpan);
|
||||
}
|
||||
|
||||
public void LoadDictionary(ReadOnlySpan<byte> dict)
|
||||
{
|
||||
using var dctx = handle.Acquire();
|
||||
fixed (byte* dictPtr = dict)
|
||||
Unsafe
|
||||
.Methods.ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public static ulong GetDecompressedSize(ReadOnlySpan<byte> src)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
return Unsafe
|
||||
.Methods.ZSTD_decompressBound(srcPtr, (nuint)src.Length)
|
||||
.EnsureContentSizeOk();
|
||||
}
|
||||
|
||||
public static ulong GetDecompressedSize(ArraySegment<byte> src) =>
|
||||
GetDecompressedSize((ReadOnlySpan<byte>)src);
|
||||
|
||||
public static ulong GetDecompressedSize(byte[] src, int srcOffset, int srcLength) =>
|
||||
GetDecompressedSize(new ReadOnlySpan<byte>(src, srcOffset, srcLength));
|
||||
|
||||
public Span<byte> Unwrap(ReadOnlySpan<byte> src, int maxDecompressedSize = int.MaxValue)
|
||||
{
|
||||
var expectedDstSize = GetDecompressedSize(src);
|
||||
if (expectedDstSize > (ulong)maxDecompressedSize)
|
||||
throw new ZstdException(
|
||||
ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall,
|
||||
$"Decompressed content size {expectedDstSize} is greater than {nameof(maxDecompressedSize)} {maxDecompressedSize}"
|
||||
);
|
||||
if (expectedDstSize > Constants.MaxByteArrayLength)
|
||||
throw new ZstdException(
|
||||
ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall,
|
||||
$"Decompressed content size {expectedDstSize} is greater than max possible byte array size {Constants.MaxByteArrayLength}"
|
||||
);
|
||||
|
||||
var dest = new byte[expectedDstSize];
|
||||
var length = Unwrap(src, dest);
|
||||
return new Span<byte>(dest, 0, length);
|
||||
}
|
||||
|
||||
public int Unwrap(byte[] src, byte[] dest, int offset) =>
|
||||
Unwrap(src, new Span<byte>(dest, offset, dest.Length - offset));
|
||||
|
||||
public int Unwrap(ReadOnlySpan<byte> src, Span<byte> dest)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
{
|
||||
using var dctx = handle.Acquire();
|
||||
return (int)
|
||||
Unsafe
|
||||
.Methods.ZSTD_decompressDCtx(
|
||||
dctx,
|
||||
destPtr,
|
||||
(nuint)dest.Length,
|
||||
srcPtr,
|
||||
(nuint)src.Length
|
||||
)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public int Unwrap(
|
||||
byte[] src,
|
||||
int srcOffset,
|
||||
int srcLength,
|
||||
byte[] dst,
|
||||
int dstOffset,
|
||||
int dstLength
|
||||
) =>
|
||||
Unwrap(
|
||||
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
|
||||
new Span<byte>(dst, dstOffset, dstLength)
|
||||
);
|
||||
|
||||
public bool TryUnwrap(byte[] src, byte[] dest, int offset, out int written) =>
|
||||
TryUnwrap(src, new Span<byte>(dest, offset, dest.Length - offset), out written);
|
||||
|
||||
public bool TryUnwrap(ReadOnlySpan<byte> src, Span<byte> dest, out int written)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
{
|
||||
nuint returnValue;
|
||||
using (var dctx = handle.Acquire())
|
||||
{
|
||||
returnValue = Unsafe.Methods.ZSTD_decompressDCtx(
|
||||
dctx,
|
||||
destPtr,
|
||||
(nuint)dest.Length,
|
||||
srcPtr,
|
||||
(nuint)src.Length
|
||||
);
|
||||
}
|
||||
|
||||
if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))
|
||||
{
|
||||
written = default;
|
||||
return false;
|
||||
}
|
||||
|
||||
returnValue.EnsureZstdSuccess();
|
||||
written = (int)returnValue;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
public bool TryUnwrap(
|
||||
byte[] src,
|
||||
int srcOffset,
|
||||
int srcLength,
|
||||
byte[] dst,
|
||||
int dstOffset,
|
||||
int dstLength,
|
||||
out int written
|
||||
) =>
|
||||
TryUnwrap(
|
||||
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
|
||||
new Span<byte>(dst, dstOffset, dstLength),
|
||||
out written
|
||||
);
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
handle.Dispose();
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
|
||||
internal nuint DecompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s output)
|
||||
{
|
||||
fixed (ZSTD_inBuffer_s* inputPtr = &input)
|
||||
fixed (ZSTD_outBuffer_s* outputPtr = &output)
|
||||
{
|
||||
using var dctx = handle.Acquire();
|
||||
return Unsafe
|
||||
.Methods.ZSTD_decompressStream(dctx, outputPtr, inputPtr)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
}
|
||||
141
src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs
Normal file
141
src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs
Normal file
@@ -0,0 +1,141 @@
|
||||
using System;
|
||||
using System.Collections.Concurrent;
|
||||
using System.Collections.Generic;
|
||||
using System.Threading;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
internal unsafe class JobThreadPool : IDisposable
|
||||
{
|
||||
private int numThreads;
|
||||
private readonly List<JobThread> threads;
|
||||
private readonly BlockingCollection<Job> queue;
|
||||
|
||||
private struct Job
|
||||
{
|
||||
public void* function;
|
||||
public void* opaque;
|
||||
}
|
||||
|
||||
private class JobThread
|
||||
{
|
||||
private Thread Thread { get; }
|
||||
public CancellationTokenSource CancellationTokenSource { get; }
|
||||
|
||||
public JobThread(Thread thread)
|
||||
{
|
||||
CancellationTokenSource = new CancellationTokenSource();
|
||||
Thread = thread;
|
||||
}
|
||||
|
||||
public void Start()
|
||||
{
|
||||
Thread.Start(this);
|
||||
}
|
||||
|
||||
public void Cancel()
|
||||
{
|
||||
CancellationTokenSource.Cancel();
|
||||
}
|
||||
|
||||
public void Join()
|
||||
{
|
||||
Thread.Join();
|
||||
}
|
||||
}
|
||||
|
||||
private void Worker(object? obj)
|
||||
{
|
||||
if (obj is not JobThread poolThread)
|
||||
return;
|
||||
|
||||
var cancellationToken = poolThread.CancellationTokenSource.Token;
|
||||
while (!queue.IsCompleted && !cancellationToken.IsCancellationRequested)
|
||||
{
|
||||
try
|
||||
{
|
||||
if (queue.TryTake(out var job, -1, cancellationToken))
|
||||
((delegate* managed<void*, void>)job.function)(job.opaque);
|
||||
}
|
||||
catch (InvalidOperationException) { }
|
||||
catch (OperationCanceledException) { }
|
||||
}
|
||||
}
|
||||
|
||||
public JobThreadPool(int num, int queueSize)
|
||||
{
|
||||
numThreads = num;
|
||||
queue = new BlockingCollection<Job>(queueSize + 1);
|
||||
threads = new List<JobThread>(num);
|
||||
for (var i = 0; i < numThreads; i++)
|
||||
CreateThread();
|
||||
}
|
||||
|
||||
private void CreateThread()
|
||||
{
|
||||
var poolThread = new JobThread(new Thread(Worker));
|
||||
threads.Add(poolThread);
|
||||
poolThread.Start();
|
||||
}
|
||||
|
||||
public void Resize(int num)
|
||||
{
|
||||
lock (threads)
|
||||
{
|
||||
if (num < numThreads)
|
||||
{
|
||||
for (var i = numThreads - 1; i >= num; i--)
|
||||
{
|
||||
threads[i].Cancel();
|
||||
threads.RemoveAt(i);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (var i = numThreads; i < num; i++)
|
||||
CreateThread();
|
||||
}
|
||||
}
|
||||
|
||||
numThreads = num;
|
||||
}
|
||||
|
||||
public void Add(void* function, void* opaque)
|
||||
{
|
||||
queue.Add(new Job { function = function, opaque = opaque });
|
||||
}
|
||||
|
||||
public bool TryAdd(void* function, void* opaque)
|
||||
{
|
||||
return queue.TryAdd(new Job { function = function, opaque = opaque });
|
||||
}
|
||||
|
||||
public void Join(bool cancel = true)
|
||||
{
|
||||
queue.CompleteAdding();
|
||||
List<JobThread> jobThreads;
|
||||
lock (threads)
|
||||
jobThreads = new List<JobThread>(threads);
|
||||
|
||||
if (cancel)
|
||||
{
|
||||
foreach (var thread in jobThreads)
|
||||
thread.Cancel();
|
||||
}
|
||||
|
||||
foreach (var thread in jobThreads)
|
||||
thread.Join();
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
queue.Dispose();
|
||||
}
|
||||
|
||||
public int Size()
|
||||
{
|
||||
// todo not implemented
|
||||
// https://github.com/dotnet/runtime/issues/24200
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
163
src/SharpCompress/Compressors/ZStandard/SafeHandles.cs
Normal file
163
src/SharpCompress/Compressors/ZStandard/SafeHandles.cs
Normal file
@@ -0,0 +1,163 @@
|
||||
using System;
|
||||
using System.Runtime.InteropServices;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/// <summary>
|
||||
/// Provides the base class for ZstdSharp <see cref="SafeHandle"/> implementations.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Even though ZstdSharp is a managed library, its internals are using unmanaged
|
||||
/// memory and we are using safe handles in the library's high-level API to ensure
|
||||
/// proper disposal of unmanaged resources and increase safety.
|
||||
/// </remarks>
|
||||
/// <seealso cref="SafeCctxHandle"/>
|
||||
/// <seealso cref="SafeDctxHandle"/>
|
||||
internal abstract unsafe class SafeZstdHandle : SafeHandle
|
||||
{
|
||||
/// <summary>
|
||||
/// Parameterless constructor is hidden. Use the static <c>Create</c> factory
|
||||
/// method to create a new safe handle instance.
|
||||
/// </summary>
|
||||
protected SafeZstdHandle()
|
||||
: base(IntPtr.Zero, true) { }
|
||||
|
||||
public sealed override bool IsInvalid => handle == IntPtr.Zero;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Safely wraps an unmanaged Zstd compression context.
|
||||
/// </summary>
|
||||
internal sealed unsafe class SafeCctxHandle : SafeZstdHandle
|
||||
{
|
||||
/// <inheritdoc/>
|
||||
private SafeCctxHandle() { }
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new instance of <see cref="SafeCctxHandle"/>.
|
||||
/// </summary>
|
||||
/// <returns></returns>
|
||||
/// <exception cref="ZstdException">Creation failed.</exception>
|
||||
public static SafeCctxHandle Create()
|
||||
{
|
||||
var safeHandle = new SafeCctxHandle();
|
||||
bool success = false;
|
||||
try
|
||||
{
|
||||
var cctx = Unsafe.Methods.ZSTD_createCCtx();
|
||||
if (cctx == null)
|
||||
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create cctx");
|
||||
safeHandle.SetHandle((IntPtr)cctx);
|
||||
success = true;
|
||||
}
|
||||
finally
|
||||
{
|
||||
if (!success)
|
||||
{
|
||||
safeHandle.SetHandleAsInvalid();
|
||||
}
|
||||
}
|
||||
return safeHandle;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Acquires a reference to the safe handle.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// A <see cref="SafeHandleHolder{T}"/> instance that can be implicitly converted to a pointer
|
||||
/// to <see cref="ZSTD_CCtx_s"/>.
|
||||
/// </returns>
|
||||
public SafeHandleHolder<ZSTD_CCtx_s> Acquire() => new(this);
|
||||
|
||||
protected override bool ReleaseHandle()
|
||||
{
|
||||
return Unsafe.Methods.ZSTD_freeCCtx((ZSTD_CCtx_s*)handle) == 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Safely wraps an unmanaged Zstd compression context.
|
||||
/// </summary>
|
||||
internal sealed unsafe class SafeDctxHandle : SafeZstdHandle
|
||||
{
|
||||
/// <inheritdoc/>
|
||||
private SafeDctxHandle() { }
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new instance of <see cref="SafeDctxHandle"/>.
|
||||
/// </summary>
|
||||
/// <returns></returns>
|
||||
/// <exception cref="ZstdException">Creation failed.</exception>
|
||||
public static SafeDctxHandle Create()
|
||||
{
|
||||
var safeHandle = new SafeDctxHandle();
|
||||
bool success = false;
|
||||
try
|
||||
{
|
||||
var dctx = Unsafe.Methods.ZSTD_createDCtx();
|
||||
if (dctx == null)
|
||||
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create dctx");
|
||||
safeHandle.SetHandle((IntPtr)dctx);
|
||||
success = true;
|
||||
}
|
||||
finally
|
||||
{
|
||||
if (!success)
|
||||
{
|
||||
safeHandle.SetHandleAsInvalid();
|
||||
}
|
||||
}
|
||||
return safeHandle;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Acquires a reference to the safe handle.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// A <see cref="SafeHandleHolder{T}"/> instance that can be implicitly converted to a pointer
|
||||
/// to <see cref="ZSTD_DCtx_s"/>.
|
||||
/// </returns>
|
||||
public SafeHandleHolder<ZSTD_DCtx_s> Acquire() => new(this);
|
||||
|
||||
protected override bool ReleaseHandle()
|
||||
{
|
||||
return Unsafe.Methods.ZSTD_freeDCtx((ZSTD_DCtx_s*)handle) == 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Provides a convenient interface to safely acquire pointers of a specific type
|
||||
/// from a <see cref="SafeHandle"/>, by utilizing <see langword="using"/> blocks.
|
||||
/// </summary>
|
||||
/// <typeparam name="T">The type of pointers to return.</typeparam>
|
||||
/// <remarks>
|
||||
/// Safe handle holders can be <see cref="Dispose"/>d to decrement the safe handle's
|
||||
/// reference count, and can be implicitly converted to pointers to <see cref="T"/>.
|
||||
/// </remarks>
|
||||
internal unsafe ref struct SafeHandleHolder<T>
|
||||
where T : unmanaged
|
||||
{
|
||||
private readonly SafeHandle _handle;
|
||||
|
||||
private bool _refAdded;
|
||||
|
||||
public SafeHandleHolder(SafeHandle safeHandle)
|
||||
{
|
||||
_handle = safeHandle;
|
||||
_refAdded = false;
|
||||
safeHandle.DangerousAddRef(ref _refAdded);
|
||||
}
|
||||
|
||||
public static implicit operator T*(SafeHandleHolder<T> holder) =>
|
||||
(T*)holder._handle.DangerousGetHandle();
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (_refAdded)
|
||||
{
|
||||
_handle.DangerousRelease();
|
||||
_refAdded = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
using System.Threading;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
internal static unsafe class SynchronizationWrapper
|
||||
{
|
||||
private static object UnwrapObject(void** obj) => UnmanagedObject.Unwrap<object>(*obj);
|
||||
|
||||
public static void Init(void** obj) => *obj = UnmanagedObject.Wrap(new object());
|
||||
|
||||
public static void Free(void** obj) => UnmanagedObject.Free(*obj);
|
||||
|
||||
public static void Enter(void** obj) => Monitor.Enter(UnwrapObject(obj));
|
||||
|
||||
public static void Exit(void** obj) => Monitor.Exit(UnwrapObject(obj));
|
||||
|
||||
public static void Pulse(void** obj) => Monitor.Pulse(UnwrapObject(obj));
|
||||
|
||||
public static void PulseAll(void** obj) => Monitor.PulseAll(UnwrapObject(obj));
|
||||
|
||||
public static void Wait(void** mutex) => Monitor.Wait(UnwrapObject(mutex));
|
||||
}
|
||||
48
src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs
Normal file
48
src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs
Normal file
@@ -0,0 +1,48 @@
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
public static unsafe class ThrowHelper
|
||||
{
|
||||
private const ulong ZSTD_CONTENTSIZE_UNKNOWN = unchecked(0UL - 1);
|
||||
private const ulong ZSTD_CONTENTSIZE_ERROR = unchecked(0UL - 2);
|
||||
|
||||
public static nuint EnsureZstdSuccess(this nuint returnValue)
|
||||
{
|
||||
if (Unsafe.Methods.ZSTD_isError(returnValue))
|
||||
ThrowException(returnValue, Unsafe.Methods.ZSTD_getErrorName(returnValue));
|
||||
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
public static nuint EnsureZdictSuccess(this nuint returnValue)
|
||||
{
|
||||
if (Unsafe.Methods.ZDICT_isError(returnValue))
|
||||
ThrowException(returnValue, Unsafe.Methods.ZDICT_getErrorName(returnValue));
|
||||
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
public static ulong EnsureContentSizeOk(this ulong returnValue)
|
||||
{
|
||||
if (returnValue == ZSTD_CONTENTSIZE_UNKNOWN)
|
||||
throw new ZstdException(
|
||||
ZSTD_ErrorCode.ZSTD_error_GENERIC,
|
||||
"Decompressed content size is not specified"
|
||||
);
|
||||
|
||||
if (returnValue == ZSTD_CONTENTSIZE_ERROR)
|
||||
throw new ZstdException(
|
||||
ZSTD_ErrorCode.ZSTD_error_GENERIC,
|
||||
"Decompressed content size cannot be determined (e.g. invalid magic number, srcSize too small)"
|
||||
);
|
||||
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
private static void ThrowException(nuint returnValue, string message)
|
||||
{
|
||||
var code = 0 - returnValue;
|
||||
throw new ZstdException((ZSTD_ErrorCode)code, message);
|
||||
}
|
||||
}
|
||||
18
src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs
Normal file
18
src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs
Normal file
@@ -0,0 +1,18 @@
|
||||
using System;
|
||||
using System.Runtime.InteropServices;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/*
|
||||
* Wrap object to void* to make it unmanaged
|
||||
*/
|
||||
internal static unsafe class UnmanagedObject
|
||||
{
|
||||
public static void* Wrap(object obj) => (void*)GCHandle.ToIntPtr(GCHandle.Alloc(obj));
|
||||
|
||||
private static GCHandle UnwrapGcHandle(void* value) => GCHandle.FromIntPtr((IntPtr)value);
|
||||
|
||||
public static T Unwrap<T>(void* value) => (T)UnwrapGcHandle(value).Target!;
|
||||
|
||||
public static void Free(void* value) => UnwrapGcHandle(value).Free();
|
||||
}
|
||||
@@ -0,0 +1,52 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/* custom memory allocation functions */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void* ZSTD_customMalloc(nuint size, ZSTD_customMem customMem)
|
||||
{
|
||||
if (customMem.customAlloc != null)
|
||||
return ((delegate* managed<void*, nuint, void*>)customMem.customAlloc)(
|
||||
customMem.opaque,
|
||||
size
|
||||
);
|
||||
return malloc(size);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void* ZSTD_customCalloc(nuint size, ZSTD_customMem customMem)
|
||||
{
|
||||
if (customMem.customAlloc != null)
|
||||
{
|
||||
/* calloc implemented as malloc+memset;
|
||||
* not as efficient as calloc, but next best guess for custom malloc */
|
||||
void* ptr = ((delegate* managed<void*, nuint, void*>)customMem.customAlloc)(
|
||||
customMem.opaque,
|
||||
size
|
||||
);
|
||||
memset(ptr, 0, (uint)size);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
return calloc(1, size);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
|
||||
{
|
||||
if (ptr != null)
|
||||
{
|
||||
if (customMem.customFree != null)
|
||||
((delegate* managed<void*, void*, void>)customMem.customFree)(
|
||||
customMem.opaque,
|
||||
ptr
|
||||
);
|
||||
else
|
||||
free(ptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,14 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* bitStream can mix input from multiple sources.
|
||||
* A critical property of these streams is that they encode and decode in **reverse** direction.
|
||||
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
|
||||
*/
|
||||
public unsafe struct BIT_CStream_t
|
||||
{
|
||||
public nuint bitContainer;
|
||||
public uint bitPos;
|
||||
public sbyte* startPtr;
|
||||
public sbyte* ptr;
|
||||
public sbyte* endPtr;
|
||||
}
|
||||
@@ -0,0 +1,16 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public enum BIT_DStream_status
|
||||
{
|
||||
/* fully refilled */
|
||||
BIT_DStream_unfinished = 0,
|
||||
|
||||
/* still some bits left in bitstream */
|
||||
BIT_DStream_endOfBuffer = 1,
|
||||
|
||||
/* bitstream entirely consumed, bit-exact */
|
||||
BIT_DStream_completed = 2,
|
||||
|
||||
/* user requested more bits than present in bitstream */
|
||||
BIT_DStream_overflow = 3,
|
||||
}
|
||||
@@ -0,0 +1,13 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-********************************************
|
||||
* bitStream decoding API (read backward)
|
||||
**********************************************/
|
||||
public unsafe struct BIT_DStream_t
|
||||
{
|
||||
public nuint bitContainer;
|
||||
public uint bitsConsumed;
|
||||
public sbyte* ptr;
|
||||
public sbyte* start;
|
||||
public sbyte* limitPtr;
|
||||
}
|
||||
60
src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs
Normal file
60
src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs
Normal file
@@ -0,0 +1,60 @@
|
||||
using System;
|
||||
using System.Numerics;
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_countTrailingZeros32(uint val)
|
||||
{
|
||||
assert(val != 0);
|
||||
return (uint)BitOperations.TrailingZeroCount(val);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_countLeadingZeros32(uint val)
|
||||
{
|
||||
assert(val != 0);
|
||||
return (uint)BitOperations.LeadingZeroCount(val);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_countTrailingZeros64(ulong val)
|
||||
{
|
||||
assert(val != 0);
|
||||
return (uint)BitOperations.TrailingZeroCount(val);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_countLeadingZeros64(ulong val)
|
||||
{
|
||||
assert(val != 0);
|
||||
return (uint)BitOperations.LeadingZeroCount(val);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_NbCommonBytes(nuint val)
|
||||
{
|
||||
assert(val != 0);
|
||||
if (BitConverter.IsLittleEndian)
|
||||
{
|
||||
return MEM_64bits
|
||||
? (uint)BitOperations.TrailingZeroCount(val) >> 3
|
||||
: (uint)BitOperations.TrailingZeroCount((uint)val) >> 3;
|
||||
}
|
||||
|
||||
return MEM_64bits
|
||||
? (uint)BitOperations.LeadingZeroCount(val) >> 3
|
||||
: (uint)BitOperations.LeadingZeroCount((uint)val) >> 3;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_highbit32(uint val)
|
||||
{
|
||||
assert(val != 0);
|
||||
return (uint)BitOperations.Log2(val);
|
||||
}
|
||||
}
|
||||
739
src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs
Normal file
739
src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs
Normal file
@@ -0,0 +1,739 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
#if NETCOREAPP3_0_OR_GREATER
|
||||
using System.Runtime.Intrinsics.X86;
|
||||
#endif
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
#if NET7_0_OR_GREATER
|
||||
private static ReadOnlySpan<uint> Span_BIT_mask =>
|
||||
new uint[32]
|
||||
{
|
||||
0,
|
||||
1,
|
||||
3,
|
||||
7,
|
||||
0xF,
|
||||
0x1F,
|
||||
0x3F,
|
||||
0x7F,
|
||||
0xFF,
|
||||
0x1FF,
|
||||
0x3FF,
|
||||
0x7FF,
|
||||
0xFFF,
|
||||
0x1FFF,
|
||||
0x3FFF,
|
||||
0x7FFF,
|
||||
0xFFFF,
|
||||
0x1FFFF,
|
||||
0x3FFFF,
|
||||
0x7FFFF,
|
||||
0xFFFFF,
|
||||
0x1FFFFF,
|
||||
0x3FFFFF,
|
||||
0x7FFFFF,
|
||||
0xFFFFFF,
|
||||
0x1FFFFFF,
|
||||
0x3FFFFFF,
|
||||
0x7FFFFFF,
|
||||
0xFFFFFFF,
|
||||
0x1FFFFFFF,
|
||||
0x3FFFFFFF,
|
||||
0x7FFFFFFF,
|
||||
};
|
||||
private static uint* BIT_mask =>
|
||||
(uint*)
|
||||
System.Runtime.CompilerServices.Unsafe.AsPointer(
|
||||
ref MemoryMarshal.GetReference(Span_BIT_mask)
|
||||
);
|
||||
#else
|
||||
|
||||
private static readonly uint* BIT_mask = GetArrayPointer(
|
||||
new uint[32]
|
||||
{
|
||||
0,
|
||||
1,
|
||||
3,
|
||||
7,
|
||||
0xF,
|
||||
0x1F,
|
||||
0x3F,
|
||||
0x7F,
|
||||
0xFF,
|
||||
0x1FF,
|
||||
0x3FF,
|
||||
0x7FF,
|
||||
0xFFF,
|
||||
0x1FFF,
|
||||
0x3FFF,
|
||||
0x7FFF,
|
||||
0xFFFF,
|
||||
0x1FFFF,
|
||||
0x3FFFF,
|
||||
0x7FFFF,
|
||||
0xFFFFF,
|
||||
0x1FFFFF,
|
||||
0x3FFFFF,
|
||||
0x7FFFFF,
|
||||
0xFFFFFF,
|
||||
0x1FFFFFF,
|
||||
0x3FFFFFF,
|
||||
0x7FFFFFF,
|
||||
0xFFFFFFF,
|
||||
0x1FFFFFFF,
|
||||
0x3FFFFFFF,
|
||||
0x7FFFFFFF,
|
||||
}
|
||||
);
|
||||
#endif
|
||||
/*-**************************************************************
|
||||
* bitStream encoding
|
||||
****************************************************************/
|
||||
/*! BIT_initCStream() :
|
||||
* `dstCapacity` must be > sizeof(size_t)
|
||||
* @return : 0 if success,
|
||||
* otherwise an error code (can be tested using ERR_isError()) */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_initCStream(ref BIT_CStream_t bitC, void* startPtr, nuint dstCapacity)
|
||||
{
|
||||
bitC.bitContainer = 0;
|
||||
bitC.bitPos = 0;
|
||||
bitC.startPtr = (sbyte*)startPtr;
|
||||
bitC.ptr = bitC.startPtr;
|
||||
bitC.endPtr = bitC.startPtr + dstCapacity - sizeof(nuint);
|
||||
if (dstCapacity <= (nuint)sizeof(nuint))
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
|
||||
return 0;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits)
|
||||
{
|
||||
assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
|
||||
#if NETCOREAPP3_1_OR_GREATER
|
||||
if (Bmi2.X64.IsSupported)
|
||||
{
|
||||
return (nuint)Bmi2.X64.ZeroHighBits(bitContainer, nbBits);
|
||||
}
|
||||
|
||||
if (Bmi2.IsSupported)
|
||||
{
|
||||
return Bmi2.ZeroHighBits((uint)bitContainer, nbBits);
|
||||
}
|
||||
#endif
|
||||
|
||||
return bitContainer & BIT_mask[nbBits];
|
||||
}
|
||||
|
||||
/*! BIT_addBits() :
|
||||
* can add up to 31 bits into `bitC`.
|
||||
* Note : does not check for register overflow ! */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_addBits(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
nuint value,
|
||||
uint nbBits
|
||||
)
|
||||
{
|
||||
assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
|
||||
assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8));
|
||||
bitC_bitContainer |= BIT_getLowerBits(value, nbBits) << (int)bitC_bitPos;
|
||||
bitC_bitPos += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_addBitsFast() :
|
||||
* works only if `value` is _clean_,
|
||||
* meaning all high bits above nbBits are 0 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_addBitsFast(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
nuint value,
|
||||
uint nbBits
|
||||
)
|
||||
{
|
||||
assert(value >> (int)nbBits == 0);
|
||||
assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8));
|
||||
bitC_bitContainer |= value << (int)bitC_bitPos;
|
||||
bitC_bitPos += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_flushBitsFast() :
|
||||
* assumption : bitContainer has not overflowed
|
||||
* unsafe version; does not check buffer overflow */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_flushBitsFast(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
ref sbyte* bitC_ptr,
|
||||
sbyte* bitC_endPtr
|
||||
)
|
||||
{
|
||||
nuint nbBytes = bitC_bitPos >> 3;
|
||||
assert(bitC_bitPos < (uint)(sizeof(nuint) * 8));
|
||||
assert(bitC_ptr <= bitC_endPtr);
|
||||
MEM_writeLEST(bitC_ptr, bitC_bitContainer);
|
||||
bitC_ptr += nbBytes;
|
||||
bitC_bitPos &= 7;
|
||||
bitC_bitContainer >>= (int)(nbBytes * 8);
|
||||
}
|
||||
|
||||
/*! BIT_flushBits() :
|
||||
* assumption : bitContainer has not overflowed
|
||||
* safe version; check for buffer overflow, and prevents it.
|
||||
* note : does not signal buffer overflow.
|
||||
* overflow will be revealed later on using BIT_closeCStream() */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_flushBits(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
ref sbyte* bitC_ptr,
|
||||
sbyte* bitC_endPtr
|
||||
)
|
||||
{
|
||||
nuint nbBytes = bitC_bitPos >> 3;
|
||||
assert(bitC_bitPos < (uint)(sizeof(nuint) * 8));
|
||||
assert(bitC_ptr <= bitC_endPtr);
|
||||
MEM_writeLEST(bitC_ptr, bitC_bitContainer);
|
||||
bitC_ptr += nbBytes;
|
||||
if (bitC_ptr > bitC_endPtr)
|
||||
bitC_ptr = bitC_endPtr;
|
||||
bitC_bitPos &= 7;
|
||||
bitC_bitContainer >>= (int)(nbBytes * 8);
|
||||
}
|
||||
|
||||
/*! BIT_closeCStream() :
|
||||
* @return : size of CStream, in bytes,
|
||||
* or 0 if it could not fit into dstBuffer */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_closeCStream(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
sbyte* bitC_ptr,
|
||||
sbyte* bitC_endPtr,
|
||||
sbyte* bitC_startPtr
|
||||
)
|
||||
{
|
||||
BIT_addBitsFast(ref bitC_bitContainer, ref bitC_bitPos, 1, 1);
|
||||
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
|
||||
if (bitC_ptr >= bitC_endPtr)
|
||||
return 0;
|
||||
return (nuint)(bitC_ptr - bitC_startPtr) + (nuint)(bitC_bitPos > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
/*-********************************************************
|
||||
* bitStream decoding
|
||||
**********************************************************/
|
||||
/*! BIT_initDStream() :
|
||||
* Initialize a BIT_DStream_t.
|
||||
* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
|
||||
* `srcSize` must be the *exact* size of the bitStream, in bytes.
|
||||
* @return : size of stream (== srcSize), or an errorCode if a problem is detected
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint srcSize)
|
||||
{
|
||||
if (srcSize < 1)
|
||||
{
|
||||
*bitD = new BIT_DStream_t();
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
}
|
||||
|
||||
bitD->start = (sbyte*)srcBuffer;
|
||||
bitD->limitPtr = bitD->start + sizeof(nuint);
|
||||
if (srcSize >= (nuint)sizeof(nuint))
|
||||
{
|
||||
bitD->ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint);
|
||||
bitD->bitContainer = MEM_readLEST(bitD->ptr);
|
||||
{
|
||||
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
|
||||
bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
|
||||
if (lastByte == 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
bitD->ptr = bitD->start;
|
||||
bitD->bitContainer = *(byte*)bitD->start;
|
||||
switch (srcSize)
|
||||
{
|
||||
case 7:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16;
|
||||
goto case 6;
|
||||
case 6:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24;
|
||||
goto case 5;
|
||||
case 5:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32;
|
||||
goto case 4;
|
||||
case 4:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[3] << 24;
|
||||
goto case 3;
|
||||
case 3:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[2] << 16;
|
||||
goto case 2;
|
||||
case 2:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[1] << 8;
|
||||
goto default;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
{
|
||||
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
|
||||
bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
|
||||
if (lastByte == 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
}
|
||||
|
||||
bitD->bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8;
|
||||
}
|
||||
|
||||
return srcSize;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_getUpperBits(nuint bitContainer, uint start)
|
||||
{
|
||||
return bitContainer >> (int)start;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBits)
|
||||
{
|
||||
uint regMask = (uint)(sizeof(nuint) * 8 - 1);
|
||||
assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
|
||||
#if NETCOREAPP3_1_OR_GREATER
|
||||
if (Bmi2.X64.IsSupported)
|
||||
{
|
||||
return (nuint)Bmi2.X64.ZeroHighBits(bitContainer >> (int)(start & regMask), nbBits);
|
||||
}
|
||||
|
||||
if (Bmi2.IsSupported)
|
||||
{
|
||||
return Bmi2.ZeroHighBits((uint)(bitContainer >> (int)(start & regMask)), nbBits);
|
||||
}
|
||||
#endif
|
||||
|
||||
return (nuint)(bitContainer >> (int)(start & regMask) & ((ulong)1 << (int)nbBits) - 1);
|
||||
}
|
||||
|
||||
/*! BIT_lookBits() :
|
||||
* Provides next n bits from local register.
|
||||
* local register is not modified.
|
||||
* On 32-bits, maxNbBits==24.
|
||||
* On 64-bits, maxNbBits==56.
|
||||
* @return : value extracted */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_lookBits(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
return BIT_getMiddleBits(
|
||||
bitD->bitContainer,
|
||||
(uint)(sizeof(nuint) * 8) - bitD->bitsConsumed - nbBits,
|
||||
nbBits
|
||||
);
|
||||
}
|
||||
|
||||
/*! BIT_lookBitsFast() :
|
||||
* unsafe version; only works if nbBits >= 1 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_lookBitsFast(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
uint regMask = (uint)(sizeof(nuint) * 8 - 1);
|
||||
assert(nbBits >= 1);
|
||||
return bitD->bitContainer
|
||||
<< (int)(bitD->bitsConsumed & regMask)
|
||||
>> (int)(regMask + 1 - nbBits & regMask);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_skipBits(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
bitD->bitsConsumed += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_readBits() :
|
||||
* Read (consume) next n bits from local register and update.
|
||||
* Pay attention to not read more than nbBits contained into local register.
|
||||
* @return : extracted value. */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_readBits(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
nuint value = BIT_lookBits(bitD, nbBits);
|
||||
BIT_skipBits(bitD, nbBits);
|
||||
return value;
|
||||
}
|
||||
|
||||
/*! BIT_readBitsFast() :
|
||||
* unsafe version; only works if nbBits >= 1 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_readBitsFast(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
nuint value = BIT_lookBitsFast(bitD, nbBits);
|
||||
assert(nbBits >= 1);
|
||||
BIT_skipBits(bitD, nbBits);
|
||||
return value;
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStream_internal() :
|
||||
* Simple variant of BIT_reloadDStream(), with two conditions:
|
||||
* 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
|
||||
* 2. look window is valid after shifted down : bitD->ptr >= bitD->start
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD)
|
||||
{
|
||||
assert(bitD->bitsConsumed <= (uint)(sizeof(nuint) * 8));
|
||||
bitD->ptr -= bitD->bitsConsumed >> 3;
|
||||
assert(bitD->ptr >= bitD->start);
|
||||
bitD->bitsConsumed &= 7;
|
||||
bitD->bitContainer = MEM_readLEST(bitD->ptr);
|
||||
return BIT_DStream_status.BIT_DStream_unfinished;
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStreamFast() :
|
||||
* Similar to BIT_reloadDStream(), but with two differences:
|
||||
* 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
|
||||
* 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
|
||||
* point you must use BIT_reloadDStream() to reload.
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
|
||||
{
|
||||
if (bitD->ptr < bitD->limitPtr)
|
||||
return BIT_DStream_status.BIT_DStream_overflow;
|
||||
return BIT_reloadDStream_internal(bitD);
|
||||
}
|
||||
|
||||
#if NET7_0_OR_GREATER
|
||||
private static ReadOnlySpan<byte> Span_static_zeroFilled =>
|
||||
new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 };
|
||||
private static nuint* static_zeroFilled =>
|
||||
(nuint*)
|
||||
System.Runtime.CompilerServices.Unsafe.AsPointer(
|
||||
ref MemoryMarshal.GetReference(Span_static_zeroFilled)
|
||||
);
|
||||
#else
|
||||
|
||||
private static readonly nuint* static_zeroFilled = (nuint*)GetArrayPointer(
|
||||
new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }
|
||||
);
|
||||
#endif
|
||||
/*! BIT_reloadDStream() :
|
||||
* Refill `bitD` from buffer previously set in BIT_initDStream() .
|
||||
* This function is safe, it guarantees it will not never beyond src buffer.
|
||||
* @return : status of `BIT_DStream_t` internal register.
|
||||
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
|
||||
{
|
||||
if (bitD->bitsConsumed > (uint)(sizeof(nuint) * 8))
|
||||
{
|
||||
bitD->ptr = (sbyte*)&static_zeroFilled[0];
|
||||
return BIT_DStream_status.BIT_DStream_overflow;
|
||||
}
|
||||
|
||||
assert(bitD->ptr >= bitD->start);
|
||||
if (bitD->ptr >= bitD->limitPtr)
|
||||
{
|
||||
return BIT_reloadDStream_internal(bitD);
|
||||
}
|
||||
|
||||
if (bitD->ptr == bitD->start)
|
||||
{
|
||||
if (bitD->bitsConsumed < (uint)(sizeof(nuint) * 8))
|
||||
return BIT_DStream_status.BIT_DStream_endOfBuffer;
|
||||
return BIT_DStream_status.BIT_DStream_completed;
|
||||
}
|
||||
|
||||
{
|
||||
uint nbBytes = bitD->bitsConsumed >> 3;
|
||||
BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished;
|
||||
if (bitD->ptr - nbBytes < bitD->start)
|
||||
{
|
||||
nbBytes = (uint)(bitD->ptr - bitD->start);
|
||||
result = BIT_DStream_status.BIT_DStream_endOfBuffer;
|
||||
}
|
||||
|
||||
bitD->ptr -= nbBytes;
|
||||
bitD->bitsConsumed -= nbBytes * 8;
|
||||
bitD->bitContainer = MEM_readLEST(bitD->ptr);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
/*! BIT_endOfDStream() :
|
||||
* @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint BIT_endOfDStream(BIT_DStream_t* DStream)
|
||||
{
|
||||
return DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8)
|
||||
? 1U
|
||||
: 0U;
|
||||
}
|
||||
|
||||
/*-********************************************************
|
||||
* bitStream decoding
|
||||
**********************************************************/
|
||||
/*! BIT_initDStream() :
|
||||
* Initialize a BIT_DStream_t.
|
||||
* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
|
||||
* `srcSize` must be the *exact* size of the bitStream, in bytes.
|
||||
* @return : size of stream (== srcSize), or an errorCode if a problem is detected
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nuint srcSize)
|
||||
{
|
||||
if (srcSize < 1)
|
||||
{
|
||||
bitD = new BIT_DStream_t();
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
}
|
||||
|
||||
bitD.start = (sbyte*)srcBuffer;
|
||||
bitD.limitPtr = bitD.start + sizeof(nuint);
|
||||
if (srcSize >= (nuint)sizeof(nuint))
|
||||
{
|
||||
bitD.ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint);
|
||||
bitD.bitContainer = MEM_readLEST(bitD.ptr);
|
||||
{
|
||||
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
|
||||
bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
|
||||
if (lastByte == 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
bitD.ptr = bitD.start;
|
||||
bitD.bitContainer = *(byte*)bitD.start;
|
||||
switch (srcSize)
|
||||
{
|
||||
case 7:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16;
|
||||
goto case 6;
|
||||
case 6:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24;
|
||||
goto case 5;
|
||||
case 5:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32;
|
||||
goto case 4;
|
||||
case 4:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[3] << 24;
|
||||
goto case 3;
|
||||
case 3:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[2] << 16;
|
||||
goto case 2;
|
||||
case 2:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[1] << 8;
|
||||
goto default;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
{
|
||||
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
|
||||
bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
|
||||
if (lastByte == 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
}
|
||||
|
||||
bitD.bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8;
|
||||
}
|
||||
|
||||
return srcSize;
|
||||
}
|
||||
|
||||
/*! BIT_lookBits() :
|
||||
* Provides next n bits from local register.
|
||||
* local register is not modified.
|
||||
* On 32-bits, maxNbBits==24.
|
||||
* On 64-bits, maxNbBits==56.
|
||||
* @return : value extracted */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_lookBits(nuint bitD_bitContainer, uint bitD_bitsConsumed, uint nbBits)
|
||||
{
|
||||
return BIT_getMiddleBits(
|
||||
bitD_bitContainer,
|
||||
(uint)(sizeof(nuint) * 8) - bitD_bitsConsumed - nbBits,
|
||||
nbBits
|
||||
);
|
||||
}
|
||||
|
||||
/*! BIT_lookBitsFast() :
|
||||
* unsafe version; only works if nbBits >= 1 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_lookBitsFast(
|
||||
nuint bitD_bitContainer,
|
||||
uint bitD_bitsConsumed,
|
||||
uint nbBits
|
||||
)
|
||||
{
|
||||
uint regMask = (uint)(sizeof(nuint) * 8 - 1);
|
||||
assert(nbBits >= 1);
|
||||
return bitD_bitContainer
|
||||
<< (int)(bitD_bitsConsumed & regMask)
|
||||
>> (int)(regMask + 1 - nbBits & regMask);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_skipBits(ref uint bitD_bitsConsumed, uint nbBits)
|
||||
{
|
||||
bitD_bitsConsumed += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_readBits() :
|
||||
* Read (consume) next n bits from local register and update.
|
||||
* Pay attention to not read more than nbBits contained into local register.
|
||||
* @return : extracted value. */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_readBits(
|
||||
nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed,
|
||||
uint nbBits
|
||||
)
|
||||
{
|
||||
nuint value = BIT_lookBits(bitD_bitContainer, bitD_bitsConsumed, nbBits);
|
||||
BIT_skipBits(ref bitD_bitsConsumed, nbBits);
|
||||
return value;
|
||||
}
|
||||
|
||||
/*! BIT_readBitsFast() :
|
||||
* unsafe version; only works if nbBits >= 1 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_readBitsFast(
|
||||
nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed,
|
||||
uint nbBits
|
||||
)
|
||||
{
|
||||
nuint value = BIT_lookBitsFast(bitD_bitContainer, bitD_bitsConsumed, nbBits);
|
||||
assert(nbBits >= 1);
|
||||
BIT_skipBits(ref bitD_bitsConsumed, nbBits);
|
||||
return value;
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStreamFast() :
|
||||
* Similar to BIT_reloadDStream(), but with two differences:
|
||||
* 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
|
||||
* 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
|
||||
* point you must use BIT_reloadDStream() to reload.
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStreamFast(
|
||||
ref nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed,
|
||||
ref sbyte* bitD_ptr,
|
||||
sbyte* bitD_start,
|
||||
sbyte* bitD_limitPtr
|
||||
)
|
||||
{
|
||||
if (bitD_ptr < bitD_limitPtr)
|
||||
return BIT_DStream_status.BIT_DStream_overflow;
|
||||
return BIT_reloadDStream_internal(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start
|
||||
);
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStream() :
|
||||
* Refill `bitD` from buffer previously set in BIT_initDStream() .
|
||||
* This function is safe, it guarantees it will not never beyond src buffer.
|
||||
* @return : status of `BIT_DStream_t` internal register.
|
||||
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStream(
|
||||
ref nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed,
|
||||
ref sbyte* bitD_ptr,
|
||||
sbyte* bitD_start,
|
||||
sbyte* bitD_limitPtr
|
||||
)
|
||||
{
|
||||
if (bitD_bitsConsumed > (uint)(sizeof(nuint) * 8))
|
||||
{
|
||||
bitD_ptr = (sbyte*)&static_zeroFilled[0];
|
||||
return BIT_DStream_status.BIT_DStream_overflow;
|
||||
}
|
||||
|
||||
assert(bitD_ptr >= bitD_start);
|
||||
if (bitD_ptr >= bitD_limitPtr)
|
||||
{
|
||||
return BIT_reloadDStream_internal(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start
|
||||
);
|
||||
}
|
||||
|
||||
if (bitD_ptr == bitD_start)
|
||||
{
|
||||
if (bitD_bitsConsumed < (uint)(sizeof(nuint) * 8))
|
||||
return BIT_DStream_status.BIT_DStream_endOfBuffer;
|
||||
return BIT_DStream_status.BIT_DStream_completed;
|
||||
}
|
||||
|
||||
{
|
||||
uint nbBytes = bitD_bitsConsumed >> 3;
|
||||
BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished;
|
||||
if (bitD_ptr - nbBytes < bitD_start)
|
||||
{
|
||||
nbBytes = (uint)(bitD_ptr - bitD_start);
|
||||
result = BIT_DStream_status.BIT_DStream_endOfBuffer;
|
||||
}
|
||||
|
||||
bitD_ptr -= nbBytes;
|
||||
bitD_bitsConsumed -= nbBytes * 8;
|
||||
bitD_bitContainer = MEM_readLEST(bitD_ptr);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStream_internal() :
|
||||
* Simple variant of BIT_reloadDStream(), with two conditions:
|
||||
* 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
|
||||
* 2. look window is valid after shifted down : bitD->ptr >= bitD->start
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStream_internal(
|
||||
ref nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed,
|
||||
ref sbyte* bitD_ptr,
|
||||
sbyte* bitD_start
|
||||
)
|
||||
{
|
||||
assert(bitD_bitsConsumed <= (uint)(sizeof(nuint) * 8));
|
||||
bitD_ptr -= bitD_bitsConsumed >> 3;
|
||||
assert(bitD_ptr >= bitD_start);
|
||||
bitD_bitsConsumed &= 7;
|
||||
bitD_bitContainer = MEM_readLEST(bitD_ptr);
|
||||
return BIT_DStream_status.BIT_DStream_unfinished;
|
||||
}
|
||||
|
||||
/*! BIT_endOfDStream() :
|
||||
* @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint BIT_endOfDStream(
|
||||
uint DStream_bitsConsumed,
|
||||
sbyte* DStream_ptr,
|
||||
sbyte* DStream_start
|
||||
)
|
||||
{
|
||||
return DStream_ptr == DStream_start && DStream_bitsConsumed == (uint)(sizeof(nuint) * 8)
|
||||
? 1U
|
||||
: 0U;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,8 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public struct BlockSummary
|
||||
{
|
||||
public nuint nbSequences;
|
||||
public nuint blockSize;
|
||||
public nuint litSize;
|
||||
}
|
||||
@@ -0,0 +1,20 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* COVER_best_t is used for two purposes:
|
||||
* 1. Synchronizing threads.
|
||||
* 2. Saving the best parameters and dictionary.
|
||||
*
|
||||
* All of the methods except COVER_best_init() are thread safe if zstd is
|
||||
* compiled with multithreaded support.
|
||||
*/
|
||||
public unsafe struct COVER_best_s
|
||||
{
|
||||
public void* mutex;
|
||||
public void* cond;
|
||||
public nuint liveJobs;
|
||||
public void* dict;
|
||||
public nuint dictSize;
|
||||
public ZDICT_cover_params_t parameters;
|
||||
public nuint compressedSize;
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-*************************************
|
||||
* Context
|
||||
***************************************/
|
||||
public unsafe struct COVER_ctx_t
|
||||
{
|
||||
public byte* samples;
|
||||
public nuint* offsets;
|
||||
public nuint* samplesSizes;
|
||||
public nuint nbSamples;
|
||||
public nuint nbTrainSamples;
|
||||
public nuint nbTestSamples;
|
||||
public uint* suffix;
|
||||
public nuint suffixSize;
|
||||
public uint* freqs;
|
||||
public uint* dmerAt;
|
||||
public uint d;
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* Struct used for the dictionary selection function.
|
||||
*/
|
||||
public unsafe struct COVER_dictSelection
|
||||
{
|
||||
public byte* dictContent;
|
||||
public nuint dictSize;
|
||||
public nuint totalCompressedSize;
|
||||
}
|
||||
@@ -0,0 +1,10 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
*Number of epochs and size of each epoch.
|
||||
*/
|
||||
public struct COVER_epoch_info_t
|
||||
{
|
||||
public uint num;
|
||||
public uint size;
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public struct COVER_map_pair_t_s
|
||||
{
|
||||
public uint key;
|
||||
public uint value;
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public unsafe struct COVER_map_s
|
||||
{
|
||||
public COVER_map_pair_t_s* data;
|
||||
public uint sizeLog;
|
||||
public uint size;
|
||||
public uint sizeMask;
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* A segment is a range in the source as well as the score of the segment.
|
||||
*/
|
||||
public struct COVER_segment_t
|
||||
{
|
||||
public uint begin;
|
||||
public uint end;
|
||||
public uint score;
|
||||
}
|
||||
@@ -0,0 +1,12 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* Parameters for COVER_tryParameters().
|
||||
*/
|
||||
public unsafe struct COVER_tryParameters_data_s
|
||||
{
|
||||
public COVER_ctx_t* ctx;
|
||||
public COVER_best_s* best;
|
||||
public nuint dictBufferCapacity;
|
||||
public ZDICT_cover_params_t parameters;
|
||||
}
|
||||
849
src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs
Normal file
849
src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs
Normal file
@@ -0,0 +1,849 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
{
    /* Default compression parameter tables, mirrored from zstd's clevels.h.
     * Outer index: source-size class (4 rows — presumably default/&gt;256KB,
     * &lt;=256KB, &lt;=128KB, &lt;=16KB as in upstream zstd; confirm at the lookup
     * site). Inner index: 23 entries, presumably compression levels 0..22.
     * Each entry: (windowLog, chainLog, hashLog, searchLog, minMatch,
     * targetLength, strategy). */
    private static readonly ZSTD_compressionParameters[][] ZSTD_defaultCParameters =
        new ZSTD_compressionParameters[4][]
        {
            /* Row 0: largest source-size class. */
            new ZSTD_compressionParameters[23]
            {
                new ZSTD_compressionParameters(windowLog: 19, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 6, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 19, chainLog: 13, hashLog: 14, searchLog: 1, minMatch: 7, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 20, chainLog: 15, hashLog: 16, searchLog: 1, minMatch: 6, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 21, chainLog: 16, hashLog: 17, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast),
                new ZSTD_compressionParameters(windowLog: 21, chainLog: 18, hashLog: 18, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast),
                new ZSTD_compressionParameters(windowLog: 21, chainLog: 18, hashLog: 19, searchLog: 3, minMatch: 5, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy),
                new ZSTD_compressionParameters(windowLog: 21, chainLog: 18, hashLog: 19, searchLog: 3, minMatch: 5, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy),
                new ZSTD_compressionParameters(windowLog: 21, chainLog: 19, hashLog: 20, searchLog: 4, minMatch: 5, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy),
                new ZSTD_compressionParameters(windowLog: 21, chainLog: 19, hashLog: 20, searchLog: 4, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 22, chainLog: 20, hashLog: 21, searchLog: 4, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 22, chainLog: 21, hashLog: 22, searchLog: 5, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 22, chainLog: 21, hashLog: 22, searchLog: 6, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 23, searchLog: 6, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 22, searchLog: 4, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btlazy2),
                new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 23, searchLog: 5, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btlazy2),
                new ZSTD_compressionParameters(windowLog: 22, chainLog: 23, hashLog: 23, searchLog: 6, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btlazy2),
                new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 22, searchLog: 5, minMatch: 5, targetLength: 48, strategy: ZSTD_strategy.ZSTD_btopt),
                new ZSTD_compressionParameters(windowLog: 23, chainLog: 23, hashLog: 22, searchLog: 5, minMatch: 4, targetLength: 64, strategy: ZSTD_strategy.ZSTD_btopt),
                new ZSTD_compressionParameters(windowLog: 23, chainLog: 23, hashLog: 22, searchLog: 6, minMatch: 3, targetLength: 64, strategy: ZSTD_strategy.ZSTD_btultra),
                new ZSTD_compressionParameters(windowLog: 23, chainLog: 24, hashLog: 22, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 25, chainLog: 25, hashLog: 23, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 26, chainLog: 26, hashLog: 24, searchLog: 7, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 27, chainLog: 27, hashLog: 25, searchLog: 9, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2),
            },
            /* Row 1: windowLog capped at 18. */
            new ZSTD_compressionParameters[23]
            {
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 5, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 13, hashLog: 14, searchLog: 1, minMatch: 6, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 14, hashLog: 14, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 16, hashLog: 16, searchLog: 1, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 5, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 17, hashLog: 18, searchLog: 5, minMatch: 5, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 3, minMatch: 5, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 4, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 6, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 5, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btlazy2),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 7, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btlazy2),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 4, targetLength: 16, strategy: ZSTD_strategy.ZSTD_btopt),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 3, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btopt),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btopt),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 10, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 12, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 13, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2),
            },
            /* Row 2: windowLog capped at 17. */
            new ZSTD_compressionParameters[23]
            {
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 12, hashLog: 12, searchLog: 1, minMatch: 5, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 6, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 13, hashLog: 15, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 15, hashLog: 16, searchLog: 2, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 17, hashLog: 17, searchLog: 2, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 4, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 6, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 17, hashLog: 17, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_btlazy2),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 7, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btlazy2),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btopt),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 4, minMatch: 3, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btopt),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 6, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btopt),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 10, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 5, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 7, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 9, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 11, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2),
            },
            /* Row 3: smallest source-size class, windowLog capped at 14. */
            new ZSTD_compressionParameters[23]
            {
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 5, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 15, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 15, searchLog: 1, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 15, searchLog: 2, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 4, minMatch: 4, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 3, minMatch: 4, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 4, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 6, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 8, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_btlazy2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 9, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_btlazy2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 3, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btopt),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 4, minMatch: 3, targetLength: 24, strategy: ZSTD_strategy.ZSTD_btopt),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 5, minMatch: 3, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btultra),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 6, minMatch: 3, targetLength: 64, strategy: ZSTD_strategy.ZSTD_btultra),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 5, minMatch: 3, targetLength: 48, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 8, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 9, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2),
                new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 10, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2),
            },
        };
}
|
||||
61
src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs
Normal file
61
src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs
Normal file
@@ -0,0 +1,61 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
{
    /* @return 1 if @u is a 2^n value (note: also 1 for u == 0), 0 otherwise.
     * Useful to check a value is valid for alignment restrictions. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static int ZSTD_isPower2(nuint u) => (u & (u - 1)) == 0 ? 1 : 0;

    /* Wrapped pointer difference: lhs - rhs.
     * (In the C original this exists to avoid UBSAN complaints; in C# it is
     * a plain pointer subtraction.) */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nint ZSTD_wrappedPtrDiff(byte* lhs, byte* rhs) => (nint)(lhs - rhs);

    /* Wrapped pointer add: ptr + add. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static byte* ZSTD_wrappedPtrAdd(byte* ptr, nint add) => ptr + add;

    /* Wrapped pointer subtraction: ptr - sub. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static byte* ZSTD_wrappedPtrSub(byte* ptr, nint sub) => ptr - sub;

    /* Pointer add that defines `NULL + 0 == NULL` (works around C UB in the
     * original): only offsets the pointer when add is strictly positive. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static byte* ZSTD_maybeNullPtrAdd(byte* ptr, nint add) => add > 0 ? ptr + add : ptr;
}
|
||||
444
src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs
Normal file
444
src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs
Normal file
@@ -0,0 +1,444 @@
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
    /* Verbosity level for trainer diagnostics. NOTE(review): not referenced in
     * the visible code — the C original's DISPLAY logging appears stripped. */
    private static int g_displayLevel = 0;
|
||||
|
||||
/**
|
||||
* Returns the sum of the sample sizes.
|
||||
*/
|
||||
private static nuint COVER_sum(nuint* samplesSizes, uint nbSamples)
|
||||
{
|
||||
nuint sum = 0;
|
||||
uint i;
|
||||
for (i = 0; i < nbSamples; ++i)
|
||||
{
|
||||
sum += samplesSizes[i];
|
||||
}
|
||||
|
||||
return sum;
|
||||
}
|
||||
|
||||
    /**
     * Warns the user when their corpus is too small.
     * NOTE(review): in this port the warning output has been stripped — the
     * ratio check remains but the function has no observable effect, and
     * displayLevel is unused. Kept for parity with the C source.
     */
    private static void COVER_warnOnSmallCorpus(nuint maxDictSize, nuint nbDmers, int displayLevel)
    {
        /* A corpus at least 10x the dictionary size is considered large enough. */
        double ratio = nbDmers / (double)maxDictSize;
        if (ratio >= 10)
        {
            return;
        }
    }
|
||||
|
||||
    /**
     * Computes the number of epochs and the size of each epoch.
     * We will make sure that each epoch gets at least 10 * k dmers.
     *
     * The COVER algorithms divide the data up into epochs of equal size and
     * select one segment from each epoch.
     *
     * @param maxDictSize The maximum allowed dictionary size.
     * @param nbDmers The number of dmers we are training on.
     * @param k The parameter k (segment size).
     * @param passes The target number of passes over the dmer corpus.
     *               More passes means a better dictionary.
     */
    private static COVER_epoch_info_t COVER_computeEpochs(
        uint maxDictSize,
        uint nbDmers,
        uint k,
        uint passes
    )
    {
        uint minEpochSize = k * 10;
        COVER_epoch_info_t epochs;
        /* First attempt: enough epochs for `passes` selections of k-sized
         * segments to fill the dictionary (at least 1 epoch). */
        epochs.num = 1 > maxDictSize / k / passes ? 1 : maxDictSize / k / passes;
        epochs.size = nbDmers / epochs.num;
        if (epochs.size >= minEpochSize)
        {
            assert(epochs.size * epochs.num <= nbDmers);
            return epochs;
        }

        /* Epochs came out too small: clamp the size up to minEpochSize
         * (but never beyond the corpus) and derive the count from it. */
        epochs.size = minEpochSize < nbDmers ? minEpochSize : nbDmers;
        epochs.num = nbDmers / epochs.size;
        assert(epochs.size * epochs.num <= nbDmers);
        return epochs;
    }
|
||||
|
||||
    /**
     * Checks total compressed size of a dictionary.
     * Compresses every checked sample with a CDict built from `dict` and
     * returns dictBufferCapacity plus the sum of the compressed sizes, or a
     * ZSTD error code (as returned by the failing call, or
     * ZSTD_error_GENERIC when allocation fails).
     * When splitPoint &lt; 1, only samples [nbTrainSamples, nbSamples) are
     * checked; otherwise all samples are.
     */
    private static nuint COVER_checkTotalCompressedSize(
        ZDICT_cover_params_t parameters,
        nuint* samplesSizes,
        byte* samples,
        nuint* offsets,
        nuint nbTrainSamples,
        nuint nbSamples,
        byte* dict,
        nuint dictBufferCapacity
    )
    {
        /* Default result is an error until everything is allocated. */
        nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
        /* Pointers */
        ZSTD_CCtx_s* cctx;
        ZSTD_CDict_s* cdict;
        void* dst;
        /* Local variables */
        nuint dstCapacity;
        nuint i;
        {
            /* Size the scratch output buffer for the largest checked sample. */
            nuint maxSampleSize = 0;
            i = parameters.splitPoint < 1 ? nbTrainSamples : 0;
            for (; i < nbSamples; ++i)
            {
                maxSampleSize = samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize;
            }

            dstCapacity = ZSTD_compressBound(maxSampleSize);
            dst = malloc(dstCapacity);
        }

        cctx = ZSTD_createCCtx();
        cdict = ZSTD_createCDict(dict, dictBufferCapacity, parameters.zParams.compressionLevel);
        if (dst == null || cctx == null || cdict == null)
        {
            goto _compressCleanup;
        }

        /* Count the dictionary itself against the total. */
        totalCompressedSize = dictBufferCapacity;
        i = parameters.splitPoint < 1 ? nbTrainSamples : 0;
        for (; i < nbSamples; ++i)
        {
            nuint size = ZSTD_compress_usingCDict(
                cctx,
                dst,
                dstCapacity,
                samples + offsets[i],
                samplesSizes[i],
                cdict
            );
            if (ERR_isError(size))
            {
                /* Propagate the ZSTD error code as the result. */
                totalCompressedSize = size;
                goto _compressCleanup;
            }

            totalCompressedSize += size;
        }

        _compressCleanup:
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);
        if (dst != null)
        {
            free(dst);
        }

        return totalCompressedSize;
    }
|
||||
|
||||
    /**
     * Initialize the `COVER_best_t` to an empty "no result yet" state.
     * compressedSize starts at nuint.MaxValue so any real result beats it;
     * a null best is silently ignored.
     */
    private static void COVER_best_init(COVER_best_s* best)
    {
        if (best == null)
            return;
        SynchronizationWrapper.Init(&best->mutex);
        best->liveJobs = 0;
        best->dict = null;
        best->dictSize = 0;
        /* (nuint)(-1) == nuint.MaxValue: sentinel for "no result yet". */
        best->compressedSize = unchecked((nuint)(-1));
        best->parameters = new ZDICT_cover_params_t();
    }
|
||||
|
||||
    /**
     * Wait until liveJobs == 0.
     * Standard condition-wait loop under best->mutex; woken by the Pulse /
     * PulseAll calls in COVER_best_finish.
     */
    private static void COVER_best_wait(COVER_best_s* best)
    {
        if (best == null)
        {
            return;
        }

        SynchronizationWrapper.Enter(&best->mutex);
        while (best->liveJobs != 0)
        {
            SynchronizationWrapper.Wait(&best->mutex);
        }

        SynchronizationWrapper.Exit(&best->mutex);
    }
|
||||
|
||||
    /**
     * Call COVER_best_wait() and then destroy the COVER_best_t.
     * Waits for all outstanding jobs so no worker can touch the struct after
     * its dictionary buffer and mutex are released.
     */
    private static void COVER_best_destroy(COVER_best_s* best)
    {
        if (best == null)
        {
            return;
        }

        COVER_best_wait(best);
        if (best->dict != null)
        {
            free(best->dict);
        }

        SynchronizationWrapper.Free(&best->mutex);
    }
|
||||
|
||||
    /**
     * Called when a thread is about to be launched.
     * Increments liveJobs under best->mutex; balanced by the decrement in
     * COVER_best_finish.
     */
    private static void COVER_best_start(COVER_best_s* best)
    {
        if (best == null)
        {
            return;
        }

        SynchronizationWrapper.Enter(&best->mutex);
        ++best->liveJobs;
        SynchronizationWrapper.Exit(&best->mutex);
    }
|
||||
|
||||
    /**
     * Called when a thread finishes executing, both on error or success.
     * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
     * If this dictionary is the best so far (strictly smaller compressed
     * size), copies it and its parameters into `best`. On allocation failure
     * the best result is poisoned with ZSTD_error_GENERIC before returning.
     */
    private static void COVER_best_finish(
        COVER_best_s* best,
        ZDICT_cover_params_t parameters,
        COVER_dictSelection selection
    )
    {
        void* dict = selection.dictContent;
        nuint compressedSize = selection.totalCompressedSize;
        nuint dictSize = selection.dictSize;
        if (best == null)
        {
            return;
        }

        {
            nuint liveJobs;
            SynchronizationWrapper.Enter(&best->mutex);
            --best->liveJobs;
            liveJobs = best->liveJobs;
            /* New best? (strict improvement only) */
            if (compressedSize < best->compressedSize)
            {
                /* Grow the stored buffer if the new dictionary doesn't fit. */
                if (best->dict == null || best->dictSize < dictSize)
                {
                    if (best->dict != null)
                    {
                        free(best->dict);
                    }

                    best->dict = malloc(dictSize);
                    if (best->dict == null)
                    {
                        /* OOM: poison the result, wake one waiter, bail out. */
                        best->compressedSize = unchecked(
                            (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)
                        );
                        best->dictSize = 0;
                        SynchronizationWrapper.Pulse(&best->mutex);
                        SynchronizationWrapper.Exit(&best->mutex);
                        return;
                    }
                }

                if (dict != null)
                {
                    memcpy(best->dict, dict, (uint)dictSize);
                    best->dictSize = dictSize;
                    best->parameters = parameters;
                    best->compressedSize = compressedSize;
                }
            }

            /* Last job out wakes everyone blocked in COVER_best_wait. */
            if (liveJobs == 0)
            {
                SynchronizationWrapper.PulseAll(&best->mutex);
            }

            SynchronizationWrapper.Exit(&best->mutex);
        }
    }
|
||||
|
||||
private static COVER_dictSelection setDictSelection(byte* buf, nuint s, nuint csz)
|
||||
{
|
||||
COVER_dictSelection ds;
|
||||
ds.dictContent = buf;
|
||||
ds.dictSize = s;
|
||||
ds.totalCompressedSize = csz;
|
||||
return ds;
|
||||
}
|
||||
|
||||
/**
|
||||
* Error function for COVER_selectDict function. Returns a struct where
|
||||
* return.totalCompressedSize is a ZSTD error.
|
||||
*/
|
||||
private static COVER_dictSelection COVER_dictSelectionError(nuint error)
|
||||
{
|
||||
return setDictSelection(null, 0, error);
|
||||
}
|
||||
|
||||
/**
|
||||
* Error function for COVER_selectDict function. Checks if the return
|
||||
* value is an error.
|
||||
*/
|
||||
private static uint COVER_dictSelectionIsError(COVER_dictSelection selection)
|
||||
{
|
||||
return ERR_isError(selection.totalCompressedSize) || selection.dictContent == null
|
||||
? 1U
|
||||
: 0U;
|
||||
}
|
||||
|
||||
    /**
     * Always call after selectDict is called to free up used memory from
     * newly created dictionary. (free(null) is a no-op, so error selections
     * are safe to pass here.)
     */
    private static void COVER_dictSelectionFree(COVER_dictSelection selection)
    {
        free(selection.dictContent);
    }
|
||||
|
||||
/**
 * Called to finalize the dictionary and select one based on whether or not
 * the shrink-dict flag was enabled. If enabled the dictionary used is the
 * smallest dictionary within a specified regression of the compressed size
 * from the largest dictionary.
 *
 * Ownership: on success the returned selection owns one of the two buffers
 * allocated here (the other is freed before returning); on error both are
 * freed and an error selection is returned.
 */
private static COVER_dictSelection COVER_selectDict(
    byte* customDictContent,
    nuint dictBufferCapacity,
    nuint dictContentSize,
    byte* samplesBuffer,
    nuint* samplesSizes,
    uint nbFinalizeSamples,
    nuint nbCheckSamples,
    nuint nbSamples,
    ZDICT_cover_params_t @params,
    nuint* offsets,
    nuint totalCompressedSize
)
{
    nuint largestDict = 0;
    nuint largestCompressed = 0;
    /* End of the raw content; shrunk candidates are taken as suffixes of it. */
    byte* customDictContentEnd = customDictContent + dictContentSize;
    byte* largestDictbuffer = (byte*)malloc(dictBufferCapacity);
    byte* candidateDictBuffer = (byte*)malloc(dictBufferCapacity);
    /* e.g. shrinkDictMaxRegression == 5 -> tolerate up to 1.05x the best size. */
    double regressionTolerance = (double)@params.shrinkDictMaxRegression / 100 + 1;
    if (largestDictbuffer == null || candidateDictBuffer == null)
    {
        /* Allocation failed: free whichever succeeded (free(null) is a no-op). */
        free(largestDictbuffer);
        free(candidateDictBuffer);
        return COVER_dictSelectionError(dictContentSize);
    }

    /* First finalize the full-size dictionary and measure its compressed size. */
    memcpy(largestDictbuffer, customDictContent, (uint)dictContentSize);
    dictContentSize = ZDICT_finalizeDictionary(
        largestDictbuffer,
        dictBufferCapacity,
        customDictContent,
        dictContentSize,
        samplesBuffer,
        samplesSizes,
        nbFinalizeSamples,
        @params.zParams
    );
    if (ZDICT_isError(dictContentSize))
    {
        free(largestDictbuffer);
        free(candidateDictBuffer);
        return COVER_dictSelectionError(dictContentSize);
    }

    totalCompressedSize = COVER_checkTotalCompressedSize(
        @params,
        samplesSizes,
        samplesBuffer,
        offsets,
        nbCheckSamples,
        nbSamples,
        largestDictbuffer,
        dictContentSize
    );
    if (ERR_isError(totalCompressedSize))
    {
        free(largestDictbuffer);
        free(candidateDictBuffer);
        return COVER_dictSelectionError(totalCompressedSize);
    }

    /* Shrinking disabled: keep the full-size dictionary. */
    if (@params.shrinkDict == 0)
    {
        free(candidateDictBuffer);
        return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
    }

    largestDict = dictContentSize;
    largestCompressed = totalCompressedSize;
    /* Try candidate sizes 256, 512, 1024, ... up to the full-size dictionary. */
    dictContentSize = 256;
    while (dictContentSize < largestDict)
    {
        memcpy(candidateDictBuffer, largestDictbuffer, (uint)largestDict);
        /* Candidate content is the trailing dictContentSize bytes of the raw dict. */
        dictContentSize = ZDICT_finalizeDictionary(
            candidateDictBuffer,
            dictBufferCapacity,
            customDictContentEnd - dictContentSize,
            dictContentSize,
            samplesBuffer,
            samplesSizes,
            nbFinalizeSamples,
            @params.zParams
        );
        if (ZDICT_isError(dictContentSize))
        {
            free(largestDictbuffer);
            free(candidateDictBuffer);
            return COVER_dictSelectionError(dictContentSize);
        }

        totalCompressedSize = COVER_checkTotalCompressedSize(
            @params,
            samplesSizes,
            samplesBuffer,
            offsets,
            nbCheckSamples,
            nbSamples,
            candidateDictBuffer,
            dictContentSize
        );
        if (ERR_isError(totalCompressedSize))
        {
            free(largestDictbuffer);
            free(candidateDictBuffer);
            return COVER_dictSelectionError(totalCompressedSize);
        }

        /* First (smallest) candidate within tolerance of the full dict wins. */
        if (totalCompressedSize <= largestCompressed * regressionTolerance)
        {
            free(largestDictbuffer);
            return setDictSelection(candidateDictBuffer, dictContentSize, totalCompressedSize);
        }

        dictContentSize *= 2;
    }

    /* No smaller candidate was good enough: fall back to the full-size dict. */
    dictContentSize = largestDict;
    totalCompressedSize = largestCompressed;
    free(candidateDictBuffer);
    return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
}
|
||||
}
|
||||
12
src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs
Normal file
12
src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs
Normal file
@@ -0,0 +1,12 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-***************************/
/* generic DTableDesc */
/*-***************************/
/// <summary>
/// Four-byte header describing a Huffman decoding table (HUF DTable).
/// </summary>
public struct DTableDesc
{
    // Maximum table log the table was allocated for.
    public byte maxTableLog;
    // Table variant selector (e.g. single-symbol vs double-symbol decoding).
    public byte tableType;
    // Actual log2 of the table size in use.
    public byte tableLog;
    // Padding; keeps the descriptor at 4 bytes.
    public byte reserved;
}
|
||||
@@ -0,0 +1,13 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/// <summary>
/// Resources used while gathering entropy statistics during dictionary training.
/// </summary>
public unsafe struct EStats_ress_t
{
    /* dictionary */
    public ZSTD_CDict_s* dict;

    /* working context */
    public ZSTD_CCtx_s* zc;

    /* must be ZSTD_BLOCKSIZE_MAX allocated */
    public void* workPlace;
}
|
||||
447
src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs
Normal file
447
src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs
Normal file
@@ -0,0 +1,447 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/*=== Version ===*/
/// <summary>
/// FSE library version encoded as major*10000 + minor*100 + release
/// (0.9.0 -> 900), matching the upstream FSE_VERSION_NUMBER scheme.
/// </summary>
private static uint FSE_versionNumber()
{
    const uint major = 0;
    const uint minor = 9;
    const uint release = 0;
    return major * 100 * 100 + minor * 100 + release;
}
|
||||
|
||||
/*=== Error Management ===*/
/// <summary>FSE-namespaced alias for the shared error predicate.</summary>
private static bool FSE_isError(nuint code) => ERR_isError(code);
|
||||
|
||||
/// <summary>FSE-namespaced alias for the shared error-name lookup.</summary>
private static string FSE_getErrorName(nuint code) => ERR_getErrorName(code);
|
||||
|
||||
/* Error Management */
/// <summary>HUF-namespaced alias for the shared error predicate.</summary>
private static bool HUF_isError(nuint code) => ERR_isError(code);
|
||||
|
||||
/// <summary>HUF-namespaced alias for the shared error-name lookup.</summary>
private static string HUF_getErrorName(nuint code) => ERR_getErrorName(code);
|
||||
|
||||
/*-**************************************************************
 * FSE NCount encoding-decoding
 ****************************************************************/
/// <summary>
/// Decodes a compactly-stored normalized count table (FSE header) from
/// <paramref name="headerBuffer"/> into <paramref name="normalizedCounter"/>.
/// Writes the table log to *tableLogPtr and the max symbol to *maxSVPtr.
/// Returns the number of header bytes consumed, or a ZSTD error code.
/// Requires hbSize >= 8; smaller inputs are zero-padded into a stack buffer
/// and re-dispatched.
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint FSE_readNCount_body(
    short* normalizedCounter,
    uint* maxSVPtr,
    uint* tableLogPtr,
    void* headerBuffer,
    nuint hbSize
)
{
    byte* istart = (byte*)headerBuffer;
    byte* iend = istart + hbSize;
    byte* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    uint bitStream;
    int bitCount;
    uint charnum = 0;
    uint maxSV1 = *maxSVPtr + 1;
    int previous0 = 0;
    if (hbSize < 8)
    {
        sbyte* buffer = stackalloc sbyte[8];
        /* This function only works when hbSize >= 8 */
        memset(buffer, 0, sizeof(sbyte) * 8);
        memcpy(buffer, headerBuffer, (uint)hbSize);
        {
            // Recurse on the padded copy, then reject results that claim to
            // have consumed more bytes than the caller actually supplied.
            nuint countSize = FSE_readNCount(
                normalizedCounter,
                maxSVPtr,
                tableLogPtr,
                buffer,
                sizeof(sbyte) * 8
            );
            if (FSE_isError(countSize))
                return countSize;
            if (countSize > hbSize)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            return countSize;
        }
    }

    assert(hbSize >= 8);
    // Clear all counts up-front; only explicitly decoded symbols are written below.
    memset(normalizedCounter, 0, (*maxSVPtr + 1) * sizeof(short));
    bitStream = MEM_readLE32(ip);
    // First 4 bits encode (tableLog - 5).
    nbBits = (int)((bitStream & 0xF) + 5);
    if (nbBits > 15)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = (uint)nbBits;
    remaining = (1 << nbBits) + 1;
    threshold = 1 << nbBits;
    nbBits++;
    for (; ; )
    {
        if (previous0 != 0)
        {
            /* Count the number of repeats. Each time the
             * 2-bit repeat code is 0b11 there is another
             * repeat.
             * Avoid UB by setting the high bit to 1.
             */
            int repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1);
            while (repeats >= 12)
            {
                charnum += 3 * 12;
                if (ip <= iend - 7)
                {
                    ip += 3;
                }
                else
                {
                    // Near the end of input: clamp the read pointer to the last
                    // 4 readable bytes and fold the skipped bytes into bitCount.
                    bitCount -= (int)(8 * (iend - 7 - ip));
                    bitCount &= 31;
                    ip = iend - 4;
                }

                bitStream = MEM_readLE32(ip) >> bitCount;
                repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1);
            }

            charnum += (uint)(3 * repeats);
            bitStream >>= 2 * repeats;
            bitCount += 2 * repeats;
            // The final 2-bit code (< 3) gives the remainder of zero symbols.
            assert((bitStream & 3) < 3);
            charnum += bitStream & 3;
            bitCount += 2;
            if (charnum >= maxSV1)
                break;
            if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4)
            {
                assert(bitCount >> 3 <= 3);
                ip += bitCount >> 3;
                bitCount &= 7;
            }
            else
            {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }

            bitStream = MEM_readLE32(ip) >> bitCount;
        }

        {
            // Decode one count using a variable-length code: small values fit in
            // (nbBits-1) bits, larger ones need the full nbBits.
            int max = 2 * threshold - 1 - remaining;
            int count;
            if ((bitStream & (uint)(threshold - 1)) < (uint)max)
            {
                count = (int)(bitStream & (uint)(threshold - 1));
                bitCount += nbBits - 1;
            }
            else
            {
                count = (int)(bitStream & (uint)(2 * threshold - 1));
                if (count >= threshold)
                    count -= max;
                bitCount += nbBits;
            }

            // Shift by one so -1 encodes the "less than 1" probability.
            count--;
            if (count >= 0)
            {
                remaining -= count;
            }
            else
            {
                assert(count == -1);
                remaining += count;
            }

            normalizedCounter[charnum++] = (short)count;
            // A zero count triggers the repeat-code path on the next iteration.
            previous0 = count == 0 ? 1 : 0;
            assert(threshold > 1);
            if (remaining < threshold)
            {
                if (remaining <= 1)
                    break;
                // Shrink the code width as the remaining probability mass shrinks.
                nbBits = (int)(ZSTD_highbit32((uint)remaining) + 1);
                threshold = 1 << nbBits - 1;
            }

            if (charnum >= maxSV1)
                break;
            if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4)
            {
                ip += bitCount >> 3;
                bitCount &= 7;
            }
            else
            {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }

            bitStream = MEM_readLE32(ip) >> bitCount;
        }
    }

    // The distribution must consume exactly the full probability mass.
    if (remaining != 1)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    if (charnum > maxSV1)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall));
    if (bitCount > 32)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    *maxSVPtr = charnum - 1;
    // Round the consumed bit count up to whole bytes.
    ip += bitCount + 7 >> 3;
    return (nuint)(ip - istart);
}
|
||||
|
||||
/* Avoids the FORCE_INLINE of the _body() function. */
/// <summary>Non-inlined dispatch wrapper around FSE_readNCount_body.</summary>
private static nuint FSE_readNCount_body_default(
    short* normalizedCounter,
    uint* maxSVPtr,
    uint* tableLogPtr,
    void* headerBuffer,
    nuint hbSize
) => FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
|
||||
|
||||
/*! FSE_readNCount_bmi2():
 * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
 * In this managed port the bmi2 flag is accepted for API parity but not used:
 * all calls route to the default (non-BMI2) body.
 */
private static nuint FSE_readNCount_bmi2(
    short* normalizedCounter,
    uint* maxSVPtr,
    uint* tableLogPtr,
    void* headerBuffer,
    nuint hbSize,
    int bmi2
) => FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
|
||||
|
||||
/*! FSE_readNCount():
    Read compactly saved 'normalizedCounter' from 'rBuffer'.
    @return : size read from 'rBuffer',
              or an errorCode, which can be tested using FSE_isError().
    maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
private static nuint FSE_readNCount(
    short* normalizedCounter,
    uint* maxSVPtr,
    uint* tableLogPtr,
    void* headerBuffer,
    nuint hbSize
) => FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, 0);
|
||||
|
||||
/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer.
    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
    @return : size read from `src` , or an error Code .
    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
*/
private static nuint HUF_readStats(
    byte* huffWeight,
    nuint hwSize,
    uint* rankStats,
    uint* nbSymbolsPtr,
    uint* tableLogPtr,
    void* src,
    nuint srcSize
)
{
    // Stack-allocated workspace (219 x uint), matching HUF_READ_STATS_WORKSPACE size.
    uint* scratch = stackalloc uint[219];
    return HUF_readStats_wksp(
        huffWeight,
        hwSize,
        rankStats,
        nbSymbolsPtr,
        tableLogPtr,
        src,
        srcSize,
        scratch,
        sizeof(uint) * 219,
        0
    );
}
|
||||
|
||||
/// <summary>
/// Decodes Huffman symbol weights from a compact header into
/// <paramref name="huffWeight"/>, fills <paramref name="rankStats"/> with the
/// per-weight counts, and derives the table log and symbol count.
/// Returns the number of bytes consumed from src, or a ZSTD error code.
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint HUF_readStats_body(
    byte* huffWeight,
    nuint hwSize,
    uint* rankStats,
    uint* nbSymbolsPtr,
    uint* tableLogPtr,
    void* src,
    nuint srcSize,
    void* workSpace,
    nuint wkspSize,
    int bmi2
)
{
    uint weightTotal;
    byte* ip = (byte*)src;
    nuint iSize;
    nuint oSize;
    if (srcSize == 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    // First byte is the header: >= 128 means weights are stored raw as 4-bit
    // nibbles; < 128 means they are FSE-compressed and the byte is their size.
    iSize = ip[0];
    if (iSize >= 128)
    {
        /* Direct (uncompressed) representation: oSize weights, two per byte. */
        oSize = iSize - 127;
        iSize = (oSize + 1) / 2;
        if (iSize + 1 > srcSize)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
        if (oSize >= hwSize)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        ip += 1;
        {
            uint n;
            // Unpack 4-bit weight nibbles: high nibble first, then low nibble.
            for (n = 0; n < oSize; n += 2)
            {
                huffWeight[n] = (byte)(ip[n / 2] >> 4);
                huffWeight[n + 1] = (byte)(ip[n / 2] & 15);
            }
        }
    }
    else
    {
        /* FSE-compressed weights: decompress them into huffWeight. */
        if (iSize + 1 > srcSize)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
        oSize = FSE_decompress_wksp_bmi2(
            huffWeight,
            hwSize - 1,
            ip + 1,
            iSize,
            6,
            workSpace,
            wkspSize,
            bmi2
        );
        if (FSE_isError(oSize))
            return oSize;
    }

    // Tally how many symbols carry each weight (weights range 0..12).
    memset(rankStats, 0, (12 + 1) * sizeof(uint));
    weightTotal = 0;
    {
        uint n;
        for (n = 0; n < oSize; n++)
        {
            if (huffWeight[n] > 12)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            rankStats[huffWeight[n]]++;
            // Weight w contributes 2^(w-1); weight 0 contributes nothing.
            weightTotal += (uint)(1 << huffWeight[n] >> 1);
        }
    }

    if (weightTotal == 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    {
        /* get last non-null symbol weight (implied, total must be 2^n) */
        uint tableLog = ZSTD_highbit32(weightTotal) + 1;
        if (tableLog > 12)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        *tableLogPtr = tableLog;
        {
            // The last symbol's weight is not stored: it is whatever power of
            // two tops the total up to exactly 2^tableLog.
            uint total = (uint)(1 << (int)tableLog);
            uint rest = total - weightTotal;
            uint verif = (uint)(1 << (int)ZSTD_highbit32(rest));
            uint lastWeight = ZSTD_highbit32(rest) + 1;
            if (verif != rest)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            huffWeight[oSize] = (byte)lastWeight;
            rankStats[lastWeight]++;
        }
    }

    // A valid tree needs at least two weight-1 symbols, in an even count.
    if (rankStats[1] < 2 || (rankStats[1] & 1) != 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    *nbSymbolsPtr = (uint)(oSize + 1);
    return iSize + 1;
}
|
||||
|
||||
/* Avoids the FORCE_INLINE of the _body() function. */
/// <summary>Non-inlined dispatch wrapper around HUF_readStats_body (bmi2 = 0).</summary>
private static nuint HUF_readStats_body_default(
    byte* huffWeight,
    nuint hwSize,
    uint* rankStats,
    uint* nbSymbolsPtr,
    uint* tableLogPtr,
    void* src,
    nuint srcSize,
    void* workSpace,
    nuint wkspSize
) =>
    HUF_readStats_body(
        huffWeight,
        hwSize,
        rankStats,
        nbSymbolsPtr,
        tableLogPtr,
        src,
        srcSize,
        workSpace,
        wkspSize,
        0
    );
|
||||
|
||||
/// <summary>
/// Workspace-based entry point for reading Huffman stats. The flags argument
/// is accepted for API parity but this port always uses the default body.
/// </summary>
private static nuint HUF_readStats_wksp(
    byte* huffWeight,
    nuint hwSize,
    uint* rankStats,
    uint* nbSymbolsPtr,
    uint* tableLogPtr,
    void* src,
    nuint srcSize,
    void* workSpace,
    nuint wkspSize,
    int flags
) =>
    HUF_readStats_body_default(
        huffWeight,
        hwSize,
        rankStats,
        nbSymbolsPtr,
        tableLogPtr,
        src,
        srcSize,
        workSpace,
        wkspSize
    );
|
||||
}
|
||||
110
src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs
Normal file
110
src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs
Normal file
@@ -0,0 +1,110 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/// <summary>
/// True when <paramref name="code"/> is a ZSTD error: error codes are small
/// negative values reinterpreted as nuint, so they compare above -maxCode.
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static bool ERR_isError(nuint code) =>
    code > unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode));
|
||||
|
||||
/// <summary>
/// Maps a return value to its ZSTD_ErrorCode; non-error values map to
/// ZSTD_error_no_error (0).
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ZSTD_ErrorCode ERR_getErrorCode(nuint code)
{
    // Error codes are stored negated in the nuint; undo the negation here.
    return ERR_isError(code) ? (ZSTD_ErrorCode)(0 - code) : 0;
}
|
||||
|
||||
/// <summary>Human-readable description for a (possibly error) return value.</summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static string ERR_getErrorName(nuint code) =>
    ERR_getErrorString(ERR_getErrorCode(code));
|
||||
|
||||
/*-****************************************
 * Error Strings
 ******************************************/
/// <summary>
/// Maps each ZSTD_ErrorCode to its descriptive message; unknown codes
/// (including ZSTD_error_maxCode) yield "Unspecified error code".
/// </summary>
private static string ERR_getErrorString(ZSTD_ErrorCode code) =>
    code switch
    {
        ZSTD_ErrorCode.ZSTD_error_no_error => "No error detected",
        ZSTD_ErrorCode.ZSTD_error_GENERIC => "Error (generic)",
        ZSTD_ErrorCode.ZSTD_error_prefix_unknown => "Unknown frame descriptor",
        ZSTD_ErrorCode.ZSTD_error_version_unsupported => "Version not supported",
        ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported => "Unsupported frame parameter",
        ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge =>
            "Frame requires too much memory for decoding",
        ZSTD_ErrorCode.ZSTD_error_corruption_detected => "Data corruption detected",
        ZSTD_ErrorCode.ZSTD_error_checksum_wrong => "Restored data doesn't match checksum",
        ZSTD_ErrorCode.ZSTD_error_literals_headerWrong =>
            "Header of Literals' block doesn't respect format specification",
        ZSTD_ErrorCode.ZSTD_error_parameter_unsupported => "Unsupported parameter",
        ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported =>
            "Unsupported combination of parameters",
        ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound => "Parameter is out of bound",
        ZSTD_ErrorCode.ZSTD_error_init_missing => "Context should be init first",
        ZSTD_ErrorCode.ZSTD_error_memory_allocation => "Allocation error : not enough memory",
        ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall => "workSpace buffer is not large enough",
        ZSTD_ErrorCode.ZSTD_error_stage_wrong =>
            "Operation not authorized at current processing stage",
        ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge =>
            "tableLog requires too much memory : unsupported",
        ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge =>
            "Unsupported max Symbol Value : too large",
        ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall =>
            "Specified maxSymbolValue is too small",
        ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock =>
            "This mode cannot generate an uncompressed block",
        ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected =>
            "pledged buffer stability condition is not respected",
        ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted => "Dictionary is corrupted",
        ZSTD_ErrorCode.ZSTD_error_dictionary_wrong => "Dictionary mismatch",
        ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed =>
            "Cannot create Dictionary from provided samples",
        ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall => "Destination buffer is too small",
        ZSTD_ErrorCode.ZSTD_error_srcSize_wrong => "Src size is incorrect",
        ZSTD_ErrorCode.ZSTD_error_dstBuffer_null => "Operation on NULL destination buffer",
        ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull =>
            "Operation made no progress over multiple calls, due to output buffer being full",
        ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty =>
            "Operation made no progress over multiple calls, due to input being empty",
        ZSTD_ErrorCode.ZSTD_error_frameIndex_tooLarge => "Frame index is too large",
        ZSTD_ErrorCode.ZSTD_error_seekableIO => "An I/O error occurred when reading/seeking",
        ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong => "Destination buffer is wrong",
        ZSTD_ErrorCode.ZSTD_error_srcBuffer_wrong => "Source buffer is wrong",
        ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed =>
            "Block-level external sequence producer returned an error code",
        ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid =>
            "External sequences are not valid",
        // Covers ZSTD_error_maxCode and any out-of-range value.
        _ => "Unspecified error code",
    };
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/// <summary>
/// Size estimates produced during block splitting analysis.
/// </summary>
public struct EstimatedBlockSize
{
    // Estimated size of the literals section.
    public nuint estLitSize;
    // Estimated size of the whole block.
    public nuint estBlockSize;
}
|
||||
@@ -0,0 +1,19 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-*************************************
 * Acceleration
 ***************************************/
/// <summary>
/// Acceleration parameters for the FASTCOVER dictionary trainer: higher
/// acceleration levels finalize on fewer samples and skip more dmers.
/// </summary>
public struct FASTCOVER_accel_t
{
    /* Percentage of training samples used for ZDICT_finalizeDictionary */
    public uint finalize;

    /* Number of dmer skipped between each dmer counted in computeFrequency */
    public uint skip;

    public FASTCOVER_accel_t(uint finalize, uint skip)
    {
        this.finalize = finalize;
        this.skip = skip;
    }
}
|
||||
@@ -0,0 +1,19 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-*************************************
 * Context
 ***************************************/
/// <summary>
/// Training context for the FASTCOVER dictionary builder.
/// </summary>
public unsafe struct FASTCOVER_ctx_t
{
    // Concatenated sample bytes.
    public byte* samples;
    // Per-sample start offsets into `samples`.
    public nuint* offsets;
    // Per-sample sizes.
    public nuint* samplesSizes;
    public nuint nbSamples;
    // Samples are split into a training set and a testing set.
    public nuint nbTrainSamples;
    public nuint nbTestSamples;
    public nuint nbDmers;
    // Frequency table indexed by dmer hash (2^f entries).
    public uint* freqs;
    // d = dmer length parameter; f = log2 of the frequency table size.
    public uint d;
    public uint f;
    public FASTCOVER_accel_t accelParams;
}
|
||||
@@ -0,0 +1,12 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
 * Parameters for FASTCOVER_tryParameters().
 */
public unsafe struct FASTCOVER_tryParameters_data_s
{
    // Shared training context.
    public FASTCOVER_ctx_t* ctx;
    // Best-result accumulator shared across parameter trials.
    public COVER_best_s* best;
    public nuint dictBufferCapacity;
    // Cover parameters for this trial.
    public ZDICT_cover_params_t parameters;
}
|
||||
@@ -0,0 +1,7 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/// <summary>
/// Pair of fingerprints comparing previously-seen events against new ones.
/// </summary>
public struct FPStats
{
    public Fingerprint pastEvents;
    public Fingerprint newEvents;
}
|
||||
@@ -0,0 +1,16 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* *****************************************
 * FSE symbol compression API
 *******************************************/
/*!
This API consists of small unitary functions, which highly benefit from being inlined.
Hence their body are included in next section.
*/
/// <summary>
/// FSE compression (encoding) state.
/// </summary>
public unsafe struct FSE_CState_t
{
    // Current encoder state value.
    public nint value;
    // Pointer to the state-transition table.
    public void* stateTable;
    // Pointer to the per-symbol transform table.
    public void* symbolTT;
    public uint stateLog;
}
|
||||
@@ -0,0 +1,12 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* *****************************************
 * FSE symbol decompression API
 *******************************************/
/// <summary>
/// FSE decompression (decoding) state.
/// </summary>
public unsafe struct FSE_DState_t
{
    public nuint state;

    /* precise table may vary, depending on U16 */
    public void* table;
}
|
||||
@@ -0,0 +1,8 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* ====== Decompression ====== */
/// <summary>
/// Header stored at the start of an FSE decoding table.
/// </summary>
public struct FSE_DTableHeader
{
    public ushort tableLog;
    // Non-zero when the fast decoding path may be used.
    public ushort fastMode;
}
|
||||
@@ -0,0 +1,6 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/// <summary>
/// Workspace for FSE decompression: holds the decoded normalized counts.
/// </summary>
public unsafe struct FSE_DecompressWksp
{
    public fixed short ncount[256];
}
|
||||
@@ -0,0 +1,8 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/// <summary>
/// One FSE decoding table cell: next state base, decoded symbol, and the
/// number of bits to read for the state update.
/// </summary>
public struct FSE_decode_t
{
    public ushort newState;
    public byte symbol;
    public byte nbBits;
}
|
||||
13
src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs
Normal file
13
src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs
Normal file
@@ -0,0 +1,13 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/// <summary>
/// Reuse policy for a previously-built FSE table.
/// </summary>
public enum FSE_repeat
{
    /**< Cannot use the previous table */
    FSE_repeat_none,

    /**< Can use the previous table but it must be checked */
    FSE_repeat_check,

    /**< Can use the previous table and it is assumed to be valid */
    FSE_repeat_valid,
}
|
||||
@@ -0,0 +1,10 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* *****************************************
 * Implementation of inlined functions
 *******************************************/
/// <summary>
/// Per-symbol encoding transform: offset into the state table and the
/// bit-cost delta used to compute the number of bits to flush.
/// </summary>
public struct FSE_symbolCompressionTransform
{
    public int deltaFindState;
    public uint deltaNbBits;
}
|
||||
761
src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs
Normal file
761
src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs
Normal file
@@ -0,0 +1,761 @@
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/*-*************************************
 * Hash Functions
 ***************************************/
/**
 * Hash the d-byte value pointed to by p and mod 2^f into the frequency vector
 */
private static nuint FASTCOVER_hashPtrToIndex(void* p, uint f, uint d)
{
    // d is restricted to 6 or 8 by FASTCOVER_checkParameters.
    return d == 6 ? ZSTD_hash6Ptr(p, f) : ZSTD_hash8Ptr(p, f);
}
|
||||
|
||||
// Acceleration presets indexed by accel level 0..10 (index 0 mirrors level 1).
// Higher levels finalize on a smaller percentage of samples and skip more
// dmers while counting frequencies, trading quality for speed.
private static readonly FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = GetArrayPointer(
    new FASTCOVER_accel_t[11]
    {
        new FASTCOVER_accel_t(finalize: 100, skip: 0),
        new FASTCOVER_accel_t(finalize: 100, skip: 0),
        new FASTCOVER_accel_t(finalize: 50, skip: 1),
        new FASTCOVER_accel_t(finalize: 34, skip: 2),
        new FASTCOVER_accel_t(finalize: 25, skip: 3),
        new FASTCOVER_accel_t(finalize: 20, skip: 4),
        new FASTCOVER_accel_t(finalize: 17, skip: 5),
        new FASTCOVER_accel_t(finalize: 14, skip: 6),
        new FASTCOVER_accel_t(finalize: 13, skip: 7),
        new FASTCOVER_accel_t(finalize: 11, skip: 8),
        new FASTCOVER_accel_t(finalize: 10, skip: 9),
    }
);
|
||||
|
||||
/*-*************************************
 * Helper functions
 ***************************************/
/**
 * Selects the best segment in an epoch.
 * Segments of are scored according to the function:
 *
 * Let F(d) be the frequency of all dmers with hash value d.
 * Let S_i be hash value of the dmer at position i of segment S which has length k.
 *
 * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * Once the dmer with hash value d is in the dictionary we set F(d) = 0.
 *
 * Implementation: slides a window of dmersInK dmers over [begin, end),
 * maintaining the score incrementally. segmentFreqs tracks how many times
 * each hash occurs inside the current window so duplicate dmers are only
 * scored once.
 */
private static COVER_segment_t FASTCOVER_selectSegment(
    FASTCOVER_ctx_t* ctx,
    uint* freqs,
    uint begin,
    uint end,
    ZDICT_cover_params_t parameters,
    ushort* segmentFreqs
)
{
    /* Constants */
    uint k = parameters.k;
    uint d = parameters.d;
    uint f = ctx->f;
    uint dmersInK = k - d + 1;
    /* Try each segment (activeSegment) and save the best (bestSegment) */
    COVER_segment_t bestSegment = new COVER_segment_t
    {
        begin = 0,
        end = 0,
        score = 0,
    };
    COVER_segment_t activeSegment;
    activeSegment.begin = begin;
    activeSegment.end = begin;
    activeSegment.score = 0;
    while (activeSegment.end < end)
    {
        /* Get hash value of current dmer */
        nuint idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);
        // Only the first occurrence of a hash within the window adds to the score.
        if (segmentFreqs[idx] == 0)
        {
            activeSegment.score += freqs[idx];
        }

        activeSegment.end += 1;
        segmentFreqs[idx] += 1;
        // Window grew past k dmers: evict the leftmost dmer.
        if (activeSegment.end - activeSegment.begin == dmersInK + 1)
        {
            /* Get hash value of the dmer to be eliminated from active segment */
            nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
            segmentFreqs[delIndex] -= 1;
            // Its contribution leaves the score only when no copy remains in-window.
            if (segmentFreqs[delIndex] == 0)
            {
                activeSegment.score -= freqs[delIndex];
            }

            activeSegment.begin += 1;
        }

        if (activeSegment.score > bestSegment.score)
        {
            bestSegment = activeSegment;
        }
    }

    // Drain the window so segmentFreqs returns to all-zero for the next call.
    while (activeSegment.begin < end)
    {
        nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
        segmentFreqs[delIndex] -= 1;
        activeSegment.begin += 1;
    }

    {
        /* Zero the frequency of hash value of each dmer covered by the chosen segment. */
        uint pos;
        for (pos = bestSegment.begin; pos != bestSegment.end; ++pos)
        {
            nuint i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d);
            freqs[i] = 0;
        }
    }

    return bestSegment;
}
|
||||
|
||||
/// <summary>
/// Validates FASTCOVER training parameters. Returns 1 when every constraint
/// holds, 0 otherwise: d and k non-zero, d in {6, 8}, k no larger than the
/// dictionary capacity, d &lt;= k, f in [1, 31], splitPoint in (0, 1], and
/// accel in [1, 10].
/// </summary>
private static int FASTCOVER_checkParameters(
    ZDICT_cover_params_t parameters,
    nuint maxDictSize,
    uint f,
    uint accel
)
{
    bool valid =
        parameters.d != 0
        && parameters.k != 0
        && (parameters.d == 6 || parameters.d == 8)
        && parameters.k <= maxDictSize
        && parameters.d <= parameters.k
        && f != 0
        && f <= 31
        && parameters.splitPoint > 0
        && parameters.splitPoint <= 1
        && accel != 0
        && accel <= 10;
    return valid ? 1 : 0;
}
|
||||
|
||||
/**
 * Clean up a context initialized with `FASTCOVER_ctx_init()`.
 * Safe to call with null; frees the owned buffers and nulls the pointers
 * so a double destroy is harmless.
 */
private static void FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
{
    if (ctx == null)
    {
        return;
    }

    free(ctx->freqs);
    free(ctx->offsets);
    ctx->freqs = null;
    ctx->offsets = null;
}
|
||||
|
||||
/**
 * Calculate for frequency of hash value of each dmer in ctx->samples.
 * Only the training samples contribute; positions are visited every
 * (skip + 1) bytes as configured by the acceleration parameters.
 */
private static void FASTCOVER_computeFrequency(uint* freqs, FASTCOVER_ctx_t* ctx)
{
    uint f = ctx->f;
    uint d = ctx->d;
    uint skip = ctx->accelParams.skip;
    /* Each hash reads at least 8 bytes, so never start a dmer closer than
       max(d, 8) bytes to the end of the current sample. */
    uint readLength = d > 8 ? d : 8;
    assert(ctx->nbTrainSamples >= 5);
    assert(ctx->nbTrainSamples <= ctx->nbSamples);
    for (nuint sample = 0; sample < ctx->nbTrainSamples; sample++)
    {
        /* start of current dmer */
        nuint cursor = ctx->offsets[sample];
        nuint sampleEnd = ctx->offsets[sample + 1];
        /* Hash one dmer every (skip + 1) positions within this sample. */
        for (; cursor + readLength <= sampleEnd; cursor += skip + 1)
        {
            freqs[FASTCOVER_hashPtrToIndex(ctx->samples + cursor, f, d)]++;
        }
    }
}
|
||||
|
||||
/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 0 on success or error code on error.
 * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
 */
private static nuint FASTCOVER_ctx_init(
    FASTCOVER_ctx_t* ctx,
    void* samplesBuffer,
    nuint* samplesSizes,
    uint nbSamples,
    uint d,
    double splitPoint,
    uint f,
    FASTCOVER_accel_t accelParams
)
{
    byte* samples = (byte*)samplesBuffer;
    nuint totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
    /* Split samples into testing and training sets */
    /* When splitPoint == 1 the full corpus serves as both training and
       testing set; otherwise the first nbTrainSamples train, the rest test. */
    uint nbTrainSamples = splitPoint < 1 ? (uint)(nbSamples * splitPoint) : nbSamples;
    uint nbTestSamples = splitPoint < 1 ? nbSamples - nbTrainSamples : nbSamples;
    nuint trainingSamplesSize =
        splitPoint < 1 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
    /* NOTE(review): testSamplesSize is computed for parity with upstream zstd
       but is not read anywhere in this function. */
    nuint testSamplesSize =
        splitPoint < 1
            ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples)
            : totalSamplesSize;
    /* Reject corpora too small to hold a single dmer (max(d, 8) bytes) or too
       large to index: >= ~4 GB on 64-bit nuint, >= 1 GB on 32-bit. */
    if (
        totalSamplesSize < (d > sizeof(ulong) ? d : sizeof(ulong))
        || totalSamplesSize >= (sizeof(nuint) == 8 ? unchecked((uint)-1) : 1 * (1U << 30))
    )
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    }

    /* Training needs at least 5 samples ... */
    if (nbTrainSamples < 5)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    }

    /* ... and testing at least 1. */
    if (nbTestSamples < 1)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    }

    /* Reset the whole context; fields not listed are zero-initialized. */
    *ctx = new FASTCOVER_ctx_t
    {
        samples = samples,
        samplesSizes = samplesSizes,
        nbSamples = nbSamples,
        nbTrainSamples = nbTrainSamples,
        nbTestSamples = nbTestSamples,
        /* Number of dmers in the training set: each needs max(d, 8) bytes. */
        nbDmers = trainingSamplesSize - (d > sizeof(ulong) ? d : sizeof(ulong)) + 1,
        d = d,
        f = f,
        accelParams = accelParams,
        /* offsets[i] will hold the cumulative byte offset of sample i. */
        offsets = (nuint*)calloc(nbSamples + 1, (ulong)sizeof(nuint)),
    };
    if (ctx->offsets == null)
    {
        FASTCOVER_ctx_destroy(ctx);
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
    }

    {
        /* Prefix-sum the sample sizes so offsets[i] is where sample i begins
           and offsets[nbSamples] is the total size. */
        uint i;
        ctx->offsets[0] = 0;
        assert(nbSamples >= 5);
        for (i = 1; i <= nbSamples; ++i)
        {
            ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
        }
    }

    /* One frequency counter per possible f-bit hash value (2^f entries). */
    ctx->freqs = (uint*)calloc((ulong)1 << (int)f, sizeof(uint));
    if (ctx->freqs == null)
    {
        FASTCOVER_ctx_destroy(ctx);
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
    }

    FASTCOVER_computeFrequency(ctx->freqs, ctx);
    return 0;
}
|
||||
|
||||
/**
 * Given the prepared context build the dictionary.
 * Segments are selected one per epoch and written back-to-front into
 * dictBuffer; the returned value is the offset of the first written byte
 * (so dictBufferCapacity - return == bytes of dictionary content).
 */
private static nuint FASTCOVER_buildDictionary(
    FASTCOVER_ctx_t* ctx,
    uint* freqs,
    void* dictBuffer,
    nuint dictBufferCapacity,
    ZDICT_cover_params_t parameters,
    ushort* segmentFreqs
)
{
    byte* dict = (byte*)dictBuffer;
    /* Back-to-front write cursor: tail == 0 means the buffer is full. */
    nuint tail = dictBufferCapacity;
    /* Divide the data into epochs. We will select one segment from each epoch. */
    COVER_epoch_info_t epochs = COVER_computeEpochs(
        (uint)dictBufferCapacity,
        (uint)ctx->nbDmers,
        parameters.k,
        1
    );
    /* Give up after this many consecutive zero-score selections. */
    const nuint maxZeroScoreRun = 10;
    nuint zeroScoreRun = 0;
    nuint epoch;
    /* Round-robin over the epochs until the dictionary buffer is full. */
    for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num)
    {
        uint epochBegin = (uint)(epoch * epochs.size);
        uint epochEnd = epochBegin + epochs.size;
        nuint segmentSize;
        /* Select a segment */
        COVER_segment_t segment = FASTCOVER_selectSegment(
            ctx,
            freqs,
            epochBegin,
            epochEnd,
            parameters,
            segmentFreqs
        );
        /* A zero score means the segment covers no not-yet-covered dmers. */
        if (segment.score == 0)
        {
            if (++zeroScoreRun >= maxZeroScoreRun)
            {
                break;
            }

            continue;
        }

        zeroScoreRun = 0;
        /* Trim the segment (plus its trailing d-1 bytes) to the space left. */
        segmentSize =
            segment.end - segment.begin + parameters.d - 1 < tail
                ? segment.end - segment.begin + parameters.d - 1
                : tail;
        /* A piece shorter than d bytes cannot cover any dmer: stop. */
        if (segmentSize < parameters.d)
        {
            break;
        }

        /* Copy the segment bytes in front of what has been written so far. */
        tail -= segmentSize;
        memcpy(dict + tail, ctx->samples + segment.begin, (uint)segmentSize);
    }

    return tail;
}
|
||||
|
||||
/**
 * Tries a set of parameters and updates the COVER_best_t with the results.
 * This function is thread safe if zstd is compiled with multithreaded support.
 * It takes its parameters as an *OWNING* opaque pointer to support threading.
 */
private static void FASTCOVER_tryParameters(void* opaque)
{
    /* Save parameters as local variables */
    FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)opaque;
    FASTCOVER_ctx_t* ctx = data->ctx;
    ZDICT_cover_params_t parameters = data->parameters;
    nuint dictBufferCapacity = data->dictBufferCapacity;
    nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
    /* Initialize array to keep track of frequency of dmer within activeSegment */
    ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)ctx->f, sizeof(ushort));
    /* Allocate space for hash table, dict, and freqs */
    byte* dict = (byte*)malloc(dictBufferCapacity);
    /* Start in the error state so any early exit reports a failed try. */
    COVER_dictSelection selection = COVER_dictSelectionError(
        unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC))
    );
    /* Private copy of the frequency table: building the dictionary mutates it. */
    uint* freqs = (uint*)malloc(((ulong)1 << (int)ctx->f) * sizeof(uint));
    if (segmentFreqs == null || dict == null || freqs == null)
    {
        goto _cleanup;
    }

    memcpy(freqs, ctx->freqs, (uint)(((ulong)1 << (int)ctx->f) * sizeof(uint)));
    {
        /* Fill `dict` back-to-front; `tail` is the first used byte. */
        nuint tail = FASTCOVER_buildDictionary(
            ctx,
            freqs,
            dict,
            dictBufferCapacity,
            parameters,
            segmentFreqs
        );
        /* Only accelParams.finalize percent of the training samples are used
           to finalize/evaluate the dictionary. */
        uint nbFinalizeSamples = (uint)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
        selection = COVER_selectDict(
            dict + tail,
            dictBufferCapacity,
            dictBufferCapacity - tail,
            ctx->samples,
            ctx->samplesSizes,
            nbFinalizeSamples,
            ctx->nbTrainSamples,
            ctx->nbSamples,
            parameters,
            ctx->offsets,
            totalCompressedSize
        );
        if (COVER_dictSelectionIsError(selection) != 0)
        {
            goto _cleanup;
        }
    }

    _cleanup:
    /* free(null) is a no-op, so partially-allocated paths are safe here. */
    free(dict);
    /* Always report the result (possibly the error selection) to `best`,
       so waiters are released even on failure. */
    COVER_best_finish(data->best, parameters, selection);
    /* This function owns `data` (see contract above) and must free it. */
    free(data);
    free(segmentFreqs);
    COVER_dictSelectionFree(selection);
    free(freqs);
}
|
||||
|
||||
/**
 * Copies the fields shared by both parameter structs from a fastCover
 * parameter set into a cover parameter set. The fastCover-only fields
 * (f, accel) have no counterpart and are not copied.
 */
private static void FASTCOVER_convertToCoverParams(
    ZDICT_fastCover_params_t fastCoverParams,
    ZDICT_cover_params_t* coverParams
)
{
    coverParams->d = fastCoverParams.d;
    coverParams->k = fastCoverParams.k;
    coverParams->steps = fastCoverParams.steps;
    coverParams->nbThreads = fastCoverParams.nbThreads;
    coverParams->splitPoint = fastCoverParams.splitPoint;
    coverParams->shrinkDict = fastCoverParams.shrinkDict;
    coverParams->zParams = fastCoverParams.zParams;
}
|
||||
|
||||
/**
 * Copies tuned cover parameters back into a fastCover parameter struct,
 * together with the fastCover-specific `f` and `accel` values supplied
 * explicitly by the caller.
 */
private static void FASTCOVER_convertToFastCoverParams(
    ZDICT_cover_params_t coverParams,
    ZDICT_fastCover_params_t* fastCoverParams,
    uint f,
    uint accel
)
{
    /* fastCover-only knobs come from the explicit arguments... */
    fastCoverParams->f = f;
    fastCoverParams->accel = accel;
    /* ...everything else is copied straight from the cover parameters. */
    fastCoverParams->d = coverParams.d;
    fastCoverParams->k = coverParams.k;
    fastCoverParams->steps = coverParams.steps;
    fastCoverParams->nbThreads = coverParams.nbThreads;
    fastCoverParams->splitPoint = coverParams.splitPoint;
    fastCoverParams->shrinkDict = coverParams.shrinkDict;
    fastCoverParams->zParams = coverParams.zParams;
}
|
||||
|
||||
/*! ZDICT_trainFromBuffer_fastCover():
 * Train a dictionary from an array of samples using a modified version of COVER algorithm.
 * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 * d and k are required.
 * All other parameters are optional, will use default values if not provided
 * The resulting dictionary will be saved into `dictBuffer`.
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 * or an error code, which can be tested with ZDICT_isError().
 * See ZDICT_trainFromBuffer() for details on failure modes.
 * Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.
 * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
 * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
 * In general, it's recommended to provide a few thousands samples, though this can vary a lot.
 * It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
 */
public static nuint ZDICT_trainFromBuffer_fastCover(
    void* dictBuffer,
    nuint dictBufferCapacity,
    void* samplesBuffer,
    nuint* samplesSizes,
    uint nbSamples,
    ZDICT_fastCover_params_t parameters
)
{
    byte* dict = (byte*)dictBuffer;
    FASTCOVER_ctx_t ctx;
    ZDICT_cover_params_t coverParams;
    FASTCOVER_accel_t accelParams;
    g_displayLevel = (int)parameters.zParams.notificationLevel;
    /* Single-shot training always uses the whole corpus for training. */
    parameters.splitPoint = 1;
    /* Apply defaults when unset: f = 20, accel = 1. */
    parameters.f = parameters.f == 0 ? 20 : parameters.f;
    parameters.accel = parameters.accel == 0 ? 1 : parameters.accel;
    coverParams = new ZDICT_cover_params_t();
    FASTCOVER_convertToCoverParams(parameters, &coverParams);
    /* Validate all parameters before doing any allocation. */
    if (
        FASTCOVER_checkParameters(
            coverParams,
            dictBufferCapacity,
            parameters.f,
            parameters.accel
        ) == 0
    )
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
    }

    if (nbSamples == 0)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    }

    /* Dictionaries below 256 bytes are rejected outright. */
    if (dictBufferCapacity < 256)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
    }

    accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
    {
        /* Build the context: offsets table plus dmer frequency counts. */
        nuint initVal = FASTCOVER_ctx_init(
            &ctx,
            samplesBuffer,
            samplesSizes,
            nbSamples,
            coverParams.d,
            parameters.splitPoint,
            parameters.f,
            accelParams
        );
        if (ERR_isError(initVal))
        {
            return initVal;
        }
    }

    COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel);
    {
        /* Initialize array to keep track of frequency of dmer within activeSegment */
        /* NOTE(review): this calloc result is used without a null check —
           matches the structure of the surrounding port, but verify against
           upstream zstd whether an OOM check belongs here. */
        ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)parameters.f, sizeof(ushort));
        /* Raw dictionary content is written back-to-front; it starts at `tail`. */
        nuint tail = FASTCOVER_buildDictionary(
            &ctx,
            ctx.freqs,
            dictBuffer,
            dictBufferCapacity,
            coverParams,
            segmentFreqs
        );
        /* Only accelParams.finalize percent of training samples finalize the dict. */
        uint nbFinalizeSamples = (uint)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
        /* Prepend entropy tables and header to produce the final dictionary. */
        nuint dictionarySize = ZDICT_finalizeDictionary(
            dict,
            dictBufferCapacity,
            dict + tail,
            dictBufferCapacity - tail,
            samplesBuffer,
            samplesSizes,
            nbFinalizeSamples,
            coverParams.zParams
        );
        /* Intentionally empty: upstream zstd emits a log message on success here. */
        if (!ERR_isError(dictionarySize)) { }

        FASTCOVER_ctx_destroy(&ctx);
        free(segmentFreqs);
        return dictionarySize;
    }
}
|
||||
|
||||
/*! ZDICT_optimizeTrainFromBuffer_fastCover():
 * The same requirements as above hold for all the parameters except `parameters`.
 * This function tries many parameter combinations (specifically, k and d combinations)
 * and picks the best parameters. `*parameters` is filled with the best parameters found,
 * dictionary constructed with those parameters is stored in `dictBuffer`.
 * All of the parameters d, k, steps, f, and accel are optional.
 * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
 * if steps is zero it defaults to its default value.
 * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].
 * If f is zero, default value of 20 is used.
 * If accel is zero, default value of 1 is used.
 *
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 * or an error code, which can be tested with ZDICT_isError().
 * On success `*parameters` contains the parameters selected.
 * See ZDICT_trainFromBuffer() for details on failure modes.
 * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
 */
public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(
    void* dictBuffer,
    nuint dictBufferCapacity,
    void* samplesBuffer,
    nuint* samplesSizes,
    uint nbSamples,
    ZDICT_fastCover_params_t* parameters
)
{
    ZDICT_cover_params_t coverParams;
    FASTCOVER_accel_t accelParams;
    /* constants */
    uint nbThreads = parameters->nbThreads;
    double splitPoint = parameters->splitPoint <= 0 ? 0.75 : parameters->splitPoint;
    /* Sweep ranges: fixed d when given, else {6, 8}; fixed k when given,
       else [50, 2000] walked in kSteps steps of at least 1. */
    uint kMinD = parameters->d == 0 ? 6 : parameters->d;
    uint kMaxD = parameters->d == 0 ? 8 : parameters->d;
    uint kMinK = parameters->k == 0 ? 50 : parameters->k;
    uint kMaxK = parameters->k == 0 ? 2000 : parameters->k;
    uint kSteps = parameters->steps == 0 ? 40 : parameters->steps;
    uint kStepSize = (kMaxK - kMinK) / kSteps > 1 ? (kMaxK - kMinK) / kSteps : 1;
    /* NOTE(review): kIterations (total (d, k) combinations) is not read in
       this port — upstream zstd uses it for progress display only. */
    uint kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
    uint f = parameters->f == 0 ? 20 : parameters->f;
    uint accel = parameters->accel == 0 ? 1 : parameters->accel;
    const uint shrinkDict = 0;
    /* Local variables */
    int displayLevel = (int)parameters->zParams.notificationLevel;
    uint iteration = 1;
    uint d;
    uint k;
    COVER_best_s best;
    void* pool = null;
    int warned = 0;
    /* Up-front validation of the sweep configuration. */
    if (splitPoint <= 0 || splitPoint > 1)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
    }

    if (accel == 0 || accel > 10)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
    }

    /* The k range must be non-empty and admit every d we will try. */
    if (kMinK < kMaxD || kMaxK < kMinK)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
    }

    if (nbSamples == 0)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    }

    if (dictBufferCapacity < 256)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
    }

    /* Create a thread pool only when more than one thread is requested. */
    if (nbThreads > 1)
    {
        pool = POOL_create(nbThreads, 1);
        if (pool == null)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
        }
    }

    /* `best` aggregates the best result (and its dictionary) across all tries. */
    COVER_best_init(&best);
    coverParams = new ZDICT_cover_params_t();
    FASTCOVER_convertToCoverParams(*parameters, &coverParams);
    accelParams = FASTCOVER_defaultAccelParameters[accel];
    g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
    /* Outer sweep over d: one context (frequency table) per d value. */
    for (d = kMinD; d <= kMaxD; d += 2)
    {
        /* Initialize the context for this value of d */
        FASTCOVER_ctx_t ctx;
        {
            nuint initVal = FASTCOVER_ctx_init(
                &ctx,
                samplesBuffer,
                samplesSizes,
                nbSamples,
                d,
                splitPoint,
                f,
                accelParams
            );
            if (ERR_isError(initVal))
            {
                COVER_best_destroy(&best);
                POOL_free(pool);
                return initVal;
            }
        }

        /* Warn about a small corpus only once across the whole sweep. */
        if (warned == 0)
        {
            COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);
            warned = 1;
        }

        /* Inner sweep over k. */
        for (k = kMinK; k <= kMaxK; k += kStepSize)
        {
            /* Prepare the arguments */
            FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)malloc(
                (ulong)sizeof(FASTCOVER_tryParameters_data_s)
            );
            if (data == null)
            {
                COVER_best_destroy(&best);
                FASTCOVER_ctx_destroy(&ctx);
                POOL_free(pool);
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
            }

            /* Ownership of `data` transfers to FASTCOVER_tryParameters,
               which frees it when the try completes. */
            data->ctx = &ctx;
            data->best = &best;
            data->dictBufferCapacity = dictBufferCapacity;
            data->parameters = coverParams;
            data->parameters.k = k;
            data->parameters.d = d;
            data->parameters.splitPoint = splitPoint;
            data->parameters.steps = kSteps;
            data->parameters.shrinkDict = shrinkDict;
            data->parameters.zParams.notificationLevel = (uint)g_displayLevel;
            /* Skip invalid (d, k) combinations instead of failing the sweep. */
            if (
                FASTCOVER_checkParameters(
                    data->parameters,
                    dictBufferCapacity,
                    data->ctx->f,
                    accel
                ) == 0
            )
            {
                free(data);
                continue;
            }

            COVER_best_start(&best);
            /* Run on the pool when available, otherwise inline on this thread. */
            if (pool != null)
            {
                POOL_add(
                    pool,
                    (delegate* managed<void*, void>)(&FASTCOVER_tryParameters),
                    data
                );
            }
            else
            {
                FASTCOVER_tryParameters(data);
            }

            ++iteration;
        }

        /* Wait for all outstanding tries before destroying this d's context,
           which the pooled workers reference through data->ctx. */
        COVER_best_wait(&best);
        FASTCOVER_ctx_destroy(&ctx);
    }

    {
        /* Publish the winning parameters and dictionary to the caller. */
        nuint dictSize = best.dictSize;
        if (ERR_isError(best.compressedSize))
        {
            nuint compressedSize = best.compressedSize;
            COVER_best_destroy(&best);
            POOL_free(pool);
            return compressedSize;
        }

        FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
        memcpy(dictBuffer, best.dict, (uint)dictSize);
        COVER_best_destroy(&best);
        POOL_free(pool);
        return dictSize;
    }
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user