mirror of
https://github.com/adamhathcock/sharpcompress.git
synced 2026-02-08 13:34:57 +00:00
Compare commits
7 Commits
copilot/fi
...
adam/zstd
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b010cce1ca | ||
|
|
ee2cbc8051 | ||
|
|
906baf18d2 | ||
|
|
0a7ffd003b | ||
|
|
b545973c55 | ||
|
|
999af800af | ||
|
|
5b5336f456 |
@@ -1,7 +0,0 @@
|
||||
enabled: true
|
||||
agent:
|
||||
name: copilot-coding-agent
|
||||
allow:
|
||||
- paths: ["src/**/*", "tests/**/*", "README.md", "AGENTS.md"]
|
||||
actions: ["create", "modify"]
|
||||
require_review_before_merge: true
|
||||
13
.github/COPILOT_AGENT_README.md
vendored
13
.github/COPILOT_AGENT_README.md
vendored
@@ -1,13 +0,0 @@
|
||||
# Copilot Coding Agent Configuration
|
||||
|
||||
This repository includes a minimal opt-in configuration and CI workflow to allow the GitHub Copilot coding agent to open and validate PRs.
|
||||
|
||||
- .copilot-agent.yml: opt-in config for automated agents
|
||||
- .github/workflows/dotnetcore.yml: CI runs on PRs touching the solution, source, or tests to validate changes
|
||||
- AGENTS.yml: general information for this project
|
||||
|
||||
Maintainers can adjust the allowed paths or disable the agent by editing or removing .copilot-agent.yml.
|
||||
|
||||
Notes:
|
||||
- Do not change any other files in the repository.
|
||||
- If build/test paths are different, update the workflow accordingly; this workflow targets SharpCompress.sln and the SharpCompress.Tests test project.
|
||||
17
.github/agents/copilot-agent.yml
vendored
17
.github/agents/copilot-agent.yml
vendored
@@ -1,17 +0,0 @@
|
||||
enabled: true
|
||||
agent:
|
||||
name: copilot-coding-agent
|
||||
allow:
|
||||
- paths: ["src/**/*", "tests/**/*", "README.md", "AGENTS.md"]
|
||||
actions: ["create", "modify", "delete"]
|
||||
require_review_before_merge: true
|
||||
required_approvals: 1
|
||||
allowed_merge_strategies:
|
||||
- squash
|
||||
- merge
|
||||
auto_merge_on_green: false
|
||||
run_workflows: true
|
||||
notes: |
|
||||
- This manifest expresses the policy for the Copilot coding agent in this repository.
|
||||
- It does NOT install or authorize the agent; a repository admin must install the Copilot coding agent app and grant the repository the necessary permissions (contents: write, pull_requests: write, checks: write, actions: write/read, issues: write) to allow the agent to act.
|
||||
- Keep allow paths narrow and prefer require_review_before_merge during initial rollout.
|
||||
6
.github/workflows/dotnetcore.yml
vendored
6
.github/workflows/dotnetcore.yml
vendored
@@ -14,12 +14,12 @@ jobs:
|
||||
os: [windows-latest, ubuntu-latest]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/setup-dotnet@v5
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-dotnet@v4
|
||||
with:
|
||||
dotnet-version: 8.0.x
|
||||
- run: dotnet run --project build/build.csproj
|
||||
- uses: actions/upload-artifact@v5
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.os }}-sharpcompress.nupkg
|
||||
path: artifacts/*
|
||||
|
||||
@@ -26,7 +26,6 @@ applyTo: '**/*.cs'
|
||||
- Use CSharpier for all code formatting to ensure consistent style across the project.
|
||||
- Install CSharpier globally: `dotnet tool install -g csharpier`
|
||||
- Format files with: `dotnet csharpier format .`
|
||||
- **ALWAYS run `dotnet csharpier format .` after making code changes before committing.**
|
||||
- Configure your IDE to format on save using CSharpier.
|
||||
- CSharpier configuration can be customized via `.csharpierrc` file in the project root.
|
||||
- Trust CSharpier's opinionated formatting decisions to maintain consistency.
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
<Project>
|
||||
<ItemGroup>
|
||||
<PackageVersion Include="Bullseye" Version="6.0.0" />
|
||||
<PackageVersion Include="AwesomeAssertions" Version="9.2.1" />
|
||||
<PackageVersion Include="AwesomeAssertions" Version="9.2.0" />
|
||||
<PackageVersion Include="Glob" Version="1.1.9" />
|
||||
<PackageVersion Include="JetBrains.Profiler.SelfApi" Version="2.5.14" />
|
||||
<PackageVersion Include="Microsoft.Bcl.AsyncInterfaces" Version="8.0.0" />
|
||||
<PackageVersion Include="Microsoft.NET.Test.Sdk" Version="18.0.0" />
|
||||
<PackageVersion Include="Microsoft.NET.Test.Sdk" Version="17.13.0" />
|
||||
<PackageVersion Include="Mono.Posix.NETStandard" Version="1.0.0" />
|
||||
<PackageVersion Include="SimpleExec" Version="12.0.0" />
|
||||
<PackageVersion Include="System.Buffers" Version="4.6.1" />
|
||||
@@ -13,6 +12,7 @@
|
||||
<PackageVersion Include="System.Text.Encoding.CodePages" Version="8.0.0" />
|
||||
<PackageVersion Include="xunit" Version="2.9.3" />
|
||||
<PackageVersion Include="xunit.runner.visualstudio" Version="3.1.5" />
|
||||
<PackageVersion Include="xunit.SkippableFact" Version="1.5.23" />
|
||||
<PackageVersion Include="ZstdSharp.Port" Version="0.8.6" />
|
||||
<PackageVersion Include="Microsoft.SourceLink.GitHub" Version="8.0.0" />
|
||||
<PackageVersion Include="Microsoft.NETFramework.ReferenceAssemblies" Version="1.0.3" />
|
||||
|
||||
78
README.md
78
README.md
@@ -4,8 +4,6 @@ SharpCompress is a compression library in pure C# for .NET Framework 4.62, .NET
|
||||
|
||||
The major feature is support for non-seekable streams so large files can be processed on the fly (i.e. download stream).
|
||||
|
||||
**NEW:** All I/O operations now support async/await for improved performance and scalability. See the [Async Usage](#async-usage) section below.
|
||||
|
||||
GitHub Actions Build -
|
||||
[](https://github.com/adamhathcock/sharpcompress/actions/workflows/dotnetcore.yml)
|
||||
[](https://dndocs.com/d/sharpcompress/api/index.html)
|
||||
@@ -34,82 +32,6 @@ Hi everyone. I hope you're using SharpCompress and finding it useful. Please giv
|
||||
|
||||
Please do not email me directly to ask for help. If you think there is a real issue, please report it here.
|
||||
|
||||
## Async Usage
|
||||
|
||||
SharpCompress now provides full async/await support for all I/O operations, allowing for better performance and scalability in modern applications.
|
||||
|
||||
### Async Reading Examples
|
||||
|
||||
Extract entries asynchronously:
|
||||
```csharp
|
||||
using (Stream stream = File.OpenRead("archive.zip"))
|
||||
using (var reader = ReaderFactory.Open(stream))
|
||||
{
|
||||
while (reader.MoveToNextEntry())
|
||||
{
|
||||
if (!reader.Entry.IsDirectory)
|
||||
{
|
||||
// Async extraction
|
||||
await reader.WriteEntryToDirectoryAsync(
|
||||
@"C:\temp",
|
||||
new ExtractionOptions() { ExtractFullPath = true, Overwrite = true },
|
||||
cancellationToken
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Extract all entries to directory asynchronously:
|
||||
```csharp
|
||||
using (Stream stream = File.OpenRead("archive.tar.gz"))
|
||||
using (var reader = ReaderFactory.Open(stream))
|
||||
{
|
||||
await reader.WriteAllToDirectoryAsync(
|
||||
@"C:\temp",
|
||||
new ExtractionOptions() { ExtractFullPath = true, Overwrite = true },
|
||||
cancellationToken
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
Open entry stream asynchronously:
|
||||
```csharp
|
||||
using (var archive = ZipArchive.Open("archive.zip"))
|
||||
{
|
||||
foreach (var entry in archive.Entries.Where(e => !e.IsDirectory))
|
||||
{
|
||||
using (var entryStream = await entry.OpenEntryStreamAsync(cancellationToken))
|
||||
{
|
||||
// Process stream asynchronously
|
||||
await entryStream.CopyToAsync(outputStream, cancellationToken);
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Async Writing Examples
|
||||
|
||||
Write files asynchronously:
|
||||
```csharp
|
||||
using (Stream stream = File.OpenWrite("output.zip"))
|
||||
using (var writer = WriterFactory.Open(stream, ArchiveType.Zip, CompressionType.Deflate))
|
||||
{
|
||||
await writer.WriteAsync("file1.txt", fileStream, DateTime.Now, cancellationToken);
|
||||
}
|
||||
```
|
||||
|
||||
Write all files from directory asynchronously:
|
||||
```csharp
|
||||
using (Stream stream = File.OpenWrite("output.tar.gz"))
|
||||
using (var writer = WriterFactory.Open(stream, ArchiveType.Tar, new WriterOptions(CompressionType.GZip)))
|
||||
{
|
||||
await writer.WriteAllAsync(@"D:\files", "*", SearchOption.AllDirectories, cancellationToken);
|
||||
}
|
||||
```
|
||||
|
||||
All async methods support `CancellationToken` for graceful cancellation of long-running operations.
|
||||
|
||||
## Want to contribute?
|
||||
|
||||
I'm always looking for help or ideas. Please submit code or email with ideas. Unfortunately, just letting me know you'd like to help is not enough because I really have no overall plan of what needs to be done. I'll definitely accept code submissions and add you as a member of the project!
|
||||
|
||||
@@ -21,14 +21,8 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Config", "Config", "{CDB425
|
||||
Directory.Packages.props = Directory.Packages.props
|
||||
NuGet.config = NuGet.config
|
||||
.github\workflows\dotnetcore.yml = .github\workflows\dotnetcore.yml
|
||||
USAGE.md = USAGE.md
|
||||
README.md = README.md
|
||||
FORMATS.md = FORMATS.md
|
||||
AGENTS.md = AGENTS.md
|
||||
EndProjectSection
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SharpCompress.Performance", "tests\SharpCompress.Performance\SharpCompress.Performance.csproj", "{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17}"
|
||||
EndProject
|
||||
Global
|
||||
GlobalSection(SolutionConfigurationPlatforms) = preSolution
|
||||
Debug|Any CPU = Debug|Any CPU
|
||||
@@ -47,10 +41,6 @@ Global
|
||||
{D4D613CB-5E94-47FB-85BE-B8423D20C545}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{D4D613CB-5E94-47FB-85BE-B8423D20C545}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{D4D613CB-5E94-47FB-85BE-B8423D20C545}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
EndGlobalSection
|
||||
GlobalSection(SolutionProperties) = preSolution
|
||||
HideSolutionNode = FALSE
|
||||
@@ -58,6 +48,5 @@ Global
|
||||
GlobalSection(NestedProjects) = preSolution
|
||||
{FD19DDD8-72B2-4024-8665-0D1F7A2AA998} = {3C5BE746-03E5-4895-9988-0B57F162F86C}
|
||||
{F2B1A1EB-0FA6-40D0-8908-E13247C7226F} = {0F0901FF-E8D9-426A-B5A2-17C7F47C1529}
|
||||
{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17} = {0F0901FF-E8D9-426A-B5A2-17C7F47C1529}
|
||||
EndGlobalSection
|
||||
EndGlobal
|
||||
|
||||
143
USAGE.md
143
USAGE.md
@@ -1,18 +1,5 @@
|
||||
# SharpCompress Usage
|
||||
|
||||
## Async/Await Support
|
||||
|
||||
SharpCompress now provides full async/await support for all I/O operations. All `Read`, `Write`, and extraction operations have async equivalents ending in `Async` that accept an optional `CancellationToken`. This enables better performance and scalability for I/O-bound operations.
|
||||
|
||||
**Key Async Methods:**
|
||||
- `reader.WriteEntryToAsync(stream, cancellationToken)` - Extract entry asynchronously
|
||||
- `reader.WriteAllToDirectoryAsync(path, options, cancellationToken)` - Extract all asynchronously
|
||||
- `writer.WriteAsync(filename, stream, modTime, cancellationToken)` - Write entry asynchronously
|
||||
- `writer.WriteAllAsync(directory, pattern, searchOption, cancellationToken)` - Write directory asynchronously
|
||||
- `entry.OpenEntryStreamAsync(cancellationToken)` - Open entry stream asynchronously
|
||||
|
||||
See [Async Examples](#async-examples) section below for usage patterns.
|
||||
|
||||
## Stream Rules (changed with 0.21)
|
||||
|
||||
When dealing with Streams, the rule should be that you don't close a stream you didn't create. This, in effect, should mean you should always put a Stream in a using block to dispose it.
|
||||
@@ -185,133 +172,3 @@ foreach(var entry in tr.Entries)
|
||||
Console.WriteLine($"{entry.Key}");
|
||||
}
|
||||
```
|
||||
|
||||
## Async Examples
|
||||
|
||||
### Async Reader Examples
|
||||
|
||||
**Extract single entry asynchronously:**
|
||||
```C#
|
||||
using (Stream stream = File.OpenRead("archive.zip"))
|
||||
using (var reader = ReaderFactory.Open(stream))
|
||||
{
|
||||
while (reader.MoveToNextEntry())
|
||||
{
|
||||
if (!reader.Entry.IsDirectory)
|
||||
{
|
||||
using (var entryStream = reader.OpenEntryStream())
|
||||
{
|
||||
using (var outputStream = File.Create("output.bin"))
|
||||
{
|
||||
await reader.WriteEntryToAsync(outputStream, cancellationToken);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Extract all entries asynchronously:**
|
||||
```C#
|
||||
using (Stream stream = File.OpenRead("archive.tar.gz"))
|
||||
using (var reader = ReaderFactory.Open(stream))
|
||||
{
|
||||
await reader.WriteAllToDirectoryAsync(
|
||||
@"D:\temp",
|
||||
new ExtractionOptions()
|
||||
{
|
||||
ExtractFullPath = true,
|
||||
Overwrite = true
|
||||
},
|
||||
cancellationToken
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
**Open and process entry stream asynchronously:**
|
||||
```C#
|
||||
using (var archive = ZipArchive.Open("archive.zip"))
|
||||
{
|
||||
foreach (var entry in archive.Entries.Where(e => !e.IsDirectory))
|
||||
{
|
||||
using (var entryStream = await entry.OpenEntryStreamAsync(cancellationToken))
|
||||
{
|
||||
// Process the decompressed stream asynchronously
|
||||
await ProcessStreamAsync(entryStream, cancellationToken);
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Async Writer Examples
|
||||
|
||||
**Write single file asynchronously:**
|
||||
```C#
|
||||
using (Stream archiveStream = File.OpenWrite("output.zip"))
|
||||
using (var writer = WriterFactory.Open(archiveStream, ArchiveType.Zip, CompressionType.Deflate))
|
||||
{
|
||||
using (Stream fileStream = File.OpenRead("input.txt"))
|
||||
{
|
||||
await writer.WriteAsync("entry.txt", fileStream, DateTime.Now, cancellationToken);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Write entire directory asynchronously:**
|
||||
```C#
|
||||
using (Stream stream = File.OpenWrite("backup.tar.gz"))
|
||||
using (var writer = WriterFactory.Open(stream, ArchiveType.Tar, new WriterOptions(CompressionType.GZip)))
|
||||
{
|
||||
await writer.WriteAllAsync(
|
||||
@"D:\files",
|
||||
"*",
|
||||
SearchOption.AllDirectories,
|
||||
cancellationToken
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
**Write with progress tracking and cancellation:**
|
||||
```C#
|
||||
var cts = new CancellationTokenSource();
|
||||
|
||||
// Set timeout or cancel from UI
|
||||
cts.CancelAfter(TimeSpan.FromMinutes(5));
|
||||
|
||||
using (Stream stream = File.OpenWrite("archive.zip"))
|
||||
using (var writer = WriterFactory.Open(stream, ArchiveType.Zip, CompressionType.Deflate))
|
||||
{
|
||||
try
|
||||
{
|
||||
await writer.WriteAllAsync(@"D:\data", "*", SearchOption.AllDirectories, cts.Token);
|
||||
}
|
||||
catch (OperationCanceledException)
|
||||
{
|
||||
Console.WriteLine("Operation was cancelled");
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Archive Async Examples
|
||||
|
||||
**Extract from archive asynchronously:**
|
||||
```C#
|
||||
using (var archive = ZipArchive.Open("archive.zip"))
|
||||
{
|
||||
using (var reader = archive.ExtractAllEntries())
|
||||
{
|
||||
await reader.WriteAllToDirectoryAsync(
|
||||
@"C:\output",
|
||||
new ExtractionOptions() { ExtractFullPath = true, Overwrite = true },
|
||||
cancellationToken
|
||||
);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Benefits of Async Operations:**
|
||||
- Non-blocking I/O for better application responsiveness
|
||||
- Improved scalability for server applications
|
||||
- Support for cancellation via CancellationToken
|
||||
- Better resource utilization in async/await contexts
|
||||
- Compatible with modern .NET async patterns
|
||||
|
||||
@@ -144,12 +144,6 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
|
||||
/// <returns></returns>
|
||||
public IReader ExtractAllEntries()
|
||||
{
|
||||
if (!IsSolid && Type != ArchiveType.SevenZip)
|
||||
{
|
||||
throw new InvalidOperationException(
|
||||
"ExtractAllEntries can only be used on solid archives or 7Zip archives (which require random access)."
|
||||
);
|
||||
}
|
||||
((IArchiveExtractionListener)this).EnsureEntriesLoaded();
|
||||
return CreateReaderForSolidExtraction();
|
||||
}
|
||||
|
||||
@@ -20,9 +20,8 @@ public static class ArchiveFactory
|
||||
public static IArchive Open(Stream stream, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
readerOptions ??= new ReaderOptions();
|
||||
var factory = FindFactory<IArchiveFactory>(stream);
|
||||
stream = new SharpCompressStream(stream, bufferSize: readerOptions.BufferSize);
|
||||
return factory.Open(stream, readerOptions);
|
||||
return FindFactory<IArchiveFactory>(stream).Open(stream, readerOptions);
|
||||
}
|
||||
|
||||
public static IWritableArchive Create(ArchiveType type)
|
||||
@@ -46,7 +45,7 @@ public static class ArchiveFactory
|
||||
/// <param name="options"></param>
|
||||
public static IArchive Open(string filePath, ReaderOptions? options = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
return Open(new FileInfo(filePath), options);
|
||||
}
|
||||
|
||||
@@ -69,7 +68,7 @@ public static class ArchiveFactory
|
||||
/// <param name="options"></param>
|
||||
public static IArchive Open(IEnumerable<FileInfo> fileInfos, ReaderOptions? options = null)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var filesArray = fileInfos.ToArray();
|
||||
if (filesArray.Length == 0)
|
||||
{
|
||||
@@ -82,7 +81,7 @@ public static class ArchiveFactory
|
||||
return Open(fileInfo, options);
|
||||
}
|
||||
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
options ??= new ReaderOptions { LeaveStreamOpen = false };
|
||||
|
||||
return FindFactory<IMultiArchiveFactory>(fileInfo).Open(filesArray, options);
|
||||
@@ -95,7 +94,7 @@ public static class ArchiveFactory
|
||||
/// <param name="options"></param>
|
||||
public static IArchive Open(IEnumerable<Stream> streams, ReaderOptions? options = null)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var streamsArray = streams.ToArray();
|
||||
if (streamsArray.Length == 0)
|
||||
{
|
||||
@@ -108,7 +107,7 @@ public static class ArchiveFactory
|
||||
return Open(firstStream, options);
|
||||
}
|
||||
|
||||
firstStream.NotNull(nameof(firstStream));
|
||||
firstStream.CheckNotNull(nameof(firstStream));
|
||||
options ??= new ReaderOptions();
|
||||
|
||||
return FindFactory<IMultiArchiveFactory>(firstStream).Open(streamsArray, options);
|
||||
@@ -130,7 +129,7 @@ public static class ArchiveFactory
|
||||
private static T FindFactory<T>(FileInfo finfo)
|
||||
where T : IFactory
|
||||
{
|
||||
finfo.NotNull(nameof(finfo));
|
||||
finfo.CheckNotNull(nameof(finfo));
|
||||
using Stream stream = finfo.OpenRead();
|
||||
return FindFactory<T>(stream);
|
||||
}
|
||||
@@ -138,7 +137,7 @@ public static class ArchiveFactory
|
||||
private static T FindFactory<T>(Stream stream)
|
||||
where T : IFactory
|
||||
{
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
if (!stream.CanRead || !stream.CanSeek)
|
||||
{
|
||||
throw new ArgumentException("Stream should be readable and seekable");
|
||||
@@ -173,7 +172,7 @@ public static class ArchiveFactory
|
||||
int bufferSize = ReaderOptions.DefaultBufferSize
|
||||
)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
using Stream s = File.OpenRead(filePath);
|
||||
return IsArchive(s, out type, bufferSize);
|
||||
}
|
||||
@@ -185,7 +184,7 @@ public static class ArchiveFactory
|
||||
)
|
||||
{
|
||||
type = null;
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
|
||||
if (!stream.CanRead || !stream.CanSeek)
|
||||
{
|
||||
@@ -216,7 +215,7 @@ public static class ArchiveFactory
|
||||
/// <returns></returns>
|
||||
public static IEnumerable<string> GetFileParts(string part1)
|
||||
{
|
||||
part1.NotNullOrEmpty(nameof(part1));
|
||||
part1.CheckNotNullOrEmpty(nameof(part1));
|
||||
return GetFileParts(new FileInfo(part1)).Select(a => a.FullName);
|
||||
}
|
||||
|
||||
@@ -227,7 +226,7 @@ public static class ArchiveFactory
|
||||
/// <returns></returns>
|
||||
public static IEnumerable<FileInfo> GetFileParts(FileInfo part1)
|
||||
{
|
||||
part1.NotNull(nameof(part1));
|
||||
part1.CheckNotNull(nameof(part1));
|
||||
yield return part1;
|
||||
|
||||
foreach (var factory in Factory.Factories.OfType<IFactory>())
|
||||
|
||||
@@ -21,7 +21,7 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static GZipArchive Open(string filePath, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static GZipArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
return new GZipArchive(
|
||||
new SourceStream(
|
||||
fileInfo,
|
||||
@@ -52,7 +52,7 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var files = fileInfos.ToArray();
|
||||
return new GZipArchive(
|
||||
new SourceStream(
|
||||
@@ -70,7 +70,7 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static GZipArchive Open(IEnumerable<Stream> streams, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var strms = streams.ToArray();
|
||||
return new GZipArchive(
|
||||
new SourceStream(
|
||||
@@ -88,7 +88,7 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static GZipArchive Open(Stream stream, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
|
||||
if (stream is not { CanSeek: true })
|
||||
{
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common.GZip;
|
||||
|
||||
namespace SharpCompress.Archives.GZip;
|
||||
@@ -22,12 +20,6 @@ public class GZipArchiveEntry : GZipEntry, IArchiveEntry
|
||||
return Parts.Single().GetCompressedStream().NotNull();
|
||||
}
|
||||
|
||||
public virtual Task<Stream> OpenEntryStreamAsync(CancellationToken cancellationToken = default)
|
||||
{
|
||||
// GZip synchronous implementation is fast enough, just wrap it
|
||||
return Task.FromResult(OpenEntryStream());
|
||||
}
|
||||
|
||||
#region IArchiveEntry Members
|
||||
|
||||
public IArchive Archive { get; }
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common;
|
||||
|
||||
namespace SharpCompress.Archives;
|
||||
@@ -13,12 +11,6 @@ public interface IArchiveEntry : IEntry
|
||||
/// </summary>
|
||||
Stream OpenEntryStream();
|
||||
|
||||
/// <summary>
|
||||
/// Opens the current entry as a stream that will decompress as it is read asynchronously.
|
||||
/// Read the entire stream or use SkipEntry on EntryStream.
|
||||
/// </summary>
|
||||
Task<Stream> OpenEntryStreamAsync(CancellationToken cancellationToken = default);
|
||||
|
||||
/// <summary>
|
||||
/// The archive can find all the parts of the archive needed to extract this entry.
|
||||
/// </summary>
|
||||
|
||||
@@ -25,7 +25,7 @@ public static class IArchiveEntryExtensions
|
||||
using (entryStream)
|
||||
{
|
||||
using Stream s = new ListeningStream(streamListener, entryStream);
|
||||
s.CopyTo(streamToWriteTo);
|
||||
s.TransferTo(streamToWriteTo);
|
||||
}
|
||||
streamListener.FireEntryExtractionEnd(archiveEntry);
|
||||
}
|
||||
|
||||
@@ -45,10 +45,12 @@ public static class IArchiveExtensions
|
||||
var seenDirectories = new HashSet<string>();
|
||||
|
||||
// Extract
|
||||
foreach (var entry in archive.Entries)
|
||||
var entries = archive.ExtractAllEntries();
|
||||
while (entries.MoveToNextEntry())
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
|
||||
var entry = entries.Entry;
|
||||
if (entry.IsDirectory)
|
||||
{
|
||||
var dirPath = Path.Combine(destination, entry.Key.NotNull("Entry Key is null"));
|
||||
@@ -75,7 +77,7 @@ public static class IArchiveExtensions
|
||||
|
||||
// Write file
|
||||
using var fs = File.OpenWrite(path);
|
||||
entry.WriteTo(fs);
|
||||
entries.WriteEntryTo(fs);
|
||||
|
||||
// Update progress
|
||||
bytesRead += entry.Size;
|
||||
|
||||
@@ -95,7 +95,7 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
/// <param name="options"></param>
|
||||
public static RarArchive Open(string filePath, ReaderOptions? options = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
var fileInfo = new FileInfo(filePath);
|
||||
return new RarArchive(
|
||||
new SourceStream(
|
||||
@@ -113,7 +113,7 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
/// <param name="options"></param>
|
||||
public static RarArchive Open(FileInfo fileInfo, ReaderOptions? options = null)
|
||||
{
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
return new RarArchive(
|
||||
new SourceStream(
|
||||
fileInfo,
|
||||
@@ -130,7 +130,7 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
/// <param name="options"></param>
|
||||
public static RarArchive Open(Stream stream, ReaderOptions? options = null)
|
||||
{
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
|
||||
if (stream is not { CanSeek: true })
|
||||
{
|
||||
@@ -150,7 +150,7 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var files = fileInfos.ToArray();
|
||||
return new RarArchive(
|
||||
new SourceStream(
|
||||
@@ -168,7 +168,7 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static RarArchive Open(IEnumerable<Stream> streams, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var strms = streams.ToArray();
|
||||
return new RarArchive(
|
||||
new SourceStream(
|
||||
|
||||
@@ -2,8 +2,6 @@ using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Rar;
|
||||
using SharpCompress.Common.Rar.Headers;
|
||||
@@ -86,9 +84,6 @@ public class RarArchiveEntry : RarEntry, IArchiveEntry
|
||||
);
|
||||
}
|
||||
|
||||
public Task<Stream> OpenEntryStreamAsync(CancellationToken cancellationToken = default) =>
|
||||
Task.FromResult(OpenEntryStream());
|
||||
|
||||
public bool IsComplete
|
||||
{
|
||||
get
|
||||
|
||||
@@ -21,7 +21,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
/// <param name="readerOptions"></param>
|
||||
public static SevenZipArchive Open(string filePath, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty("filePath");
|
||||
filePath.CheckNotNullOrEmpty("filePath");
|
||||
return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
/// <param name="readerOptions"></param>
|
||||
public static SevenZipArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
fileInfo.NotNull("fileInfo");
|
||||
fileInfo.CheckNotNull("fileInfo");
|
||||
return new SevenZipArchive(
|
||||
new SourceStream(
|
||||
fileInfo,
|
||||
@@ -52,7 +52,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var files = fileInfos.ToArray();
|
||||
return new SevenZipArchive(
|
||||
new SourceStream(
|
||||
@@ -73,7 +73,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var strms = streams.ToArray();
|
||||
return new SevenZipArchive(
|
||||
new SourceStream(
|
||||
@@ -91,7 +91,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
/// <param name="readerOptions"></param>
|
||||
public static SevenZipArchive Open(Stream stream, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
stream.NotNull("stream");
|
||||
stream.CheckNotNull("stream");
|
||||
|
||||
if (stream is not { CanSeek: true })
|
||||
{
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common.SevenZip;
|
||||
|
||||
namespace SharpCompress.Archives.SevenZip;
|
||||
@@ -12,9 +10,6 @@ public class SevenZipArchiveEntry : SevenZipEntry, IArchiveEntry
|
||||
|
||||
public Stream OpenEntryStream() => FilePart.GetCompressedStream();
|
||||
|
||||
public Task<Stream> OpenEntryStreamAsync(CancellationToken cancellationToken = default) =>
|
||||
Task.FromResult(OpenEntryStream());
|
||||
|
||||
public IArchive Archive { get; }
|
||||
|
||||
public bool IsComplete => true;
|
||||
|
||||
@@ -22,7 +22,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static TarArchive Open(string filePath, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
@@ -33,7 +33,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static TarArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
return new TarArchive(
|
||||
new SourceStream(
|
||||
fileInfo,
|
||||
@@ -53,7 +53,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var files = fileInfos.ToArray();
|
||||
return new TarArchive(
|
||||
new SourceStream(
|
||||
@@ -71,7 +71,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static TarArchive Open(IEnumerable<Stream> streams, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var strms = streams.ToArray();
|
||||
return new TarArchive(
|
||||
new SourceStream(
|
||||
@@ -89,7 +89,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static TarArchive Open(Stream stream, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
|
||||
if (stream is not { CanSeek: true })
|
||||
{
|
||||
@@ -178,7 +178,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
using (var entryStream = entry.OpenEntryStream())
|
||||
{
|
||||
using var memoryStream = new MemoryStream();
|
||||
entryStream.CopyTo(memoryStream);
|
||||
entryStream.TransferTo(memoryStream);
|
||||
memoryStream.Position = 0;
|
||||
var bytes = memoryStream.ToArray();
|
||||
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Tar;
|
||||
|
||||
@@ -14,10 +12,6 @@ public class TarArchiveEntry : TarEntry, IArchiveEntry
|
||||
|
||||
public virtual Stream OpenEntryStream() => Parts.Single().GetCompressedStream().NotNull();
|
||||
|
||||
public virtual Task<Stream> OpenEntryStreamAsync(
|
||||
CancellationToken cancellationToken = default
|
||||
) => Task.FromResult(OpenEntryStream());
|
||||
|
||||
#region IArchiveEntry Members
|
||||
|
||||
public IArchive Archive { get; }
|
||||
|
||||
@@ -43,7 +43,7 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static ZipArchive Open(string filePath, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static ZipArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
return new ZipArchive(
|
||||
new SourceStream(
|
||||
fileInfo,
|
||||
@@ -74,7 +74,7 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var files = fileInfos.ToArray();
|
||||
return new ZipArchive(
|
||||
new SourceStream(
|
||||
@@ -92,7 +92,7 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static ZipArchive Open(IEnumerable<Stream> streams, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var strms = streams.ToArray();
|
||||
return new ZipArchive(
|
||||
new SourceStream(
|
||||
@@ -110,7 +110,7 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static ZipArchive Open(Stream stream, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
|
||||
if (stream is not { CanSeek: true })
|
||||
{
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common.Zip;
|
||||
|
||||
namespace SharpCompress.Archives.Zip;
|
||||
@@ -13,10 +11,6 @@ public class ZipArchiveEntry : ZipEntry, IArchiveEntry
|
||||
|
||||
public virtual Stream OpenEntryStream() => Parts.Single().GetCompressedStream().NotNull();
|
||||
|
||||
public virtual Task<Stream> OpenEntryStreamAsync(
|
||||
CancellationToken cancellationToken = default
|
||||
) => Task.FromResult(OpenEntryStream());
|
||||
|
||||
#region IArchiveEntry Members
|
||||
|
||||
public IArchive Archive { get; }
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
[assembly: CLSCompliant(true)]
|
||||
[assembly: CLSCompliant(false)]
|
||||
[assembly: InternalsVisibleTo(
|
||||
"SharpCompress.Test,PublicKey=0024000004800000940000000602000000240000525341310004000001000100158bebf1433f76dffc356733c138babea7a47536c65ed8009b16372c6f4edbb20554db74a62687f56b97c20a6ce8c4b123280279e33c894e7b3aa93ab3c573656fde4db576cfe07dba09619ead26375b25d2c4a8e43f7be257d712b0dd2eb546f67adb09281338618a58ac834fc038dd7e2740a7ab3591826252e4f4516306dc"
|
||||
)]
|
||||
|
||||
@@ -7,54 +7,53 @@ using System.Threading.Tasks;
|
||||
using SharpCompress.Common.GZip;
|
||||
using SharpCompress.Common.Tar;
|
||||
|
||||
namespace SharpCompress.Common.Arc
|
||||
namespace SharpCompress.Common.Arc;
|
||||
|
||||
public class ArcEntry : Entry
|
||||
{
|
||||
public class ArcEntry : Entry
|
||||
private readonly ArcFilePart? _filePart;
|
||||
|
||||
internal ArcEntry(ArcFilePart? filePart)
|
||||
{
|
||||
private readonly ArcFilePart? _filePart;
|
||||
|
||||
internal ArcEntry(ArcFilePart? filePart)
|
||||
{
|
||||
_filePart = filePart;
|
||||
}
|
||||
|
||||
public override long Crc
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_filePart == null)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
return _filePart.Header.Crc16;
|
||||
}
|
||||
}
|
||||
|
||||
public override string? Key => _filePart?.Header.Name;
|
||||
|
||||
public override string? LinkTarget => null;
|
||||
|
||||
public override long CompressedSize => _filePart?.Header.CompressedSize ?? 0;
|
||||
|
||||
public override CompressionType CompressionType =>
|
||||
_filePart?.Header.CompressionMethod ?? CompressionType.Unknown;
|
||||
|
||||
public override long Size => throw new NotImplementedException();
|
||||
|
||||
public override DateTime? LastModifiedTime => null;
|
||||
|
||||
public override DateTime? CreatedTime => null;
|
||||
|
||||
public override DateTime? LastAccessedTime => null;
|
||||
|
||||
public override DateTime? ArchivedTime => null;
|
||||
|
||||
public override bool IsEncrypted => false;
|
||||
|
||||
public override bool IsDirectory => false;
|
||||
|
||||
public override bool IsSplitAfter => false;
|
||||
|
||||
internal override IEnumerable<FilePart> Parts => _filePart.Empty();
|
||||
_filePart = filePart;
|
||||
}
|
||||
|
||||
public override long Crc
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_filePart == null)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
return _filePart.Header.Crc16;
|
||||
}
|
||||
}
|
||||
|
||||
public override string? Key => _filePart?.Header.Name;
|
||||
|
||||
public override string? LinkTarget => null;
|
||||
|
||||
public override long CompressedSize => _filePart?.Header.CompressedSize ?? 0;
|
||||
|
||||
public override CompressionType CompressionType =>
|
||||
_filePart?.Header.CompressionMethod ?? CompressionType.Unknown;
|
||||
|
||||
public override long Size => throw new NotImplementedException();
|
||||
|
||||
public override DateTime? LastModifiedTime => null;
|
||||
|
||||
public override DateTime? CreatedTime => null;
|
||||
|
||||
public override DateTime? LastAccessedTime => null;
|
||||
|
||||
public override DateTime? ArchivedTime => null;
|
||||
|
||||
public override bool IsEncrypted => false;
|
||||
|
||||
public override bool IsDirectory => false;
|
||||
|
||||
public override bool IsSplitAfter => false;
|
||||
|
||||
internal override IEnumerable<FilePart> Parts => _filePart.Empty();
|
||||
}
|
||||
|
||||
@@ -3,74 +3,73 @@ using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Arc
|
||||
namespace SharpCompress.Common.Arc;
|
||||
|
||||
public class ArcEntryHeader
|
||||
{
|
||||
public class ArcEntryHeader
|
||||
public ArchiveEncoding ArchiveEncoding { get; }
|
||||
public CompressionType CompressionMethod { get; private set; }
|
||||
public string? Name { get; private set; }
|
||||
public long CompressedSize { get; private set; }
|
||||
public DateTime DateTime { get; private set; }
|
||||
public int Crc16 { get; private set; }
|
||||
public long OriginalSize { get; private set; }
|
||||
public long DataStartPosition { get; private set; }
|
||||
|
||||
public ArcEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
public ArchiveEncoding ArchiveEncoding { get; }
|
||||
public CompressionType CompressionMethod { get; private set; }
|
||||
public string? Name { get; private set; }
|
||||
public long CompressedSize { get; private set; }
|
||||
public DateTime DateTime { get; private set; }
|
||||
public int Crc16 { get; private set; }
|
||||
public long OriginalSize { get; private set; }
|
||||
public long DataStartPosition { get; private set; }
|
||||
this.ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
public ArcEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
public ArcEntryHeader? ReadHeader(Stream stream)
|
||||
{
|
||||
byte[] headerBytes = new byte[29];
|
||||
if (stream.Read(headerBytes, 0, headerBytes.Length) != headerBytes.Length)
|
||||
{
|
||||
this.ArchiveEncoding = archiveEncoding;
|
||||
return null;
|
||||
}
|
||||
DataStartPosition = stream.Position;
|
||||
return LoadFrom(headerBytes);
|
||||
}
|
||||
|
||||
public ArcEntryHeader? ReadHeader(Stream stream)
|
||||
public ArcEntryHeader LoadFrom(byte[] headerBytes)
|
||||
{
|
||||
CompressionMethod = GetCompressionType(headerBytes[1]);
|
||||
|
||||
// Read name
|
||||
int nameEnd = Array.IndexOf(headerBytes, (byte)0, 1); // Find null terminator
|
||||
Name = Encoding.UTF8.GetString(headerBytes, 2, nameEnd > 0 ? nameEnd - 2 : 12);
|
||||
|
||||
int offset = 15;
|
||||
CompressedSize = BitConverter.ToUInt32(headerBytes, offset);
|
||||
offset += 4;
|
||||
uint rawDateTime = BitConverter.ToUInt32(headerBytes, offset);
|
||||
DateTime = ConvertToDateTime(rawDateTime);
|
||||
offset += 4;
|
||||
Crc16 = BitConverter.ToUInt16(headerBytes, offset);
|
||||
offset += 2;
|
||||
OriginalSize = BitConverter.ToUInt32(headerBytes, offset);
|
||||
return this;
|
||||
}
|
||||
|
||||
private CompressionType GetCompressionType(byte value)
|
||||
{
|
||||
return value switch
|
||||
{
|
||||
byte[] headerBytes = new byte[29];
|
||||
if (stream.Read(headerBytes, 0, headerBytes.Length) != headerBytes.Length)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
DataStartPosition = stream.Position;
|
||||
return LoadFrom(headerBytes);
|
||||
}
|
||||
1 or 2 => CompressionType.None,
|
||||
3 => CompressionType.RLE90,
|
||||
4 => CompressionType.Squeezed,
|
||||
5 or 6 or 7 or 8 => CompressionType.Crunched,
|
||||
9 => CompressionType.Squashed,
|
||||
10 => CompressionType.Crushed,
|
||||
11 => CompressionType.Distilled,
|
||||
_ => CompressionType.Unknown,
|
||||
};
|
||||
}
|
||||
|
||||
public ArcEntryHeader LoadFrom(byte[] headerBytes)
|
||||
{
|
||||
CompressionMethod = GetCompressionType(headerBytes[1]);
|
||||
|
||||
// Read name
|
||||
int nameEnd = Array.IndexOf(headerBytes, (byte)0, 1); // Find null terminator
|
||||
Name = Encoding.UTF8.GetString(headerBytes, 2, nameEnd > 0 ? nameEnd - 2 : 12);
|
||||
|
||||
int offset = 15;
|
||||
CompressedSize = BitConverter.ToUInt32(headerBytes, offset);
|
||||
offset += 4;
|
||||
uint rawDateTime = BitConverter.ToUInt32(headerBytes, offset);
|
||||
DateTime = ConvertToDateTime(rawDateTime);
|
||||
offset += 4;
|
||||
Crc16 = BitConverter.ToUInt16(headerBytes, offset);
|
||||
offset += 2;
|
||||
OriginalSize = BitConverter.ToUInt32(headerBytes, offset);
|
||||
return this;
|
||||
}
|
||||
|
||||
private CompressionType GetCompressionType(byte value)
|
||||
{
|
||||
return value switch
|
||||
{
|
||||
1 or 2 => CompressionType.None,
|
||||
3 => CompressionType.RLE90,
|
||||
4 => CompressionType.Squeezed,
|
||||
5 or 6 or 7 or 8 => CompressionType.Crunched,
|
||||
9 => CompressionType.Squashed,
|
||||
10 => CompressionType.Crushed,
|
||||
11 => CompressionType.Distilled,
|
||||
_ => CompressionType.Unknown,
|
||||
};
|
||||
}
|
||||
|
||||
public static DateTime ConvertToDateTime(long rawDateTime)
|
||||
{
|
||||
// Convert Unix timestamp to DateTime (UTC)
|
||||
return DateTimeOffset.FromUnixTimeSeconds(rawDateTime).UtcDateTime;
|
||||
}
|
||||
public static DateTime ConvertToDateTime(long rawDateTime)
|
||||
{
|
||||
// Convert Unix timestamp to DateTime (UTC)
|
||||
return DateTimeOffset.FromUnixTimeSeconds(rawDateTime).UtcDateTime;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,63 +13,55 @@ using SharpCompress.Compressors.RLE90;
|
||||
using SharpCompress.Compressors.Squeezed;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Common.Arc
|
||||
namespace SharpCompress.Common.Arc;
|
||||
|
||||
public class ArcFilePart : FilePart
|
||||
{
|
||||
public class ArcFilePart : FilePart
|
||||
private readonly Stream? _stream;
|
||||
|
||||
internal ArcFilePart(ArcEntryHeader localArcHeader, Stream? seekableStream)
|
||||
: base(localArcHeader.ArchiveEncoding)
|
||||
{
|
||||
private readonly Stream? _stream;
|
||||
|
||||
internal ArcFilePart(ArcEntryHeader localArcHeader, Stream? seekableStream)
|
||||
: base(localArcHeader.ArchiveEncoding)
|
||||
{
|
||||
_stream = seekableStream;
|
||||
Header = localArcHeader;
|
||||
}
|
||||
|
||||
internal ArcEntryHeader Header { get; set; }
|
||||
|
||||
internal override string? FilePartName => Header.Name;
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (_stream != null)
|
||||
{
|
||||
Stream compressedStream;
|
||||
switch (Header.CompressionMethod)
|
||||
{
|
||||
case CompressionType.None:
|
||||
compressedStream = new ReadOnlySubStream(
|
||||
_stream,
|
||||
Header.DataStartPosition,
|
||||
Header.CompressedSize
|
||||
);
|
||||
break;
|
||||
case CompressionType.RLE90:
|
||||
compressedStream = new RunLength90Stream(
|
||||
_stream,
|
||||
(int)Header.CompressedSize
|
||||
);
|
||||
break;
|
||||
case CompressionType.Squeezed:
|
||||
compressedStream = new SqueezeStream(_stream, (int)Header.CompressedSize);
|
||||
break;
|
||||
case CompressionType.Crunched:
|
||||
compressedStream = new ArcLzwStream(
|
||||
_stream,
|
||||
(int)Header.CompressedSize,
|
||||
true
|
||||
);
|
||||
break;
|
||||
default:
|
||||
throw new NotSupportedException(
|
||||
"CompressionMethod: " + Header.CompressionMethod
|
||||
);
|
||||
}
|
||||
return compressedStream;
|
||||
}
|
||||
return _stream.NotNull();
|
||||
}
|
||||
|
||||
internal override Stream? GetRawStream() => _stream;
|
||||
_stream = seekableStream;
|
||||
Header = localArcHeader;
|
||||
}
|
||||
|
||||
internal ArcEntryHeader Header { get; set; }
|
||||
|
||||
internal override string? FilePartName => Header.Name;
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (_stream != null)
|
||||
{
|
||||
Stream compressedStream;
|
||||
switch (Header.CompressionMethod)
|
||||
{
|
||||
case CompressionType.None:
|
||||
compressedStream = new ReadOnlySubStream(
|
||||
_stream,
|
||||
Header.DataStartPosition,
|
||||
Header.CompressedSize
|
||||
);
|
||||
break;
|
||||
case CompressionType.RLE90:
|
||||
compressedStream = new RunLength90Stream(_stream, (int)Header.CompressedSize);
|
||||
break;
|
||||
case CompressionType.Squeezed:
|
||||
compressedStream = new SqueezeStream(_stream, (int)Header.CompressedSize);
|
||||
break;
|
||||
case CompressionType.Crunched:
|
||||
compressedStream = new ArcLzwStream(_stream, (int)Header.CompressedSize, true);
|
||||
break;
|
||||
default:
|
||||
throw new NotSupportedException(
|
||||
"CompressionMethod: " + Header.CompressionMethod
|
||||
);
|
||||
}
|
||||
return compressedStream;
|
||||
}
|
||||
return _stream.NotNull();
|
||||
}
|
||||
|
||||
internal override Stream? GetRawStream() => _stream;
|
||||
}
|
||||
|
||||
@@ -6,11 +6,10 @@ using System.Text;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
namespace SharpCompress.Common.Arc
|
||||
namespace SharpCompress.Common.Arc;
|
||||
|
||||
public class ArcVolume : Volume
|
||||
{
|
||||
public class ArcVolume : Volume
|
||||
{
|
||||
public ArcVolume(Stream stream, ReaderOptions readerOptions, int index = 0)
|
||||
: base(stream, readerOptions, index) { }
|
||||
}
|
||||
public ArcVolume(Stream stream, ReaderOptions readerOptions, int index = 0)
|
||||
: base(stream, readerOptions, index) { }
|
||||
}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.IO.Compression;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.IO;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
@@ -53,15 +51,6 @@ public class EntryStream : Stream, IStreamStack
|
||||
_completed = true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Asynchronously skip the rest of the entry stream.
|
||||
/// </summary>
|
||||
public async Task SkipEntryAsync(CancellationToken cancellationToken = default)
|
||||
{
|
||||
await this.SkipAsync(cancellationToken).ConfigureAwait(false);
|
||||
_completed = true;
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (!(_completed || _reader.Cancelled))
|
||||
@@ -94,40 +83,6 @@ public class EntryStream : Stream, IStreamStack
|
||||
_stream.Dispose();
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask DisposeAsync()
|
||||
{
|
||||
if (!(_completed || _reader.Cancelled))
|
||||
{
|
||||
await SkipEntryAsync().ConfigureAwait(false);
|
||||
}
|
||||
|
||||
//Need a safe standard approach to this - it's okay for compression to overreads. Handling needs to be standardised
|
||||
if (_stream is IStreamStack ss)
|
||||
{
|
||||
if (ss.BaseStream() is SharpCompress.Compressors.Deflate.DeflateStream deflateStream)
|
||||
{
|
||||
await deflateStream.FlushAsync().ConfigureAwait(false);
|
||||
}
|
||||
else if (ss.BaseStream() is SharpCompress.Compressors.LZMA.LzmaStream lzmaStream)
|
||||
{
|
||||
await lzmaStream.FlushAsync().ConfigureAwait(false);
|
||||
}
|
||||
}
|
||||
|
||||
if (_isDisposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
_isDisposed = true;
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugDispose(typeof(EntryStream));
|
||||
#endif
|
||||
await base.DisposeAsync().ConfigureAwait(false);
|
||||
await _stream.DisposeAsync().ConfigureAwait(false);
|
||||
}
|
||||
#endif
|
||||
|
||||
public override bool CanRead => true;
|
||||
|
||||
public override bool CanSeek => false;
|
||||
@@ -136,8 +91,6 @@ public class EntryStream : Stream, IStreamStack
|
||||
|
||||
public override void Flush() { }
|
||||
|
||||
public override Task FlushAsync(CancellationToken cancellationToken) => Task.CompletedTask;
|
||||
|
||||
public override long Length => _stream.Length;
|
||||
|
||||
public override long Position
|
||||
@@ -156,38 +109,6 @@ public class EntryStream : Stream, IStreamStack
|
||||
return read;
|
||||
}
|
||||
|
||||
public override async Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
var read = await _stream
|
||||
.ReadAsync(buffer, offset, count, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
if (read <= 0)
|
||||
{
|
||||
_completed = true;
|
||||
}
|
||||
return read;
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
var read = await _stream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false);
|
||||
if (read <= 0)
|
||||
{
|
||||
_completed = true;
|
||||
}
|
||||
return read;
|
||||
}
|
||||
#endif
|
||||
|
||||
public override int ReadByte()
|
||||
{
|
||||
var value = _stream.ReadByte();
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common;
|
||||
|
||||
@@ -118,115 +116,4 @@ internal static class ExtractionMethods
|
||||
entry.PreserveExtractionOptions(destinationFileName, options);
|
||||
}
|
||||
}
|
||||
|
||||
public static async Task WriteEntryToDirectoryAsync(
|
||||
IEntry entry,
|
||||
string destinationDirectory,
|
||||
ExtractionOptions? options,
|
||||
Func<string, ExtractionOptions?, Task> writeAsync,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
string destinationFileName;
|
||||
var fullDestinationDirectoryPath = Path.GetFullPath(destinationDirectory);
|
||||
|
||||
//check for trailing slash.
|
||||
if (
|
||||
fullDestinationDirectoryPath[fullDestinationDirectoryPath.Length - 1]
|
||||
!= Path.DirectorySeparatorChar
|
||||
)
|
||||
{
|
||||
fullDestinationDirectoryPath += Path.DirectorySeparatorChar;
|
||||
}
|
||||
|
||||
if (!Directory.Exists(fullDestinationDirectoryPath))
|
||||
{
|
||||
throw new ExtractionException(
|
||||
$"Directory does not exist to extract to: {fullDestinationDirectoryPath}"
|
||||
);
|
||||
}
|
||||
|
||||
options ??= new ExtractionOptions() { Overwrite = true };
|
||||
|
||||
var file = Path.GetFileName(entry.Key.NotNull("Entry Key is null")).NotNull("File is null");
|
||||
file = Utility.ReplaceInvalidFileNameChars(file);
|
||||
if (options.ExtractFullPath)
|
||||
{
|
||||
var folder = Path.GetDirectoryName(entry.Key.NotNull("Entry Key is null"))
|
||||
.NotNull("Directory is null");
|
||||
var destdir = Path.GetFullPath(Path.Combine(fullDestinationDirectoryPath, folder));
|
||||
|
||||
if (!Directory.Exists(destdir))
|
||||
{
|
||||
if (!destdir.StartsWith(fullDestinationDirectoryPath, StringComparison.Ordinal))
|
||||
{
|
||||
throw new ExtractionException(
|
||||
"Entry is trying to create a directory outside of the destination directory."
|
||||
);
|
||||
}
|
||||
|
||||
Directory.CreateDirectory(destdir);
|
||||
}
|
||||
destinationFileName = Path.Combine(destdir, file);
|
||||
}
|
||||
else
|
||||
{
|
||||
destinationFileName = Path.Combine(fullDestinationDirectoryPath, file);
|
||||
}
|
||||
|
||||
if (!entry.IsDirectory)
|
||||
{
|
||||
destinationFileName = Path.GetFullPath(destinationFileName);
|
||||
|
||||
if (
|
||||
!destinationFileName.StartsWith(
|
||||
fullDestinationDirectoryPath,
|
||||
StringComparison.Ordinal
|
||||
)
|
||||
)
|
||||
{
|
||||
throw new ExtractionException(
|
||||
"Entry is trying to write a file outside of the destination directory."
|
||||
);
|
||||
}
|
||||
await writeAsync(destinationFileName, options).ConfigureAwait(false);
|
||||
}
|
||||
else if (options.ExtractFullPath && !Directory.Exists(destinationFileName))
|
||||
{
|
||||
Directory.CreateDirectory(destinationFileName);
|
||||
}
|
||||
}
|
||||
|
||||
public static async Task WriteEntryToFileAsync(
|
||||
IEntry entry,
|
||||
string destinationFileName,
|
||||
ExtractionOptions? options,
|
||||
Func<string, FileMode, Task> openAndWriteAsync,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (entry.LinkTarget != null)
|
||||
{
|
||||
if (options?.WriteSymbolicLink is null)
|
||||
{
|
||||
throw new ExtractionException(
|
||||
"Entry is a symbolic link but ExtractionOptions.WriteSymbolicLink delegate is null"
|
||||
);
|
||||
}
|
||||
options.WriteSymbolicLink(destinationFileName, entry.LinkTarget);
|
||||
}
|
||||
else
|
||||
{
|
||||
var fm = FileMode.Create;
|
||||
options ??= new ExtractionOptions() { Overwrite = true };
|
||||
|
||||
if (!options.Overwrite)
|
||||
{
|
||||
fm = FileMode.CreateNew;
|
||||
}
|
||||
|
||||
await openAndWriteAsync(destinationFileName, fm).ConfigureAwait(false);
|
||||
entry.PreserveExtractionOptions(destinationFileName, options);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,36 +66,6 @@ internal class TarReadOnlySubStream : SharpCompressStream, IStreamStack
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async System.Threading.Tasks.ValueTask DisposeAsync()
|
||||
{
|
||||
if (_isDisposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
_isDisposed = true;
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugDispose(typeof(TarReadOnlySubStream));
|
||||
#endif
|
||||
// Ensure we read all remaining blocks for this entry.
|
||||
await Stream.SkipAsync(BytesLeftToRead).ConfigureAwait(false);
|
||||
_amountRead += BytesLeftToRead;
|
||||
|
||||
// If the last block wasn't a full 512 bytes, skip the remaining padding bytes.
|
||||
var bytesInLastBlock = _amountRead % 512;
|
||||
|
||||
if (bytesInLastBlock != 0)
|
||||
{
|
||||
await Stream.SkipAsync(512 - bytesInLastBlock).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
// Call base Dispose instead of base DisposeAsync to avoid double disposal
|
||||
base.Dispose(true);
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
#endif
|
||||
|
||||
private long BytesLeftToRead { get; set; }
|
||||
|
||||
public override bool CanRead => true;
|
||||
@@ -106,10 +76,6 @@ internal class TarReadOnlySubStream : SharpCompressStream, IStreamStack
|
||||
|
||||
public override void Flush() { }
|
||||
|
||||
public override System.Threading.Tasks.Task FlushAsync(
|
||||
System.Threading.CancellationToken cancellationToken
|
||||
) => System.Threading.Tasks.Task.CompletedTask;
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
public override long Position
|
||||
@@ -148,48 +114,6 @@ internal class TarReadOnlySubStream : SharpCompressStream, IStreamStack
|
||||
return value;
|
||||
}
|
||||
|
||||
public override async System.Threading.Tasks.Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
System.Threading.CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
if (BytesLeftToRead < count)
|
||||
{
|
||||
count = (int)BytesLeftToRead;
|
||||
}
|
||||
var read = await Stream
|
||||
.ReadAsync(buffer, offset, count, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
if (read > 0)
|
||||
{
|
||||
BytesLeftToRead -= read;
|
||||
_amountRead += read;
|
||||
}
|
||||
return read;
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async System.Threading.Tasks.ValueTask<int> ReadAsync(
|
||||
System.Memory<byte> buffer,
|
||||
System.Threading.CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (BytesLeftToRead < buffer.Length)
|
||||
{
|
||||
buffer = buffer.Slice(0, (int)BytesLeftToRead);
|
||||
}
|
||||
var read = await Stream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false);
|
||||
if (read > 0)
|
||||
{
|
||||
BytesLeftToRead -= read;
|
||||
_amountRead += read;
|
||||
}
|
||||
return read;
|
||||
}
|
||||
#endif
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
|
||||
@@ -91,15 +91,8 @@ internal abstract class ZipFileEntry : ZipHeader
|
||||
|
||||
protected void LoadExtra(byte[] extra)
|
||||
{
|
||||
for (var i = 0; i < extra.Length; )
|
||||
for (var i = 0; i < extra.Length - 4; )
|
||||
{
|
||||
// Ensure we have at least a header (2-byte ID + 2-byte length)
|
||||
if (i + 4 > extra.Length)
|
||||
{
|
||||
// Incomplete header — stop parsing extras
|
||||
break;
|
||||
}
|
||||
|
||||
var type = (ExtraDataType)BinaryPrimitives.ReadUInt16LittleEndian(extra.AsSpan(i));
|
||||
if (!Enum.IsDefined(typeof(ExtraDataType), type))
|
||||
{
|
||||
@@ -113,17 +106,7 @@ internal abstract class ZipFileEntry : ZipHeader
|
||||
if (length > extra.Length)
|
||||
{
|
||||
// bad extras block
|
||||
break; // allow processing optional other blocks
|
||||
}
|
||||
// Some ZIP files contain vendor-specific or malformed extra fields where the declared
|
||||
// data length extends beyond the remaining buffer. This adjustment ensures that
|
||||
// we only read data within bounds (i + 4 + length <= extra.Length)
|
||||
// The example here is: 41 43 18 00 41 52 43 30 46 EB FF FF 51 29 03 C6 03 00 00 00 00 00 00 00 00
|
||||
// No existing zip utility uses 0x4341 ('AC')
|
||||
if (i + 4 + length > extra.Length)
|
||||
{
|
||||
// incomplete or corrupt field
|
||||
break; // allow processing optional other blocks
|
||||
return;
|
||||
}
|
||||
|
||||
var data = new byte[length];
|
||||
|
||||
@@ -13,8 +13,8 @@ using SharpCompress.Compressors.PPMd;
|
||||
using SharpCompress.Compressors.Reduce;
|
||||
using SharpCompress.Compressors.Shrink;
|
||||
using SharpCompress.Compressors.Xz;
|
||||
using SharpCompress.Compressors.ZStandard;
|
||||
using SharpCompress.IO;
|
||||
using ZstdSharp;
|
||||
|
||||
namespace SharpCompress.Common.Zip;
|
||||
|
||||
|
||||
@@ -28,7 +28,6 @@ using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate;
|
||||
@@ -290,34 +289,6 @@ public class DeflateStream : Stream, IStreamStack
|
||||
_baseStream.Flush();
|
||||
}
|
||||
|
||||
public override async Task FlushAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("DeflateStream");
|
||||
}
|
||||
await _baseStream.FlushAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask DisposeAsync()
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
_disposed = true;
|
||||
if (_baseStream != null)
|
||||
{
|
||||
await _baseStream.DisposeAsync().ConfigureAwait(false);
|
||||
}
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugDispose(typeof(DeflateStream));
|
||||
#endif
|
||||
await base.DisposeAsync().ConfigureAwait(false);
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Read data from the stream.
|
||||
/// </summary>
|
||||
@@ -354,36 +325,6 @@ public class DeflateStream : Stream, IStreamStack
|
||||
return _baseStream.Read(buffer, offset, count);
|
||||
}
|
||||
|
||||
public override async Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("DeflateStream");
|
||||
}
|
||||
return await _baseStream
|
||||
.ReadAsync(buffer, offset, count, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("DeflateStream");
|
||||
}
|
||||
return await _baseStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
#endif
|
||||
|
||||
public override int ReadByte()
|
||||
{
|
||||
if (_disposed)
|
||||
@@ -445,36 +386,6 @@ public class DeflateStream : Stream, IStreamStack
|
||||
_baseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
public override async Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("DeflateStream");
|
||||
}
|
||||
await _baseStream
|
||||
.WriteAsync(buffer, offset, count, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask WriteAsync(
|
||||
ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("DeflateStream");
|
||||
}
|
||||
await _baseStream.WriteAsync(buffer, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
#endif
|
||||
|
||||
public override void WriteByte(byte value)
|
||||
{
|
||||
if (_disposed)
|
||||
|
||||
@@ -30,8 +30,6 @@ using System;
|
||||
using System.Buffers.Binary;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate;
|
||||
@@ -259,15 +257,6 @@ public class GZipStream : Stream, IStreamStack
|
||||
BaseStream.Flush();
|
||||
}
|
||||
|
||||
public override async Task FlushAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
await BaseStream.FlushAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Read and decompress data from the source stream.
|
||||
/// </summary>
|
||||
@@ -320,54 +309,6 @@ public class GZipStream : Stream, IStreamStack
|
||||
return n;
|
||||
}
|
||||
|
||||
public override async Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
var n = await BaseStream
|
||||
.ReadAsync(buffer, offset, count, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
if (!_firstReadDone)
|
||||
{
|
||||
_firstReadDone = true;
|
||||
FileName = BaseStream._GzipFileName;
|
||||
Comment = BaseStream._GzipComment;
|
||||
LastModified = BaseStream._GzipMtime;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
var n = await BaseStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
if (!_firstReadDone)
|
||||
{
|
||||
_firstReadDone = true;
|
||||
FileName = BaseStream._GzipFileName;
|
||||
Comment = BaseStream._GzipComment;
|
||||
LastModified = BaseStream._GzipMtime;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Calling this method always throws a <see cref="NotImplementedException"/>.
|
||||
/// </summary>
|
||||
@@ -427,77 +368,6 @@ public class GZipStream : Stream, IStreamStack
|
||||
BaseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
public override async Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Undefined)
|
||||
{
|
||||
if (BaseStream._wantCompress)
|
||||
{
|
||||
// first write in compression, therefore, emit the GZIP header
|
||||
_headerByteCount = EmitHeader();
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new InvalidOperationException();
|
||||
}
|
||||
}
|
||||
|
||||
await BaseStream.WriteAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask WriteAsync(
|
||||
ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("GZipStream");
|
||||
}
|
||||
if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Undefined)
|
||||
{
|
||||
if (BaseStream._wantCompress)
|
||||
{
|
||||
// first write in compression, therefore, emit the GZIP header
|
||||
_headerByteCount = EmitHeader();
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new InvalidOperationException();
|
||||
}
|
||||
}
|
||||
|
||||
await BaseStream.WriteAsync(buffer, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public override async ValueTask DisposeAsync()
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
_disposed = true;
|
||||
if (BaseStream != null)
|
||||
{
|
||||
await BaseStream.DisposeAsync().ConfigureAwait(false);
|
||||
}
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugDispose(typeof(GZipStream));
|
||||
#endif
|
||||
await base.DisposeAsync().ConfigureAwait(false);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endregion Stream methods
|
||||
|
||||
public string? Comment
|
||||
|
||||
@@ -31,8 +31,6 @@ using System.Buffers.Binary;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common.Tar.Headers;
|
||||
using SharpCompress.IO;
|
||||
|
||||
@@ -199,69 +197,6 @@ internal class ZlibBaseStream : Stream, IStreamStack
|
||||
} while (!done);
|
||||
}
|
||||
|
||||
public override async Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
// workitem 7159
|
||||
// calculate the CRC on the unccompressed data (before writing)
|
||||
if (crc != null)
|
||||
{
|
||||
crc.SlurpBlock(buffer, offset, count);
|
||||
}
|
||||
|
||||
if (_streamMode == StreamMode.Undefined)
|
||||
{
|
||||
_streamMode = StreamMode.Writer;
|
||||
}
|
||||
else if (_streamMode != StreamMode.Writer)
|
||||
{
|
||||
throw new ZlibException("Cannot Write after Reading.");
|
||||
}
|
||||
|
||||
if (count == 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
// first reference of z property will initialize the private var _z
|
||||
z.InputBuffer = buffer;
|
||||
_z.NextIn = offset;
|
||||
_z.AvailableBytesIn = count;
|
||||
var done = false;
|
||||
do
|
||||
{
|
||||
_z.OutputBuffer = workingBuffer;
|
||||
_z.NextOut = 0;
|
||||
_z.AvailableBytesOut = _workingBuffer.Length;
|
||||
var rc = (_wantCompress) ? _z.Deflate(_flushMode) : _z.Inflate(_flushMode);
|
||||
if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
|
||||
{
|
||||
throw new ZlibException((_wantCompress ? "de" : "in") + "flating: " + _z.Message);
|
||||
}
|
||||
|
||||
await _stream
|
||||
.WriteAsync(
|
||||
_workingBuffer,
|
||||
0,
|
||||
_workingBuffer.Length - _z.AvailableBytesOut,
|
||||
cancellationToken
|
||||
)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0;
|
||||
|
||||
// If GZIP and de-compress, we're done when 8 bytes remain.
|
||||
if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress)
|
||||
{
|
||||
done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0);
|
||||
}
|
||||
} while (!done);
|
||||
}
|
||||
|
||||
private void finish()
|
||||
{
|
||||
if (_z is null)
|
||||
@@ -400,111 +335,6 @@ internal class ZlibBaseStream : Stream, IStreamStack
|
||||
}
|
||||
}
|
||||
|
||||
private async Task finishAsync(CancellationToken cancellationToken = default)
|
||||
{
|
||||
if (_z is null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if (_streamMode == StreamMode.Writer)
|
||||
{
|
||||
var done = false;
|
||||
do
|
||||
{
|
||||
_z.OutputBuffer = workingBuffer;
|
||||
_z.NextOut = 0;
|
||||
_z.AvailableBytesOut = _workingBuffer.Length;
|
||||
var rc =
|
||||
(_wantCompress) ? _z.Deflate(FlushType.Finish) : _z.Inflate(FlushType.Finish);
|
||||
|
||||
if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
|
||||
{
|
||||
var verb = (_wantCompress ? "de" : "in") + "flating";
|
||||
if (_z.Message is null)
|
||||
{
|
||||
throw new ZlibException(String.Format("{0}: (rc = {1})", verb, rc));
|
||||
}
|
||||
throw new ZlibException(verb + ": " + _z.Message);
|
||||
}
|
||||
|
||||
if (_workingBuffer.Length - _z.AvailableBytesOut > 0)
|
||||
{
|
||||
await _stream
|
||||
.WriteAsync(
|
||||
_workingBuffer,
|
||||
0,
|
||||
_workingBuffer.Length - _z.AvailableBytesOut,
|
||||
cancellationToken
|
||||
)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0;
|
||||
|
||||
// If GZIP and de-compress, we're done when 8 bytes remain.
|
||||
if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress)
|
||||
{
|
||||
done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0);
|
||||
}
|
||||
} while (!done);
|
||||
|
||||
await FlushAsync(cancellationToken).ConfigureAwait(false);
|
||||
|
||||
// workitem 7159
|
||||
if (_flavor == ZlibStreamFlavor.GZIP)
|
||||
{
|
||||
if (_wantCompress)
|
||||
{
|
||||
// Emit the GZIP trailer: CRC32 and size mod 2^32
|
||||
byte[] intBuf = new byte[4];
|
||||
BinaryPrimitives.WriteInt32LittleEndian(intBuf, crc.Crc32Result);
|
||||
await _stream.WriteAsync(intBuf, 0, 4, cancellationToken).ConfigureAwait(false);
|
||||
var c2 = (int)(crc.TotalBytesRead & 0x00000000FFFFFFFF);
|
||||
BinaryPrimitives.WriteInt32LittleEndian(intBuf, c2);
|
||||
await _stream.WriteAsync(intBuf, 0, 4, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new ZlibException("Writing with decompression is not supported.");
|
||||
}
|
||||
}
|
||||
}
|
||||
// workitem 7159
|
||||
else if (_streamMode == StreamMode.Reader)
|
||||
{
|
||||
if (_flavor == ZlibStreamFlavor.GZIP)
|
||||
{
|
||||
if (!_wantCompress)
|
||||
{
|
||||
// workitem 8501: handle edge case (decompress empty stream)
|
||||
if (_z.TotalBytesOut == 0L)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
// Read and potentially verify the GZIP trailer: CRC32 and size mod 2^32
|
||||
byte[] trailer = new byte[8];
|
||||
|
||||
// workitem 8679
|
||||
if (_z.AvailableBytesIn != 8)
|
||||
{
|
||||
// Make sure we have read to the end of the stream
|
||||
_z.InputBuffer.AsSpan(_z.NextIn, _z.AvailableBytesIn).CopyTo(trailer);
|
||||
var bytesNeeded = 8 - _z.AvailableBytesIn;
|
||||
var bytesRead = await _stream
|
||||
.ReadAsync(trailer, _z.AvailableBytesIn, bytesNeeded, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new ZlibException("Reading with compression is not supported.");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void end()
|
||||
{
|
||||
if (z is null)
|
||||
@@ -552,38 +382,6 @@ internal class ZlibBaseStream : Stream, IStreamStack
|
||||
}
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask DisposeAsync()
|
||||
{
|
||||
if (isDisposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
isDisposed = true;
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugDispose(typeof(ZlibBaseStream));
|
||||
#endif
|
||||
await base.DisposeAsync().ConfigureAwait(false);
|
||||
if (_stream is null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
try
|
||||
{
|
||||
await finishAsync().ConfigureAwait(false);
|
||||
}
|
||||
finally
|
||||
{
|
||||
end();
|
||||
if (_stream != null)
|
||||
{
|
||||
await _stream.DisposeAsync().ConfigureAwait(false);
|
||||
_stream = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
_stream.Flush();
|
||||
@@ -592,14 +390,6 @@ internal class ZlibBaseStream : Stream, IStreamStack
|
||||
z.AvailableBytesIn = 0;
|
||||
}
|
||||
|
||||
public override async Task FlushAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
await _stream.FlushAsync(cancellationToken).ConfigureAwait(false);
|
||||
//rewind the buffer
|
||||
((IStreamStack)this).Rewind(z.AvailableBytesIn); //unused
|
||||
z.AvailableBytesIn = 0;
|
||||
}
|
||||
|
||||
public override Int64 Seek(Int64 offset, SeekOrigin origin) =>
|
||||
throw new NotSupportedException();
|
||||
|
||||
@@ -646,31 +436,6 @@ internal class ZlibBaseStream : Stream, IStreamStack
|
||||
return _encoding.GetString(buffer, 0, buffer.Length);
|
||||
}
|
||||
|
||||
private async Task<string> ReadZeroTerminatedStringAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
var list = new List<byte>();
|
||||
var done = false;
|
||||
do
|
||||
{
|
||||
// workitem 7740
|
||||
var n = await _stream.ReadAsync(_buf1, 0, 1, cancellationToken).ConfigureAwait(false);
|
||||
if (n != 1)
|
||||
{
|
||||
throw new ZlibException("Unexpected EOF reading GZIP header.");
|
||||
}
|
||||
if (_buf1[0] == 0)
|
||||
{
|
||||
done = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
list.Add(_buf1[0]);
|
||||
}
|
||||
} while (!done);
|
||||
var buffer = list.ToArray();
|
||||
return _encoding.GetString(buffer, 0, buffer.Length);
|
||||
}
|
||||
|
||||
private int _ReadAndValidateGzipHeader()
|
||||
{
|
||||
var totalBytesRead = 0;
|
||||
@@ -729,68 +494,6 @@ internal class ZlibBaseStream : Stream, IStreamStack
|
||||
return totalBytesRead;
|
||||
}
|
||||
|
||||
private async Task<int> _ReadAndValidateGzipHeaderAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
var totalBytesRead = 0;
|
||||
|
||||
// read the header on the first read
|
||||
byte[] header = new byte[10];
|
||||
var n = await _stream.ReadAsync(header, 0, 10, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
// workitem 8501: handle edge case (decompress empty stream)
|
||||
if (n == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (n != 10)
|
||||
{
|
||||
throw new ZlibException("Not a valid GZIP stream.");
|
||||
}
|
||||
|
||||
if (header[0] != 0x1F || header[1] != 0x8B || header[2] != 8)
|
||||
{
|
||||
throw new ZlibException("Bad GZIP header.");
|
||||
}
|
||||
|
||||
var timet = BinaryPrimitives.ReadInt32LittleEndian(header.AsSpan(4));
|
||||
_GzipMtime = TarHeader.EPOCH.AddSeconds(timet);
|
||||
totalBytesRead += n;
|
||||
if ((header[3] & 0x04) == 0x04)
|
||||
{
|
||||
// read and discard extra field
|
||||
n = await _stream.ReadAsync(header, 0, 2, cancellationToken).ConfigureAwait(false); // 2-byte length field
|
||||
totalBytesRead += n;
|
||||
|
||||
var extraLength = (short)(header[0] + header[1] * 256);
|
||||
var extra = new byte[extraLength];
|
||||
n = await _stream
|
||||
.ReadAsync(extra, 0, extra.Length, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
if (n != extraLength)
|
||||
{
|
||||
throw new ZlibException("Unexpected end-of-file reading GZIP header.");
|
||||
}
|
||||
totalBytesRead += n;
|
||||
}
|
||||
if ((header[3] & 0x08) == 0x08)
|
||||
{
|
||||
_GzipFileName = await ReadZeroTerminatedStringAsync(cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
if ((header[3] & 0x10) == 0x010)
|
||||
{
|
||||
_GzipComment = await ReadZeroTerminatedStringAsync(cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
if ((header[3] & 0x02) == 0x02)
|
||||
{
|
||||
await _stream.ReadAsync(_buf1, 0, 1, cancellationToken).ConfigureAwait(false); // CRC16, ignore
|
||||
}
|
||||
|
||||
return totalBytesRead;
|
||||
}
|
||||
|
||||
public override Int32 Read(Byte[] buffer, Int32 offset, Int32 count)
|
||||
{
|
||||
// According to MS documentation, any implementation of the IO.Stream.Read function must:
|
||||
@@ -975,220 +678,6 @@ internal class ZlibBaseStream : Stream, IStreamStack
|
||||
return rc;
|
||||
}
|
||||
|
||||
public override async Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
// According to MS documentation, any implementation of the IO.Stream.Read function must:
|
||||
// (a) throw an exception if offset & count reference an invalid part of the buffer,
|
||||
// or if count < 0, or if buffer is null
|
||||
// (b) return 0 only upon EOF, or if count = 0
|
||||
// (c) if not EOF, then return at least 1 byte, up to <count> bytes
|
||||
|
||||
if (_streamMode == StreamMode.Undefined)
|
||||
{
|
||||
if (!_stream.CanRead)
|
||||
{
|
||||
throw new ZlibException("The stream is not readable.");
|
||||
}
|
||||
|
||||
// for the first read, set up some controls.
|
||||
_streamMode = StreamMode.Reader;
|
||||
|
||||
// (The first reference to _z goes through the private accessor which
|
||||
// may initialize it.)
|
||||
z.AvailableBytesIn = 0;
|
||||
if (_flavor == ZlibStreamFlavor.GZIP)
|
||||
{
|
||||
_gzipHeaderByteCount = await _ReadAndValidateGzipHeaderAsync(cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
// workitem 8501: handle edge case (decompress empty stream)
|
||||
if (_gzipHeaderByteCount == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (_streamMode != StreamMode.Reader)
|
||||
{
|
||||
throw new ZlibException("Cannot Read after Writing.");
|
||||
}
|
||||
|
||||
var rc = 0;
|
||||
|
||||
// set up the output of the deflate/inflate codec:
|
||||
_z.OutputBuffer = buffer;
|
||||
_z.NextOut = offset;
|
||||
_z.AvailableBytesOut = count;
|
||||
|
||||
if (count == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
if (nomoreinput && _wantCompress)
|
||||
{
|
||||
// no more input data available; therefore we flush to
|
||||
// try to complete the read
|
||||
rc = _z.Deflate(FlushType.Finish);
|
||||
|
||||
if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
|
||||
{
|
||||
throw new ZlibException(
|
||||
String.Format("Deflating: rc={0} msg={1}", rc, _z.Message)
|
||||
);
|
||||
}
|
||||
|
||||
rc = (count - _z.AvailableBytesOut);
|
||||
|
||||
// calculate CRC after reading
|
||||
if (crc != null)
|
||||
{
|
||||
crc.SlurpBlock(buffer, offset, rc);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
if (buffer is null)
|
||||
{
|
||||
throw new ArgumentNullException(nameof(buffer));
|
||||
}
|
||||
if (count < 0)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
if (offset < buffer.GetLowerBound(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
if ((offset + count) > buffer.GetLength(0))
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
|
||||
// This is necessary in case _workingBuffer has been resized. (new byte[])
|
||||
// (The first reference to _workingBuffer goes through the private accessor which
|
||||
// may initialize it.)
|
||||
_z.InputBuffer = workingBuffer;
|
||||
|
||||
do
|
||||
{
|
||||
// need data in _workingBuffer in order to deflate/inflate. Here, we check if we have any.
|
||||
if ((_z.AvailableBytesIn == 0) && (!nomoreinput))
|
||||
{
|
||||
// No data available, so try to Read data from the captive stream.
|
||||
_z.NextIn = 0;
|
||||
_z.AvailableBytesIn = await _stream
|
||||
.ReadAsync(_workingBuffer, 0, _workingBuffer.Length, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
if (_z.AvailableBytesIn == 0)
|
||||
{
|
||||
nomoreinput = true;
|
||||
}
|
||||
}
|
||||
|
||||
// we have data in InputBuffer; now compress or decompress as appropriate
|
||||
rc = (_wantCompress) ? _z.Deflate(_flushMode) : _z.Inflate(_flushMode);
|
||||
|
||||
if (nomoreinput && (rc == ZlibConstants.Z_BUF_ERROR))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
|
||||
{
|
||||
throw new ZlibException(
|
||||
String.Format(
|
||||
"{0}flating: rc={1} msg={2}",
|
||||
(_wantCompress ? "de" : "in"),
|
||||
rc,
|
||||
_z.Message
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
if (
|
||||
(nomoreinput || rc == ZlibConstants.Z_STREAM_END) && (_z.AvailableBytesOut == count)
|
||||
)
|
||||
{
|
||||
break; // nothing more to read
|
||||
}
|
||||
} //while (_z.AvailableBytesOut == count && rc == ZlibConstants.Z_OK);
|
||||
while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK);
|
||||
|
||||
// workitem 8557
|
||||
// is there more room in output?
|
||||
if (_z.AvailableBytesOut > 0)
|
||||
{
|
||||
if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0)
|
||||
{
|
||||
// deferred
|
||||
}
|
||||
|
||||
// are we completely done reading?
|
||||
if (nomoreinput)
|
||||
{
|
||||
// and in compression?
|
||||
if (_wantCompress)
|
||||
{
|
||||
// no more input data available; therefore we flush to
|
||||
// try to complete the read
|
||||
rc = _z.Deflate(FlushType.Finish);
|
||||
|
||||
if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
|
||||
{
|
||||
throw new ZlibException(
|
||||
String.Format("Deflating: rc={0} msg={1}", rc, _z.Message)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rc = (count - _z.AvailableBytesOut);
|
||||
|
||||
// calculate CRC after reading
|
||||
if (crc != null)
|
||||
{
|
||||
crc.SlurpBlock(buffer, offset, rc);
|
||||
}
|
||||
|
||||
if (rc == ZlibConstants.Z_STREAM_END && z.AvailableBytesIn != 0 && !_wantCompress)
|
||||
{
|
||||
//rewind the buffer
|
||||
((IStreamStack)this).Rewind(z.AvailableBytesIn); //unused
|
||||
z.AvailableBytesIn = 0;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
// Use ArrayPool to rent a buffer and delegate to byte[] ReadAsync
|
||||
byte[] array = System.Buffers.ArrayPool<byte>.Shared.Rent(buffer.Length);
|
||||
try
|
||||
{
|
||||
int read = await ReadAsync(array, 0, buffer.Length, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
array.AsSpan(0, read).CopyTo(buffer.Span);
|
||||
return read;
|
||||
}
|
||||
finally
|
||||
{
|
||||
System.Buffers.ArrayPool<byte>.Shared.Return(array);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
public override Boolean CanRead => _stream.CanRead;
|
||||
|
||||
public override Boolean CanSeek => _stream.CanSeek;
|
||||
|
||||
@@ -28,8 +28,6 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Deflate;
|
||||
@@ -268,34 +266,6 @@ public class ZlibStream : Stream, IStreamStack
|
||||
_baseStream.Flush();
|
||||
}
|
||||
|
||||
public override async Task FlushAsync(CancellationToken cancellationToken)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("ZlibStream");
|
||||
}
|
||||
await _baseStream.FlushAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask DisposeAsync()
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
_disposed = true;
|
||||
if (_baseStream != null)
|
||||
{
|
||||
await _baseStream.DisposeAsync().ConfigureAwait(false);
|
||||
}
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugDispose(typeof(ZlibStream));
|
||||
#endif
|
||||
await base.DisposeAsync().ConfigureAwait(false);
|
||||
}
|
||||
#endif
|
||||
|
||||
/// <summary>
|
||||
/// Read data from the stream.
|
||||
/// </summary>
|
||||
@@ -331,36 +301,6 @@ public class ZlibStream : Stream, IStreamStack
|
||||
return _baseStream.Read(buffer, offset, count);
|
||||
}
|
||||
|
||||
public override async Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("ZlibStream");
|
||||
}
|
||||
return await _baseStream
|
||||
.ReadAsync(buffer, offset, count, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("ZlibStream");
|
||||
}
|
||||
return await _baseStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
#endif
|
||||
|
||||
public override int ReadByte()
|
||||
{
|
||||
if (_disposed)
|
||||
@@ -415,36 +355,6 @@ public class ZlibStream : Stream, IStreamStack
|
||||
_baseStream.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
public override async Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("ZlibStream");
|
||||
}
|
||||
await _baseStream
|
||||
.WriteAsync(buffer, offset, count, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
}
|
||||
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
public override async ValueTask WriteAsync(
|
||||
ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
throw new ObjectDisposedException("ZlibStream");
|
||||
}
|
||||
await _baseStream.WriteAsync(buffer, cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
#endif
|
||||
|
||||
public override void WriteByte(byte value)
|
||||
{
|
||||
if (_disposed)
|
||||
|
||||
@@ -1,36 +1,35 @@
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Filters
|
||||
namespace SharpCompress.Compressors.Filters;
|
||||
|
||||
internal class DeltaFilter : Filter
|
||||
{
|
||||
internal class DeltaFilter : Filter
|
||||
private const int DISTANCE_MIN = 1;
|
||||
private const int DISTANCE_MAX = 256;
|
||||
private const int DISTANCE_MASK = DISTANCE_MAX - 1;
|
||||
|
||||
private int _distance;
|
||||
private byte[] _history;
|
||||
private int _position;
|
||||
|
||||
public DeltaFilter(bool isEncoder, Stream baseStream, byte[] info)
|
||||
: base(isEncoder, baseStream, 1)
|
||||
{
|
||||
private const int DISTANCE_MIN = 1;
|
||||
private const int DISTANCE_MAX = 256;
|
||||
private const int DISTANCE_MASK = DISTANCE_MAX - 1;
|
||||
_distance = info[0];
|
||||
_history = new byte[DISTANCE_MAX];
|
||||
_position = 0;
|
||||
}
|
||||
|
||||
private int _distance;
|
||||
private byte[] _history;
|
||||
private int _position;
|
||||
protected override int Transform(byte[] buffer, int offset, int count)
|
||||
{
|
||||
var end = offset + count;
|
||||
|
||||
public DeltaFilter(bool isEncoder, Stream baseStream, byte[] info)
|
||||
: base(isEncoder, baseStream, 1)
|
||||
for (var i = offset; i < end; i++)
|
||||
{
|
||||
_distance = info[0];
|
||||
_history = new byte[DISTANCE_MAX];
|
||||
_position = 0;
|
||||
buffer[i] += _history[(_distance + _position--) & DISTANCE_MASK];
|
||||
_history[_position & DISTANCE_MASK] = buffer[i];
|
||||
}
|
||||
|
||||
protected override int Transform(byte[] buffer, int offset, int count)
|
||||
{
|
||||
var end = offset + count;
|
||||
|
||||
for (var i = offset; i < end; i++)
|
||||
{
|
||||
buffer[i] += _history[(_distance + _position--) & DISTANCE_MASK];
|
||||
_history[_position & DISTANCE_MASK] = buffer[i];
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
#nullable disable
|
||||
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.LZMA.LZ;
|
||||
|
||||
internal class OutWindow : IDisposable
|
||||
internal class OutWindow
|
||||
{
|
||||
private byte[] _buffer;
|
||||
private int _windowSize;
|
||||
@@ -16,22 +15,19 @@ internal class OutWindow : IDisposable
|
||||
private int _pendingDist;
|
||||
private Stream _stream;
|
||||
|
||||
private long _total;
|
||||
private long _limit;
|
||||
|
||||
public long Total => _total;
|
||||
public long _total;
|
||||
public long _limit;
|
||||
|
||||
public void Create(int windowSize)
|
||||
{
|
||||
if (_windowSize != windowSize)
|
||||
{
|
||||
if (_buffer is not null)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(_buffer);
|
||||
}
|
||||
_buffer = ArrayPool<byte>.Shared.Rent(windowSize);
|
||||
_buffer = new byte[windowSize];
|
||||
}
|
||||
else
|
||||
{
|
||||
_buffer[windowSize - 1] = 0;
|
||||
}
|
||||
_buffer[windowSize - 1] = 0;
|
||||
_windowSize = windowSize;
|
||||
_pos = 0;
|
||||
_streamPos = 0;
|
||||
@@ -40,22 +36,7 @@ internal class OutWindow : IDisposable
|
||||
_limit = 0;
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
ReleaseStream();
|
||||
if (_buffer is null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
ArrayPool<byte>.Shared.Return(_buffer);
|
||||
_buffer = null;
|
||||
}
|
||||
|
||||
public void Reset()
|
||||
{
|
||||
ReleaseStream();
|
||||
Create(_windowSize);
|
||||
}
|
||||
public void Reset() => Create(_windowSize);
|
||||
|
||||
public void Init(Stream stream)
|
||||
{
|
||||
@@ -85,7 +66,7 @@ internal class OutWindow : IDisposable
|
||||
_stream = null;
|
||||
}
|
||||
|
||||
private void Flush()
|
||||
public void Flush()
|
||||
{
|
||||
if (_stream is null)
|
||||
{
|
||||
|
||||
@@ -294,7 +294,7 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
|
||||
}
|
||||
else
|
||||
{
|
||||
_outWindow.SetLimit(long.MaxValue - _outWindow.Total);
|
||||
_outWindow.SetLimit(long.MaxValue - _outWindow._total);
|
||||
}
|
||||
|
||||
var rangeDecoder = new RangeCoder.Decoder();
|
||||
@@ -305,7 +305,6 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
|
||||
_outWindow.ReleaseStream();
|
||||
rangeDecoder.ReleaseStream();
|
||||
|
||||
_outWindow.Dispose();
|
||||
_outWindow = null;
|
||||
}
|
||||
|
||||
@@ -317,7 +316,7 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
|
||||
|
||||
while (outWindow.HasSpace)
|
||||
{
|
||||
var posState = (uint)outWindow.Total & _posStateMask;
|
||||
var posState = (uint)outWindow._total & _posStateMask;
|
||||
if (
|
||||
_isMatchDecoders[(_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState]
|
||||
.Decode(rangeDecoder) == 0
|
||||
@@ -329,14 +328,18 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
|
||||
{
|
||||
b = _literalDecoder.DecodeWithMatchByte(
|
||||
rangeDecoder,
|
||||
(uint)outWindow.Total,
|
||||
(uint)outWindow._total,
|
||||
prevByte,
|
||||
outWindow.GetByte((int)_rep0)
|
||||
);
|
||||
}
|
||||
else
|
||||
{
|
||||
b = _literalDecoder.DecodeNormal(rangeDecoder, (uint)outWindow.Total, prevByte);
|
||||
b = _literalDecoder.DecodeNormal(
|
||||
rangeDecoder,
|
||||
(uint)outWindow._total,
|
||||
prevByte
|
||||
);
|
||||
}
|
||||
outWindow.PutByte(b);
|
||||
_state.UpdateChar();
|
||||
@@ -421,7 +424,7 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
|
||||
_rep0 = posSlot;
|
||||
}
|
||||
}
|
||||
if (_rep0 >= outWindow.Total || _rep0 >= dictionarySizeCheck)
|
||||
if (_rep0 >= outWindow._total || _rep0 >= dictionarySizeCheck)
|
||||
{
|
||||
if (_rep0 == 0xFFFFFFFF)
|
||||
{
|
||||
|
||||
@@ -178,7 +178,6 @@ public class LzmaStream : Stream, IStreamStack
|
||||
_position = _encoder.Code(null, true);
|
||||
}
|
||||
_inputStream?.Dispose();
|
||||
_outWindow.Dispose();
|
||||
}
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ using SharpCompress.Compressors.Deflate;
|
||||
using SharpCompress.Compressors.Filters;
|
||||
using SharpCompress.Compressors.LZMA.Utilites;
|
||||
using SharpCompress.Compressors.PPMd;
|
||||
using ZstdSharp;
|
||||
using SharpCompress.Compressors.ZStandard;
|
||||
|
||||
namespace SharpCompress.Compressors.LZMA;
|
||||
|
||||
|
||||
@@ -1,65 +1,64 @@
|
||||
namespace SharpCompress.Compressors.Lzw
|
||||
namespace SharpCompress.Compressors.Lzw;
|
||||
|
||||
/// <summary>
|
||||
/// This class contains constants used for LZW
|
||||
/// </summary>
|
||||
[System.Diagnostics.CodeAnalysis.SuppressMessage(
|
||||
"Naming",
|
||||
"CA1707:Identifiers should not contain underscores",
|
||||
Justification = "kept for backwards compatibility"
|
||||
)]
|
||||
public sealed class LzwConstants
|
||||
{
|
||||
/// <summary>
|
||||
/// This class contains constants used for LZW
|
||||
/// Magic number found at start of LZW header: 0x1f 0x9d
|
||||
/// </summary>
|
||||
[System.Diagnostics.CodeAnalysis.SuppressMessage(
|
||||
"Naming",
|
||||
"CA1707:Identifiers should not contain underscores",
|
||||
Justification = "kept for backwards compatibility"
|
||||
)]
|
||||
public sealed class LzwConstants
|
||||
{
|
||||
/// <summary>
|
||||
/// Magic number found at start of LZW header: 0x1f 0x9d
|
||||
/// </summary>
|
||||
public const int MAGIC = 0x1f9d;
|
||||
public const int MAGIC = 0x1f9d;
|
||||
|
||||
/// <summary>
|
||||
/// Maximum number of bits per code
|
||||
/// </summary>
|
||||
public const int MAX_BITS = 16;
|
||||
/// <summary>
|
||||
/// Maximum number of bits per code
|
||||
/// </summary>
|
||||
public const int MAX_BITS = 16;
|
||||
|
||||
/* 3rd header byte:
|
||||
* bit 0..4 Number of compression bits
|
||||
* bit 5 Extended header
|
||||
* bit 6 Free
|
||||
* bit 7 Block mode
|
||||
*/
|
||||
/* 3rd header byte:
|
||||
* bit 0..4 Number of compression bits
|
||||
* bit 5 Extended header
|
||||
* bit 6 Free
|
||||
* bit 7 Block mode
|
||||
*/
|
||||
|
||||
/// <summary>
|
||||
/// Mask for 'number of compression bits'
|
||||
/// </summary>
|
||||
public const int BIT_MASK = 0x1f;
|
||||
/// <summary>
|
||||
/// Mask for 'number of compression bits'
|
||||
/// </summary>
|
||||
public const int BIT_MASK = 0x1f;
|
||||
|
||||
/// <summary>
|
||||
/// Indicates the presence of a fourth header byte
|
||||
/// </summary>
|
||||
public const int EXTENDED_MASK = 0x20;
|
||||
/// <summary>
|
||||
/// Indicates the presence of a fourth header byte
|
||||
/// </summary>
|
||||
public const int EXTENDED_MASK = 0x20;
|
||||
|
||||
//public const int FREE_MASK = 0x40;
|
||||
//public const int FREE_MASK = 0x40;
|
||||
|
||||
/// <summary>
|
||||
/// Reserved bits
|
||||
/// </summary>
|
||||
public const int RESERVED_MASK = 0x60;
|
||||
/// <summary>
|
||||
/// Reserved bits
|
||||
/// </summary>
|
||||
public const int RESERVED_MASK = 0x60;
|
||||
|
||||
/// <summary>
|
||||
/// Block compression: if table is full and compression rate is dropping,
|
||||
/// clear the dictionary.
|
||||
/// </summary>
|
||||
public const int BLOCK_MODE_MASK = 0x80;
|
||||
/// <summary>
|
||||
/// Block compression: if table is full and compression rate is dropping,
|
||||
/// clear the dictionary.
|
||||
/// </summary>
|
||||
public const int BLOCK_MODE_MASK = 0x80;
|
||||
|
||||
/// <summary>
|
||||
/// LZW file header size (in bytes)
|
||||
/// </summary>
|
||||
public const int HDR_SIZE = 3;
|
||||
/// <summary>
|
||||
/// LZW file header size (in bytes)
|
||||
/// </summary>
|
||||
public const int HDR_SIZE = 3;
|
||||
|
||||
/// <summary>
|
||||
/// Initial number of bits per code
|
||||
/// </summary>
|
||||
public const int INIT_BITS = 9;
|
||||
/// <summary>
|
||||
/// Initial number of bits per code
|
||||
/// </summary>
|
||||
public const int INIT_BITS = 9;
|
||||
|
||||
private LzwConstants() { }
|
||||
}
|
||||
private LzwConstants() { }
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,52 +1,51 @@
|
||||
using System.Collections.Generic;
|
||||
using System.Linq;
|
||||
|
||||
namespace SharpCompress.Compressors.RLE90
|
||||
namespace SharpCompress.Compressors.RLE90;
|
||||
|
||||
public static class RLE
|
||||
{
|
||||
public static class RLE
|
||||
private const byte DLE = 0x90;
|
||||
|
||||
/// <summary>
|
||||
/// Unpacks an RLE compressed buffer.
|
||||
/// Format: <char> DLE <count>, where count == 0 -> DLE
|
||||
/// </summary>
|
||||
/// <param name="compressedBuffer">The compressed buffer to unpack.</param>
|
||||
/// <returns>A list of unpacked bytes.</returns>
|
||||
public static List<byte> UnpackRLE(byte[] compressedBuffer)
|
||||
{
|
||||
private const byte DLE = 0x90;
|
||||
var result = new List<byte>(compressedBuffer.Length * 2); // Optimized initial capacity
|
||||
var countMode = false;
|
||||
byte last = 0;
|
||||
|
||||
/// <summary>
|
||||
/// Unpacks an RLE compressed buffer.
|
||||
/// Format: <char> DLE <count>, where count == 0 -> DLE
|
||||
/// </summary>
|
||||
/// <param name="compressedBuffer">The compressed buffer to unpack.</param>
|
||||
/// <returns>A list of unpacked bytes.</returns>
|
||||
public static List<byte> UnpackRLE(byte[] compressedBuffer)
|
||||
foreach (var c in compressedBuffer)
|
||||
{
|
||||
var result = new List<byte>(compressedBuffer.Length * 2); // Optimized initial capacity
|
||||
var countMode = false;
|
||||
byte last = 0;
|
||||
|
||||
foreach (var c in compressedBuffer)
|
||||
if (!countMode)
|
||||
{
|
||||
if (!countMode)
|
||||
if (c == DLE)
|
||||
{
|
||||
if (c == DLE)
|
||||
{
|
||||
countMode = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
result.Add(c);
|
||||
last = c;
|
||||
}
|
||||
countMode = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
countMode = false;
|
||||
if (c == 0)
|
||||
{
|
||||
result.Add(DLE);
|
||||
}
|
||||
else
|
||||
{
|
||||
result.AddRange(Enumerable.Repeat(last, c - 1));
|
||||
}
|
||||
result.Add(c);
|
||||
last = c;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
countMode = false;
|
||||
if (c == 0)
|
||||
{
|
||||
result.Add(DLE);
|
||||
}
|
||||
else
|
||||
{
|
||||
result.AddRange(Enumerable.Repeat(last, c - 1));
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,91 +6,90 @@ using System.Text;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.RLE90
|
||||
namespace SharpCompress.Compressors.RLE90;
|
||||
|
||||
public class RunLength90Stream : Stream, IStreamStack
|
||||
{
|
||||
public class RunLength90Stream : Stream, IStreamStack
|
||||
#if DEBUG_STREAMS
|
||||
long IStreamStack.InstanceId { get; set; }
|
||||
#endif
|
||||
int IStreamStack.DefaultBufferSize { get; set; }
|
||||
|
||||
Stream IStreamStack.BaseStream() => _stream;
|
||||
|
||||
int IStreamStack.BufferSize
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
int IStreamStack.BufferPosition
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
|
||||
void IStreamStack.SetPosition(long position) { }
|
||||
|
||||
private readonly Stream _stream;
|
||||
private const byte DLE = 0x90;
|
||||
private int _compressedSize;
|
||||
private bool _processed = false;
|
||||
|
||||
public RunLength90Stream(Stream stream, int compressedSize)
|
||||
{
|
||||
_stream = stream;
|
||||
_compressedSize = compressedSize;
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugConstruct(typeof(RunLength90Stream));
|
||||
#endif
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
#if DEBUG_STREAMS
|
||||
long IStreamStack.InstanceId { get; set; }
|
||||
this.DebugDispose(typeof(RunLength90Stream));
|
||||
#endif
|
||||
int IStreamStack.DefaultBufferSize { get; set; }
|
||||
|
||||
Stream IStreamStack.BaseStream() => _stream;
|
||||
|
||||
int IStreamStack.BufferSize
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
int IStreamStack.BufferPosition
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
|
||||
void IStreamStack.SetPosition(long position) { }
|
||||
|
||||
private readonly Stream _stream;
|
||||
private const byte DLE = 0x90;
|
||||
private int _compressedSize;
|
||||
private bool _processed = false;
|
||||
|
||||
public RunLength90Stream(Stream stream, int compressedSize)
|
||||
{
|
||||
_stream = stream;
|
||||
_compressedSize = compressedSize;
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugConstruct(typeof(RunLength90Stream));
|
||||
#endif
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugDispose(typeof(RunLength90Stream));
|
||||
#endif
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotImplementedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _stream.Position;
|
||||
set => throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Flush() => throw new NotImplementedException();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_processed)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
_processed = true;
|
||||
|
||||
using var binaryReader = new BinaryReader(_stream);
|
||||
byte[] compressedBuffer = binaryReader.ReadBytes(_compressedSize);
|
||||
|
||||
var unpacked = RLE.UnpackRLE(compressedBuffer);
|
||||
unpacked.CopyTo(buffer);
|
||||
|
||||
return unpacked.Count;
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) =>
|
||||
throw new NotImplementedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotImplementedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
throw new NotImplementedException();
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotImplementedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _stream.Position;
|
||||
set => throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Flush() => throw new NotImplementedException();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_processed)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
_processed = true;
|
||||
|
||||
using var binaryReader = new BinaryReader(_stream);
|
||||
byte[] compressedBuffer = binaryReader.ReadBytes(_compressedSize);
|
||||
|
||||
var unpacked = RLE.UnpackRLE(compressedBuffer);
|
||||
unpacked.CopyTo(buffer);
|
||||
|
||||
return unpacked.Count;
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) =>
|
||||
throw new NotImplementedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotImplementedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ using SharpCompress.Compressors.Rar.VM;
|
||||
|
||||
namespace SharpCompress.Compressors.Rar.UnpackV1;
|
||||
|
||||
internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
internal sealed partial class Unpack : BitInput, IRarUnpack, IDisposable
|
||||
{
|
||||
private readonly BitInput Inp;
|
||||
private bool disposed;
|
||||
@@ -22,17 +22,15 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
// to ease in porting Unpack50.cs
|
||||
Inp = this;
|
||||
|
||||
public override void Dispose()
|
||||
public void Dispose()
|
||||
{
|
||||
if (!disposed)
|
||||
{
|
||||
base.Dispose();
|
||||
if (!externalWindow)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(window);
|
||||
window = null;
|
||||
}
|
||||
rarVM.Dispose();
|
||||
disposed = true;
|
||||
}
|
||||
}
|
||||
@@ -576,111 +574,104 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
|
||||
var FilteredDataOffset = Prg.FilteredDataOffset;
|
||||
var FilteredDataSize = Prg.FilteredDataSize;
|
||||
var FilteredData = ArrayPool<byte>.Shared.Rent(FilteredDataSize);
|
||||
try
|
||||
var FilteredData = new byte[FilteredDataSize];
|
||||
|
||||
for (var i = 0; i < FilteredDataSize; i++)
|
||||
{
|
||||
Array.Copy(
|
||||
rarVM.Mem,
|
||||
FilteredDataOffset,
|
||||
FilteredData,
|
||||
0,
|
||||
FilteredDataSize
|
||||
);
|
||||
FilteredData[i] = rarVM.Mem[FilteredDataOffset + i];
|
||||
|
||||
prgStack[I] = null;
|
||||
while (I + 1 < prgStack.Count)
|
||||
// Prg.GlobalData.get(FilteredDataOffset
|
||||
// +
|
||||
// i);
|
||||
}
|
||||
|
||||
prgStack[I] = null;
|
||||
while (I + 1 < prgStack.Count)
|
||||
{
|
||||
var NextFilter = prgStack[I + 1];
|
||||
if (
|
||||
NextFilter is null
|
||||
|| NextFilter.BlockStart != BlockStart
|
||||
|| NextFilter.BlockLength != FilteredDataSize
|
||||
|| NextFilter.NextWindow
|
||||
)
|
||||
{
|
||||
var NextFilter = prgStack[I + 1];
|
||||
if (
|
||||
NextFilter is null
|
||||
|| NextFilter.BlockStart != BlockStart
|
||||
|| NextFilter.BlockLength != FilteredDataSize
|
||||
|| NextFilter.NextWindow
|
||||
)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
// apply several filters to same data block
|
||||
|
||||
rarVM.setMemory(0, FilteredData, 0, FilteredDataSize);
|
||||
|
||||
// .SetMemory(0,FilteredData,FilteredDataSize);
|
||||
|
||||
var pPrg = filters[NextFilter.ParentFilter].Program;
|
||||
var NextPrg = NextFilter.Program;
|
||||
|
||||
if (pPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
|
||||
{
|
||||
// copy global data from previous script execution
|
||||
// if any
|
||||
// NextPrg->GlobalData.Alloc(ParentPrg->GlobalData.Size());
|
||||
NextPrg.GlobalData.SetSize(pPrg.GlobalData.Count);
|
||||
|
||||
// memcpy(&NextPrg->GlobalData[VM_FIXEDGLOBALSIZE],&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],ParentPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE);
|
||||
for (
|
||||
var i = 0;
|
||||
i < pPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
|
||||
i++
|
||||
)
|
||||
{
|
||||
NextPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] =
|
||||
pPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i];
|
||||
}
|
||||
}
|
||||
|
||||
ExecuteCode(NextPrg);
|
||||
|
||||
if (NextPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
|
||||
{
|
||||
// save global data for next script execution
|
||||
if (pPrg.GlobalData.Count < NextPrg.GlobalData.Count)
|
||||
{
|
||||
pPrg.GlobalData.SetSize(NextPrg.GlobalData.Count);
|
||||
}
|
||||
|
||||
// memcpy(&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],&NextPrg->GlobalData[VM_FIXEDGLOBALSIZE],NextPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE);
|
||||
for (
|
||||
var i = 0;
|
||||
i < NextPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
|
||||
i++
|
||||
)
|
||||
{
|
||||
pPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] =
|
||||
NextPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i];
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
pPrg.GlobalData.Clear();
|
||||
}
|
||||
|
||||
FilteredDataOffset = NextPrg.FilteredDataOffset;
|
||||
FilteredDataSize = NextPrg.FilteredDataSize;
|
||||
if (FilteredData.Length < FilteredDataSize)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(FilteredData);
|
||||
FilteredData = ArrayPool<byte>.Shared.Rent(FilteredDataSize);
|
||||
}
|
||||
for (var i = 0; i < FilteredDataSize; i++)
|
||||
{
|
||||
FilteredData[i] = NextPrg.GlobalData[FilteredDataOffset + i];
|
||||
}
|
||||
|
||||
I++;
|
||||
prgStack[I] = null;
|
||||
break;
|
||||
}
|
||||
|
||||
writeStream.Write(FilteredData, 0, FilteredDataSize);
|
||||
writtenFileSize += FilteredDataSize;
|
||||
destUnpSize -= FilteredDataSize;
|
||||
WrittenBorder = BlockEnd;
|
||||
WriteSize = (unpPtr - WrittenBorder) & PackDef.MAXWINMASK;
|
||||
}
|
||||
finally
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(FilteredData);
|
||||
// apply several filters to same data block
|
||||
|
||||
rarVM.setMemory(0, FilteredData, 0, FilteredDataSize);
|
||||
|
||||
// .SetMemory(0,FilteredData,FilteredDataSize);
|
||||
|
||||
var pPrg = filters[NextFilter.ParentFilter].Program;
|
||||
var NextPrg = NextFilter.Program;
|
||||
|
||||
if (pPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
|
||||
{
|
||||
// copy global data from previous script execution
|
||||
// if any
|
||||
// NextPrg->GlobalData.Alloc(ParentPrg->GlobalData.Size());
|
||||
NextPrg.GlobalData.SetSize(pPrg.GlobalData.Count);
|
||||
|
||||
// memcpy(&NextPrg->GlobalData[VM_FIXEDGLOBALSIZE],&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],ParentPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE);
|
||||
for (
|
||||
var i = 0;
|
||||
i < pPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
|
||||
i++
|
||||
)
|
||||
{
|
||||
NextPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] = pPrg.GlobalData[
|
||||
RarVM.VM_FIXEDGLOBALSIZE + i
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
ExecuteCode(NextPrg);
|
||||
|
||||
if (NextPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
|
||||
{
|
||||
// save global data for next script execution
|
||||
if (pPrg.GlobalData.Count < NextPrg.GlobalData.Count)
|
||||
{
|
||||
pPrg.GlobalData.SetSize(NextPrg.GlobalData.Count);
|
||||
}
|
||||
|
||||
// memcpy(&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],&NextPrg->GlobalData[VM_FIXEDGLOBALSIZE],NextPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE);
|
||||
for (
|
||||
var i = 0;
|
||||
i < NextPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
|
||||
i++
|
||||
)
|
||||
{
|
||||
pPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] = NextPrg.GlobalData[
|
||||
RarVM.VM_FIXEDGLOBALSIZE + i
|
||||
];
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
pPrg.GlobalData.Clear();
|
||||
}
|
||||
FilteredDataOffset = NextPrg.FilteredDataOffset;
|
||||
FilteredDataSize = NextPrg.FilteredDataSize;
|
||||
|
||||
FilteredData = new byte[FilteredDataSize];
|
||||
for (var i = 0; i < FilteredDataSize; i++)
|
||||
{
|
||||
FilteredData[i] = NextPrg.GlobalData[FilteredDataOffset + i];
|
||||
}
|
||||
|
||||
I++;
|
||||
prgStack[I] = null;
|
||||
}
|
||||
writeStream.Write(FilteredData, 0, FilteredDataSize);
|
||||
unpSomeRead = true;
|
||||
writtenFileSize += FilteredDataSize;
|
||||
destUnpSize -= FilteredDataSize;
|
||||
WrittenBorder = BlockEnd;
|
||||
WriteSize = (unpPtr - WrittenBorder) & PackDef.MAXWINMASK;
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -704,10 +695,15 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
|
||||
private void UnpWriteArea(int startPtr, int endPtr)
|
||||
{
|
||||
if (endPtr != startPtr)
|
||||
{
|
||||
unpSomeRead = true;
|
||||
}
|
||||
if (endPtr < startPtr)
|
||||
{
|
||||
UnpWriteData(window, startPtr, -startPtr & PackDef.MAXWINMASK);
|
||||
UnpWriteData(window, 0, endPtr);
|
||||
unpAllBuf = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -761,27 +757,19 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
// System.out.println("copyString(" + length + ", " + distance + ")");
|
||||
|
||||
var destPtr = unpPtr - distance;
|
||||
var safeZone = PackDef.MAXWINSIZE - 260;
|
||||
|
||||
// Fast path: use Array.Copy for bulk operations when in safe zone
|
||||
if (destPtr >= 0 && destPtr < safeZone && unpPtr < safeZone && distance >= length)
|
||||
// System.out.println(unpPtr+":"+distance);
|
||||
if (destPtr >= 0 && destPtr < PackDef.MAXWINSIZE - 260 && unpPtr < PackDef.MAXWINSIZE - 260)
|
||||
{
|
||||
// Non-overlapping copy: can use Array.Copy directly
|
||||
Array.Copy(window, destPtr, window, unpPtr, length);
|
||||
unpPtr += length;
|
||||
}
|
||||
else if (destPtr >= 0 && destPtr < safeZone && unpPtr < safeZone)
|
||||
{
|
||||
// Overlapping copy in safe zone: use byte-by-byte to handle self-referential copies
|
||||
for (int i = 0; i < length; i++)
|
||||
window[unpPtr++] = window[destPtr++];
|
||||
|
||||
while (--length > 0)
|
||||
{
|
||||
window[unpPtr + i] = window[destPtr + i];
|
||||
window[unpPtr++] = window[destPtr++];
|
||||
}
|
||||
unpPtr += length;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Slow path with wraparound mask
|
||||
while (length-- != 0)
|
||||
{
|
||||
window[unpPtr] = window[destPtr++ & PackDef.MAXWINMASK];
|
||||
@@ -1040,7 +1028,7 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
vmCode.Add((byte)(GetBits() >> 8));
|
||||
AddBits(8);
|
||||
}
|
||||
return AddVMCode(FirstByte, vmCode);
|
||||
return (AddVMCode(FirstByte, vmCode, Length));
|
||||
}
|
||||
|
||||
private bool ReadVMCodePPM()
|
||||
@@ -1085,12 +1073,12 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
}
|
||||
vmCode.Add((byte)Ch); // VMCode[I]=Ch;
|
||||
}
|
||||
return AddVMCode(FirstByte, vmCode);
|
||||
return (AddVMCode(FirstByte, vmCode, Length));
|
||||
}
|
||||
|
||||
private bool AddVMCode(int firstByte, List<byte> vmCode)
|
||||
private bool AddVMCode(int firstByte, List<byte> vmCode, int length)
|
||||
{
|
||||
using var Inp = new BitInput();
|
||||
var Inp = new BitInput();
|
||||
Inp.InitBitInput();
|
||||
|
||||
// memcpy(Inp.InBuf,Code,Min(BitInput::MAX_SIZE,CodeSize));
|
||||
@@ -1098,6 +1086,7 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
{
|
||||
Inp.InBuf[i] = vmCode[i];
|
||||
}
|
||||
rarVM.init();
|
||||
|
||||
int FiltPos;
|
||||
if ((firstByte & 0x80) != 0)
|
||||
@@ -1210,28 +1199,19 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
{
|
||||
return (false);
|
||||
}
|
||||
|
||||
var VMCode = ArrayPool<byte>.Shared.Rent(VMCodeSize);
|
||||
try
|
||||
Span<byte> VMCode = stackalloc byte[VMCodeSize];
|
||||
for (var I = 0; I < VMCodeSize; I++)
|
||||
{
|
||||
for (var I = 0; I < VMCodeSize; I++)
|
||||
if (Inp.Overflow(3))
|
||||
{
|
||||
if (Inp.Overflow(3))
|
||||
{
|
||||
return (false);
|
||||
}
|
||||
|
||||
VMCode[I] = (byte)(Inp.GetBits() >> 8);
|
||||
Inp.AddBits(8);
|
||||
return (false);
|
||||
}
|
||||
VMCode[I] = (byte)(Inp.GetBits() >> 8);
|
||||
Inp.AddBits(8);
|
||||
}
|
||||
|
||||
// VM.Prepare(&VMCode[0],VMCodeSize,&Filter->Prg);
|
||||
rarVM.prepare(VMCode.AsSpan(0, VMCodeSize), Filter.Program);
|
||||
}
|
||||
finally
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(VMCode);
|
||||
}
|
||||
// VM.Prepare(&VMCode[0],VMCodeSize,&Filter->Prg);
|
||||
rarVM.prepare(VMCode, VMCodeSize, Filter.Program);
|
||||
}
|
||||
StackFilter.Program.AltCommands = Filter.Program.Commands; // StackFilter->Prg.AltCmd=&Filter->Prg.Cmd[0];
|
||||
StackFilter.Program.CommandCount = Filter.Program.CommandCount;
|
||||
|
||||
@@ -19,9 +19,14 @@ internal partial class Unpack
|
||||
|
||||
private bool suspended;
|
||||
|
||||
internal bool unpAllBuf;
|
||||
|
||||
//private ComprDataIO unpIO;
|
||||
private Stream readStream;
|
||||
private Stream writeStream;
|
||||
|
||||
internal bool unpSomeRead;
|
||||
|
||||
private int readTop;
|
||||
|
||||
private long destUnpSize;
|
||||
@@ -803,10 +808,15 @@ internal partial class Unpack
|
||||
|
||||
private void oldUnpWriteBuf()
|
||||
{
|
||||
if (unpPtr != wrPtr)
|
||||
{
|
||||
unpSomeRead = true;
|
||||
}
|
||||
if (unpPtr < wrPtr)
|
||||
{
|
||||
writeStream.Write(window, wrPtr, -wrPtr & PackDef.MAXWINMASK);
|
||||
writeStream.Write(window, 0, unpPtr);
|
||||
unpAllBuf = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using SharpCompress.Compressors.Rar.VM;
|
||||
|
||||
namespace SharpCompress.Compressors.Rar.UnpackV1;
|
||||
@@ -10,15 +9,167 @@ internal static class UnpackUtility
|
||||
internal static uint DecodeNumber(this BitInput input, Decode.Decode dec) =>
|
||||
(uint)input.decodeNumber(dec);
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
internal static int decodeNumber(this BitInput input, Decode.Decode dec)
|
||||
{
|
||||
int bits;
|
||||
long bitField = input.GetBits() & 0xfffe;
|
||||
|
||||
// if (bitField < dec.getDecodeLen()[8]) {
|
||||
// if (bitField < dec.getDecodeLen()[4]) {
|
||||
// if (bitField < dec.getDecodeLen()[2]) {
|
||||
// if (bitField < dec.getDecodeLen()[1]) {
|
||||
// bits = 1;
|
||||
// } else {
|
||||
// bits = 2;
|
||||
// }
|
||||
// } else {
|
||||
// if (bitField < dec.getDecodeLen()[3]) {
|
||||
// bits = 3;
|
||||
// } else {
|
||||
// bits = 4;
|
||||
// }
|
||||
// }
|
||||
// } else {
|
||||
// if (bitField < dec.getDecodeLen()[6]) {
|
||||
// if (bitField < dec.getDecodeLen()[5])
|
||||
// bits = 5;
|
||||
// else
|
||||
// bits = 6;
|
||||
// } else {
|
||||
// if (bitField < dec.getDecodeLen()[7]) {
|
||||
// bits = 7;
|
||||
// } else {
|
||||
// bits = 8;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// } else {
|
||||
// if (bitField < dec.getDecodeLen()[12]) {
|
||||
// if (bitField < dec.getDecodeLen()[10])
|
||||
// if (bitField < dec.getDecodeLen()[9])
|
||||
// bits = 9;
|
||||
// else
|
||||
// bits = 10;
|
||||
// else if (bitField < dec.getDecodeLen()[11])
|
||||
// bits = 11;
|
||||
// else
|
||||
// bits = 12;
|
||||
// } else {
|
||||
// if (bitField < dec.getDecodeLen()[14]) {
|
||||
// if (bitField < dec.getDecodeLen()[13]) {
|
||||
// bits = 13;
|
||||
// } else {
|
||||
// bits = 14;
|
||||
// }
|
||||
// } else {
|
||||
// bits = 15;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// addbits(bits);
|
||||
// int N = dec.getDecodePos()[bits]
|
||||
// + (((int) bitField - dec.getDecodeLen()[bits - 1]) >>> (16 - bits));
|
||||
// if (N >= dec.getMaxNum()) {
|
||||
// N = 0;
|
||||
// }
|
||||
// return (dec.getDecodeNum()[N]);
|
||||
var decodeLen = dec.DecodeLen;
|
||||
|
||||
// Binary search to find the bit length - faster than nested ifs
|
||||
int bits = FindDecodeBits(bitField, decodeLen);
|
||||
|
||||
if (bitField < decodeLen[8])
|
||||
{
|
||||
if (bitField < decodeLen[4])
|
||||
{
|
||||
if (bitField < decodeLen[2])
|
||||
{
|
||||
if (bitField < decodeLen[1])
|
||||
{
|
||||
bits = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 2;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (bitField < decodeLen[3])
|
||||
{
|
||||
bits = 3;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (bitField < decodeLen[6])
|
||||
{
|
||||
if (bitField < decodeLen[5])
|
||||
{
|
||||
bits = 5;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 6;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (bitField < decodeLen[7])
|
||||
{
|
||||
bits = 7;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 8;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (bitField < decodeLen[12])
|
||||
{
|
||||
if (bitField < decodeLen[10])
|
||||
{
|
||||
if (bitField < decodeLen[9])
|
||||
{
|
||||
bits = 9;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 10;
|
||||
}
|
||||
}
|
||||
else if (bitField < decodeLen[11])
|
||||
{
|
||||
bits = 11;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 12;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (bitField < decodeLen[14])
|
||||
{
|
||||
if (bitField < decodeLen[13])
|
||||
{
|
||||
bits = 13;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 14;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 15;
|
||||
}
|
||||
}
|
||||
}
|
||||
input.AddBits(bits);
|
||||
var N =
|
||||
dec.DecodePos[bits]
|
||||
@@ -30,52 +181,6 @@ internal static class UnpackUtility
|
||||
return (dec.DecodeNum[N]);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Fast binary search to find which bit length matches the bitField.
|
||||
/// Optimized with cached array access to minimize memory lookups.
|
||||
/// </summary>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static int FindDecodeBits(long bitField, int[] decodeLen)
|
||||
{
|
||||
// Cache critical values to reduce array access overhead
|
||||
long len4 = decodeLen[4];
|
||||
long len8 = decodeLen[8];
|
||||
long len12 = decodeLen[12];
|
||||
|
||||
if (bitField < len8)
|
||||
{
|
||||
if (bitField < len4)
|
||||
{
|
||||
long len2 = decodeLen[2];
|
||||
if (bitField < len2)
|
||||
{
|
||||
return bitField < decodeLen[1] ? 1 : 2;
|
||||
}
|
||||
return bitField < decodeLen[3] ? 3 : 4;
|
||||
}
|
||||
|
||||
long len6 = decodeLen[6];
|
||||
if (bitField < len6)
|
||||
{
|
||||
return bitField < decodeLen[5] ? 5 : 6;
|
||||
}
|
||||
return bitField < decodeLen[7] ? 7 : 8;
|
||||
}
|
||||
|
||||
if (bitField < len12)
|
||||
{
|
||||
long len10 = decodeLen[10];
|
||||
if (bitField < len10)
|
||||
{
|
||||
return bitField < decodeLen[9] ? 9 : 10;
|
||||
}
|
||||
return bitField < decodeLen[11] ? 11 : 12;
|
||||
}
|
||||
|
||||
long len14 = decodeLen[14];
|
||||
return bitField < len14 ? (bitField < decodeLen[13] ? 13 : 14) : 15;
|
||||
}
|
||||
|
||||
internal static void makeDecodeTables(
|
||||
Span<byte> lenTab,
|
||||
int offset,
|
||||
@@ -89,7 +194,8 @@ internal static class UnpackUtility
|
||||
long M,
|
||||
N;
|
||||
|
||||
new Span<int>(dec.DecodeNum).Clear();
|
||||
new Span<int>(dec.DecodeNum).Clear(); // memset(Dec->DecodeNum,0,Size*sizeof(*Dec->DecodeNum));
|
||||
|
||||
for (i = 0; i < size; i++)
|
||||
{
|
||||
lenCount[lenTab[offset + i] & 0xF]++;
|
||||
|
||||
@@ -413,7 +413,7 @@ internal partial class Unpack
|
||||
else
|
||||
//x memcpy(Mem,Window+BlockStart,BlockLength);
|
||||
{
|
||||
Buffer.BlockCopy(Window, (int)BlockStart, Mem, 0, (int)BlockLength);
|
||||
Utility.Copy(Window, BlockStart, Mem, 0, BlockLength);
|
||||
}
|
||||
}
|
||||
else
|
||||
@@ -427,21 +427,9 @@ internal partial class Unpack
|
||||
else
|
||||
{
|
||||
//x memcpy(Mem,Window+BlockStart,FirstPartLength);
|
||||
Buffer.BlockCopy(
|
||||
Window,
|
||||
(int)BlockStart,
|
||||
Mem,
|
||||
0,
|
||||
(int)FirstPartLength
|
||||
);
|
||||
Utility.Copy(Window, BlockStart, Mem, 0, FirstPartLength);
|
||||
//x memcpy(Mem+FirstPartLength,Window,BlockEnd);
|
||||
Buffer.BlockCopy(
|
||||
Window,
|
||||
0,
|
||||
Mem,
|
||||
(int)FirstPartLength,
|
||||
(int)BlockEnd
|
||||
);
|
||||
Utility.Copy(Window, 0, Mem, FirstPartLength, BlockEnd);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
|
||||
namespace SharpCompress.Compressors.Rar.VM;
|
||||
|
||||
internal class BitInput : IDisposable
|
||||
internal class BitInput
|
||||
{
|
||||
/// <summary> the max size of the input</summary>
|
||||
internal const int MAX_SIZE = 0x8000;
|
||||
@@ -23,11 +20,9 @@ internal class BitInput : IDisposable
|
||||
set => inBit = value;
|
||||
}
|
||||
public bool ExternalBuffer;
|
||||
private byte[] _privateBuffer = ArrayPool<byte>.Shared.Rent(MAX_SIZE);
|
||||
private bool _disposed;
|
||||
|
||||
/// <summary> </summary>
|
||||
internal BitInput() => InBuf = _privateBuffer;
|
||||
internal BitInput() => InBuf = new byte[MAX_SIZE];
|
||||
|
||||
internal byte[] InBuf { get; }
|
||||
|
||||
@@ -92,14 +87,4 @@ internal class BitInput : IDisposable
|
||||
/// <returns> true if an Oververflow would occur
|
||||
/// </returns>
|
||||
internal bool Overflow(int IncPtr) => (inAddr + IncPtr >= MAX_SIZE);
|
||||
|
||||
public virtual void Dispose()
|
||||
{
|
||||
if (_disposed)
|
||||
{
|
||||
return;
|
||||
}
|
||||
ArrayPool<byte>.Shared.Return(_privateBuffer);
|
||||
_disposed = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
#nullable disable
|
||||
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.Buffers.Binary;
|
||||
using System.Collections.Generic;
|
||||
|
||||
@@ -15,9 +16,7 @@ internal sealed class RarVM : BitInput
|
||||
// Mem.set_Renamed(offset + 3, Byte.valueOf((sbyte) ((Utility.URShift(value_Renamed, 24)) & 0xff)));
|
||||
|
||||
//}
|
||||
internal byte[] Mem => _memory.NotNull();
|
||||
|
||||
private byte[]? _memory = ArrayPool<byte>.Shared.Rent(VM_MEMSIZE + 4);
|
||||
internal byte[] Mem { get; private set; }
|
||||
|
||||
public const int VM_MEMSIZE = 0x40000;
|
||||
|
||||
@@ -41,18 +40,11 @@ internal sealed class RarVM : BitInput
|
||||
|
||||
private int IP;
|
||||
|
||||
internal RarVM() { }
|
||||
internal RarVM() =>
|
||||
//InitBlock();
|
||||
Mem = null;
|
||||
|
||||
public override void Dispose()
|
||||
{
|
||||
base.Dispose();
|
||||
if (_memory is null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
ArrayPool<byte>.Shared.Return(_memory);
|
||||
_memory = null;
|
||||
}
|
||||
internal void init() => Mem ??= new byte[VM_MEMSIZE + 4];
|
||||
|
||||
private bool IsVMMem(byte[] mem) => Mem == mem;
|
||||
|
||||
@@ -784,10 +776,9 @@ internal sealed class RarVM : BitInput
|
||||
}
|
||||
}
|
||||
|
||||
public void prepare(ReadOnlySpan<byte> code, VMPreparedProgram prg)
|
||||
public void prepare(ReadOnlySpan<byte> code, int codeSize, VMPreparedProgram prg)
|
||||
{
|
||||
InitBitInput();
|
||||
var codeSize = code.Length;
|
||||
var cpLength = Math.Min(MAX_SIZE, codeSize);
|
||||
|
||||
// memcpy(inBuf,Code,Min(CodeSize,BitInput::MAX_SIZE));
|
||||
@@ -804,7 +795,7 @@ internal sealed class RarVM : BitInput
|
||||
prg.CommandCount = 0;
|
||||
if (xorSum == code[0])
|
||||
{
|
||||
var filterType = IsStandardFilter(code);
|
||||
var filterType = IsStandardFilter(code, codeSize);
|
||||
if (filterType != VMStandardFilters.VMSF_NONE)
|
||||
{
|
||||
var curCmd = new VMPreparedCommand();
|
||||
@@ -1114,7 +1105,7 @@ internal sealed class RarVM : BitInput
|
||||
}
|
||||
}
|
||||
|
||||
private VMStandardFilters IsStandardFilter(ReadOnlySpan<byte> code)
|
||||
private VMStandardFilters IsStandardFilter(ReadOnlySpan<byte> code, int codeSize)
|
||||
{
|
||||
VMStandardFilterSignature[] stdList =
|
||||
{
|
||||
@@ -1139,7 +1130,6 @@ internal sealed class RarVM : BitInput
|
||||
|
||||
private void ExecuteStandardFilter(VMStandardFilters filterType)
|
||||
{
|
||||
var mem = Mem;
|
||||
switch (filterType)
|
||||
{
|
||||
case VMStandardFilters.VMSF_E8:
|
||||
@@ -1158,7 +1148,7 @@ internal sealed class RarVM : BitInput
|
||||
);
|
||||
for (var curPos = 0; curPos < dataSize - 4; )
|
||||
{
|
||||
var curByte = mem[curPos++];
|
||||
var curByte = Mem[curPos++];
|
||||
if (curByte == 0xe8 || curByte == cmpByte2)
|
||||
{
|
||||
// #ifdef PRESENT_INT32
|
||||
@@ -1174,19 +1164,19 @@ internal sealed class RarVM : BitInput
|
||||
// SET_VALUE(false,Data,Addr-Offset);
|
||||
// #else
|
||||
var offset = curPos + fileOffset;
|
||||
long Addr = GetValue(false, mem, curPos);
|
||||
long Addr = GetValue(false, Mem, curPos);
|
||||
if ((Addr & unchecked((int)0x80000000)) != 0)
|
||||
{
|
||||
if (((Addr + offset) & unchecked((int)0x80000000)) == 0)
|
||||
{
|
||||
SetValue(false, mem, curPos, (int)Addr + fileSize);
|
||||
SetValue(false, Mem, curPos, (int)Addr + fileSize);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (((Addr - fileSize) & unchecked((int)0x80000000)) != 0)
|
||||
{
|
||||
SetValue(false, mem, curPos, (int)(Addr - offset));
|
||||
SetValue(false, Mem, curPos, (int)(Addr - offset));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1214,7 +1204,7 @@ internal sealed class RarVM : BitInput
|
||||
|
||||
while (curPos < dataSize - 21)
|
||||
{
|
||||
var Byte = (mem[curPos] & 0x1f) - 0x10;
|
||||
var Byte = (Mem[curPos] & 0x1f) - 0x10;
|
||||
if (Byte >= 0)
|
||||
{
|
||||
var cmdMask = Masks[Byte];
|
||||
@@ -1260,7 +1250,7 @@ internal sealed class RarVM : BitInput
|
||||
var channels = R[0] & unchecked((int)0xFFffFFff);
|
||||
var srcPos = 0;
|
||||
var border = (dataSize * 2) & unchecked((int)0xFFffFFff);
|
||||
SetValue(false, mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
if (dataSize >= VM_GLOBALMEMADDR / 2)
|
||||
{
|
||||
break;
|
||||
@@ -1278,7 +1268,7 @@ internal sealed class RarVM : BitInput
|
||||
destPos += channels
|
||||
)
|
||||
{
|
||||
mem[destPos] = (PrevByte = (byte)(PrevByte - mem[srcPos++]));
|
||||
Mem[destPos] = (PrevByte = (byte)(PrevByte - Mem[srcPos++]));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1293,7 +1283,7 @@ internal sealed class RarVM : BitInput
|
||||
var channels = 3;
|
||||
var srcPos = 0;
|
||||
var destDataPos = dataSize;
|
||||
SetValue(false, mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
if (dataSize >= VM_GLOBALMEMADDR / 2 || posR < 0)
|
||||
{
|
||||
break;
|
||||
@@ -1309,8 +1299,8 @@ internal sealed class RarVM : BitInput
|
||||
if (upperPos >= 3)
|
||||
{
|
||||
var upperDataPos = destDataPos + upperPos;
|
||||
var upperByte = mem[upperDataPos] & 0xff;
|
||||
var upperLeftByte = mem[upperDataPos - 3] & 0xff;
|
||||
var upperByte = Mem[upperDataPos] & 0xff;
|
||||
var upperLeftByte = Mem[upperDataPos - 3] & 0xff;
|
||||
predicted = prevByte + upperByte - upperLeftByte;
|
||||
var pa = Math.Abs((int)(predicted - prevByte));
|
||||
var pb = Math.Abs((int)(predicted - upperByte));
|
||||
@@ -1336,15 +1326,15 @@ internal sealed class RarVM : BitInput
|
||||
predicted = prevByte;
|
||||
}
|
||||
|
||||
prevByte = ((predicted - mem[srcPos++]) & 0xff) & 0xff;
|
||||
mem[destDataPos + i] = (byte)(prevByte & 0xff);
|
||||
prevByte = ((predicted - Mem[srcPos++]) & 0xff) & 0xff;
|
||||
Mem[destDataPos + i] = (byte)(prevByte & 0xff);
|
||||
}
|
||||
}
|
||||
for (int i = posR, border = dataSize - 2; i < border; i += 3)
|
||||
{
|
||||
var G = mem[destDataPos + i + 1];
|
||||
mem[destDataPos + i] = (byte)(mem[destDataPos + i] + G);
|
||||
mem[destDataPos + i + 2] = (byte)(mem[destDataPos + i + 2] + G);
|
||||
var G = Mem[destDataPos + i + 1];
|
||||
Mem[destDataPos + i] = (byte)(Mem[destDataPos + i] + G);
|
||||
Mem[destDataPos + i + 2] = (byte)(Mem[destDataPos + i + 2] + G);
|
||||
}
|
||||
}
|
||||
break;
|
||||
@@ -1357,7 +1347,7 @@ internal sealed class RarVM : BitInput
|
||||
var destDataPos = dataSize;
|
||||
|
||||
//byte *SrcData=Mem,*DestData=SrcData+DataSize;
|
||||
SetValue(false, mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
if (dataSize >= VM_GLOBALMEMADDR / 2)
|
||||
{
|
||||
break;
|
||||
@@ -1387,10 +1377,10 @@ internal sealed class RarVM : BitInput
|
||||
var predicted = (8 * prevByte) + (K1 * D1) + (K2 * D2) + (K3 * D3);
|
||||
predicted = Utility.URShift(predicted, 3) & 0xff;
|
||||
|
||||
long curByte = mem[srcPos++];
|
||||
long curByte = Mem[srcPos++];
|
||||
|
||||
predicted -= curByte;
|
||||
mem[destDataPos + i] = (byte)predicted;
|
||||
Mem[destDataPos + i] = (byte)predicted;
|
||||
prevDelta = (byte)(predicted - prevByte);
|
||||
|
||||
//fix java byte
|
||||
@@ -1490,15 +1480,15 @@ internal sealed class RarVM : BitInput
|
||||
}
|
||||
while (srcPos < dataSize)
|
||||
{
|
||||
var curByte = mem[srcPos++];
|
||||
if (curByte == 2 && (curByte = mem[srcPos++]) != 2)
|
||||
var curByte = Mem[srcPos++];
|
||||
if (curByte == 2 && (curByte = Mem[srcPos++]) != 2)
|
||||
{
|
||||
curByte = (byte)(curByte - 32);
|
||||
}
|
||||
mem[destPos++] = curByte;
|
||||
Mem[destPos++] = curByte;
|
||||
}
|
||||
SetValue(false, mem, VM_GLOBALMEMADDR + 0x1c, destPos - dataSize);
|
||||
SetValue(false, mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
SetValue(false, Mem, VM_GLOBALMEMADDR + 0x1c, destPos - dataSize);
|
||||
SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -1538,14 +1528,15 @@ internal sealed class RarVM : BitInput
|
||||
{
|
||||
if (pos < VM_MEMSIZE)
|
||||
{
|
||||
// Use Array.Copy for fast bulk memory operations instead of byte-by-byte loop
|
||||
// Calculate how much data can actually fit in VM memory
|
||||
int copyLength = Math.Min(dataSize, VM_MEMSIZE - pos);
|
||||
copyLength = Math.Min(copyLength, data.Length - offset);
|
||||
|
||||
if (copyLength > 0)
|
||||
//&& data!=Mem+Pos)
|
||||
//memmove(Mem+Pos,Data,Min(DataSize,VM_MEMSIZE-Pos));
|
||||
for (var i = 0; i < Math.Min(data.Length - offset, dataSize); i++)
|
||||
{
|
||||
Array.Copy(data, offset, Mem, pos, copyLength);
|
||||
if ((VM_MEMSIZE - pos) < i)
|
||||
{
|
||||
break;
|
||||
}
|
||||
Mem[pos + i] = data[offset + i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,79 +1,78 @@
|
||||
namespace SharpCompress.Compressors.Shrink
|
||||
namespace SharpCompress.Compressors.Shrink;
|
||||
|
||||
internal class BitStream
|
||||
{
|
||||
internal class BitStream
|
||||
private byte[] _src;
|
||||
private int _srcLen;
|
||||
private int _byteIdx;
|
||||
private int _bitIdx;
|
||||
private int _bitsLeft;
|
||||
private ulong _bitBuffer;
|
||||
private static uint[] _maskBits = new uint[17]
|
||||
{
|
||||
private byte[] _src;
|
||||
private int _srcLen;
|
||||
private int _byteIdx;
|
||||
private int _bitIdx;
|
||||
private int _bitsLeft;
|
||||
private ulong _bitBuffer;
|
||||
private static uint[] _maskBits = new uint[17]
|
||||
{
|
||||
0U,
|
||||
1U,
|
||||
3U,
|
||||
7U,
|
||||
15U,
|
||||
31U,
|
||||
63U,
|
||||
(uint)sbyte.MaxValue,
|
||||
(uint)byte.MaxValue,
|
||||
511U,
|
||||
1023U,
|
||||
2047U,
|
||||
4095U,
|
||||
8191U,
|
||||
16383U,
|
||||
(uint)short.MaxValue,
|
||||
(uint)ushort.MaxValue,
|
||||
};
|
||||
0U,
|
||||
1U,
|
||||
3U,
|
||||
7U,
|
||||
15U,
|
||||
31U,
|
||||
63U,
|
||||
(uint)sbyte.MaxValue,
|
||||
(uint)byte.MaxValue,
|
||||
511U,
|
||||
1023U,
|
||||
2047U,
|
||||
4095U,
|
||||
8191U,
|
||||
16383U,
|
||||
(uint)short.MaxValue,
|
||||
(uint)ushort.MaxValue,
|
||||
};
|
||||
|
||||
public BitStream(byte[] src, int srcLen)
|
||||
public BitStream(byte[] src, int srcLen)
|
||||
{
|
||||
_src = src;
|
||||
_srcLen = srcLen;
|
||||
_byteIdx = 0;
|
||||
_bitIdx = 0;
|
||||
}
|
||||
|
||||
public int BytesRead => (_byteIdx << 3) + _bitIdx;
|
||||
|
||||
private int NextByte()
|
||||
{
|
||||
if (_byteIdx >= _srcLen)
|
||||
{
|
||||
_src = src;
|
||||
_srcLen = srcLen;
|
||||
_byteIdx = 0;
|
||||
_bitIdx = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
public int BytesRead => (_byteIdx << 3) + _bitIdx;
|
||||
return _src[_byteIdx++];
|
||||
}
|
||||
|
||||
private int NextByte()
|
||||
public int NextBits(int nbits)
|
||||
{
|
||||
var result = 0;
|
||||
if (nbits > _bitsLeft)
|
||||
{
|
||||
if (_byteIdx >= _srcLen)
|
||||
int num;
|
||||
while (_bitsLeft <= 24 && (num = NextByte()) != 1234)
|
||||
{
|
||||
return 0;
|
||||
_bitBuffer |= (ulong)num << _bitsLeft;
|
||||
_bitsLeft += 8;
|
||||
}
|
||||
|
||||
return _src[_byteIdx++];
|
||||
}
|
||||
result = (int)((long)_bitBuffer & (long)_maskBits[nbits]);
|
||||
_bitBuffer >>= nbits;
|
||||
_bitsLeft -= nbits;
|
||||
return result;
|
||||
}
|
||||
|
||||
public int NextBits(int nbits)
|
||||
public bool Advance(int count)
|
||||
{
|
||||
if (_byteIdx > _srcLen)
|
||||
{
|
||||
var result = 0;
|
||||
if (nbits > _bitsLeft)
|
||||
{
|
||||
int num;
|
||||
while (_bitsLeft <= 24 && (num = NextByte()) != 1234)
|
||||
{
|
||||
_bitBuffer |= (ulong)num << _bitsLeft;
|
||||
_bitsLeft += 8;
|
||||
}
|
||||
}
|
||||
result = (int)((long)_bitBuffer & (long)_maskBits[nbits]);
|
||||
_bitBuffer >>= nbits;
|
||||
_bitsLeft -= nbits;
|
||||
return result;
|
||||
}
|
||||
|
||||
public bool Advance(int count)
|
||||
{
|
||||
if (_byteIdx > _srcLen)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,275 +1,297 @@
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Compressors.Shrink
|
||||
namespace SharpCompress.Compressors.Shrink;
|
||||
|
||||
public class HwUnshrink
|
||||
{
|
||||
public class HwUnshrink
|
||||
private const int MIN_CODE_SIZE = 9;
|
||||
private const int MAX_CODE_SIZE = 13;
|
||||
|
||||
private const ushort MAX_CODE = (ushort)((1U << MAX_CODE_SIZE) - 1);
|
||||
private const ushort INVALID_CODE = ushort.MaxValue;
|
||||
private const ushort CONTROL_CODE = 256;
|
||||
private const ushort INC_CODE_SIZE = 1;
|
||||
private const ushort PARTIAL_CLEAR = 2;
|
||||
|
||||
private const int HASH_BITS = MAX_CODE_SIZE + 1; // For a load factor of 0.5.
|
||||
private const int HASHTAB_SIZE = 1 << HASH_BITS;
|
||||
private const ushort UNKNOWN_LEN = ushort.MaxValue;
|
||||
|
||||
private struct CodeTabEntry
|
||||
{
|
||||
private const int MIN_CODE_SIZE = 9;
|
||||
private const int MAX_CODE_SIZE = 13;
|
||||
public int prefixCode; // INVALID_CODE means the entry is invalid.
|
||||
public byte extByte;
|
||||
public ushort len;
|
||||
public int lastDstPos;
|
||||
}
|
||||
|
||||
private const ushort MAX_CODE = (ushort)((1U << MAX_CODE_SIZE) - 1);
|
||||
private const ushort INVALID_CODE = ushort.MaxValue;
|
||||
private const ushort CONTROL_CODE = 256;
|
||||
private const ushort INC_CODE_SIZE = 1;
|
||||
private const ushort PARTIAL_CLEAR = 2;
|
||||
|
||||
private const int HASH_BITS = MAX_CODE_SIZE + 1; // For a load factor of 0.5.
|
||||
private const int HASHTAB_SIZE = 1 << HASH_BITS;
|
||||
private const ushort UNKNOWN_LEN = ushort.MaxValue;
|
||||
|
||||
private struct CodeTabEntry
|
||||
private static void CodeTabInit(CodeTabEntry[] codeTab)
|
||||
{
|
||||
for (var i = 0; i <= byte.MaxValue; i++)
|
||||
{
|
||||
public int prefixCode; // INVALID_CODE means the entry is invalid.
|
||||
public byte extByte;
|
||||
public ushort len;
|
||||
public int lastDstPos;
|
||||
codeTab[i].prefixCode = (ushort)i;
|
||||
codeTab[i].extByte = (byte)i;
|
||||
codeTab[i].len = 1;
|
||||
}
|
||||
|
||||
private static void CodeTabInit(CodeTabEntry[] codeTab)
|
||||
for (var i = byte.MaxValue + 1; i <= MAX_CODE; i++)
|
||||
{
|
||||
for (var i = 0; i <= byte.MaxValue; i++)
|
||||
{
|
||||
codeTab[i].prefixCode = (ushort)i;
|
||||
codeTab[i].extByte = (byte)i;
|
||||
codeTab[i].len = 1;
|
||||
}
|
||||
codeTab[i].prefixCode = INVALID_CODE;
|
||||
}
|
||||
}
|
||||
|
||||
for (var i = byte.MaxValue + 1; i <= MAX_CODE; i++)
|
||||
private static void UnshrinkPartialClear(CodeTabEntry[] codeTab, ref CodeQueue queue)
|
||||
{
|
||||
var isPrefix = new bool[MAX_CODE + 1];
|
||||
int codeQueueSize;
|
||||
|
||||
// Scan for codes that have been used as a prefix.
|
||||
for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++)
|
||||
{
|
||||
if (codeTab[i].prefixCode != INVALID_CODE)
|
||||
{
|
||||
isPrefix[codeTab[i].prefixCode] = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Clear "non-prefix" codes in the table; populate the code queue.
|
||||
codeQueueSize = 0;
|
||||
for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++)
|
||||
{
|
||||
if (!isPrefix[i])
|
||||
{
|
||||
codeTab[i].prefixCode = INVALID_CODE;
|
||||
queue.codes[codeQueueSize++] = (ushort)i;
|
||||
}
|
||||
}
|
||||
|
||||
private static void UnshrinkPartialClear(CodeTabEntry[] codeTab, ref CodeQueue queue)
|
||||
queue.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker.
|
||||
queue.nextIdx = 0;
|
||||
}
|
||||
|
||||
private static bool ReadCode(
|
||||
BitStream stream,
|
||||
ref int codeSize,
|
||||
CodeTabEntry[] codeTab,
|
||||
ref CodeQueue queue,
|
||||
out int nextCode
|
||||
)
|
||||
{
|
||||
int code,
|
||||
controlCode;
|
||||
|
||||
code = (int)stream.NextBits(codeSize);
|
||||
if (!stream.Advance(codeSize))
|
||||
{
|
||||
var isPrefix = new bool[MAX_CODE + 1];
|
||||
int codeQueueSize;
|
||||
|
||||
// Scan for codes that have been used as a prefix.
|
||||
for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++)
|
||||
{
|
||||
if (codeTab[i].prefixCode != INVALID_CODE)
|
||||
{
|
||||
isPrefix[codeTab[i].prefixCode] = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Clear "non-prefix" codes in the table; populate the code queue.
|
||||
codeQueueSize = 0;
|
||||
for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++)
|
||||
{
|
||||
if (!isPrefix[i])
|
||||
{
|
||||
codeTab[i].prefixCode = INVALID_CODE;
|
||||
queue.codes[codeQueueSize++] = (ushort)i;
|
||||
}
|
||||
}
|
||||
|
||||
queue.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker.
|
||||
queue.nextIdx = 0;
|
||||
nextCode = INVALID_CODE;
|
||||
return false;
|
||||
}
|
||||
|
||||
private static bool ReadCode(
|
||||
BitStream stream,
|
||||
ref int codeSize,
|
||||
CodeTabEntry[] codeTab,
|
||||
ref CodeQueue queue,
|
||||
out int nextCode
|
||||
)
|
||||
// Handle regular codes (the common case).
|
||||
if (code != CONTROL_CODE)
|
||||
{
|
||||
int code,
|
||||
controlCode;
|
||||
|
||||
code = (int)stream.NextBits(codeSize);
|
||||
if (!stream.Advance(codeSize))
|
||||
{
|
||||
nextCode = INVALID_CODE;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Handle regular codes (the common case).
|
||||
if (code != CONTROL_CODE)
|
||||
{
|
||||
nextCode = code;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Handle control codes.
|
||||
controlCode = (ushort)stream.NextBits(codeSize);
|
||||
if (!stream.Advance(codeSize))
|
||||
{
|
||||
nextCode = INVALID_CODE;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (controlCode == INC_CODE_SIZE && codeSize < MAX_CODE_SIZE)
|
||||
{
|
||||
codeSize++;
|
||||
return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode);
|
||||
}
|
||||
|
||||
if (controlCode == PARTIAL_CLEAR)
|
||||
{
|
||||
UnshrinkPartialClear(codeTab, ref queue);
|
||||
return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode);
|
||||
}
|
||||
nextCode = code;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Handle control codes.
|
||||
controlCode = (ushort)stream.NextBits(codeSize);
|
||||
if (!stream.Advance(codeSize))
|
||||
{
|
||||
nextCode = INVALID_CODE;
|
||||
return true;
|
||||
}
|
||||
|
||||
private static void CopyFromPrevPos(byte[] dst, int prevPos, int dstPos, int len)
|
||||
if (controlCode == INC_CODE_SIZE && codeSize < MAX_CODE_SIZE)
|
||||
{
|
||||
if (dstPos + len > dst.Length)
|
||||
{
|
||||
// Not enough room in dst for the sloppy copy below.
|
||||
Array.Copy(dst, prevPos, dst, dstPos, len);
|
||||
return;
|
||||
}
|
||||
|
||||
if (prevPos + len > dstPos)
|
||||
{
|
||||
// Benign one-byte overlap possible in the KwKwK case.
|
||||
//assert(prevPos + len == dstPos + 1);
|
||||
//assert(dst[prevPos] == dst[prevPos + len - 1]);
|
||||
}
|
||||
|
||||
Buffer.BlockCopy(dst, prevPos, dst, dstPos, len);
|
||||
codeSize++;
|
||||
return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode);
|
||||
}
|
||||
|
||||
private static UnshrnkStatus OutputCode(
|
||||
int code,
|
||||
byte[] dst,
|
||||
int dstPos,
|
||||
int dstCap,
|
||||
int prevCode,
|
||||
CodeTabEntry[] codeTab,
|
||||
ref CodeQueue queue,
|
||||
out byte firstByte,
|
||||
out int len
|
||||
)
|
||||
if (controlCode == PARTIAL_CLEAR)
|
||||
{
|
||||
int prefixCode;
|
||||
UnshrinkPartialClear(codeTab, ref queue);
|
||||
return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode);
|
||||
}
|
||||
|
||||
//assert(code <= MAX_CODE && code != CONTROL_CODE);
|
||||
//assert(dstPos < dstCap);
|
||||
nextCode = INVALID_CODE;
|
||||
return true;
|
||||
}
|
||||
|
||||
private static void CopyFromPrevPos(byte[] dst, int prevPos, int dstPos, int len)
|
||||
{
|
||||
if (dstPos + len > dst.Length)
|
||||
{
|
||||
// Not enough room in dst for the sloppy copy below.
|
||||
Array.Copy(dst, prevPos, dst, dstPos, len);
|
||||
return;
|
||||
}
|
||||
|
||||
if (prevPos + len > dstPos)
|
||||
{
|
||||
// Benign one-byte overlap possible in the KwKwK case.
|
||||
//assert(prevPos + len == dstPos + 1);
|
||||
//assert(dst[prevPos] == dst[prevPos + len - 1]);
|
||||
}
|
||||
|
||||
Buffer.BlockCopy(dst, prevPos, dst, dstPos, len);
|
||||
}
|
||||
|
||||
private static UnshrnkStatus OutputCode(
|
||||
int code,
|
||||
byte[] dst,
|
||||
int dstPos,
|
||||
int dstCap,
|
||||
int prevCode,
|
||||
CodeTabEntry[] codeTab,
|
||||
ref CodeQueue queue,
|
||||
out byte firstByte,
|
||||
out int len
|
||||
)
|
||||
{
|
||||
int prefixCode;
|
||||
|
||||
//assert(code <= MAX_CODE && code != CONTROL_CODE);
|
||||
//assert(dstPos < dstCap);
|
||||
firstByte = 0;
|
||||
if (code <= byte.MaxValue)
|
||||
{
|
||||
// Output literal byte.
|
||||
firstByte = (byte)code;
|
||||
len = 1;
|
||||
dst[dstPos] = (byte)code;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
if (codeTab[code].prefixCode == INVALID_CODE || codeTab[code].prefixCode == code)
|
||||
{
|
||||
// Reject invalid codes. Self-referential codes may exist in the table but cannot be used.
|
||||
firstByte = 0;
|
||||
if (code <= byte.MaxValue)
|
||||
{
|
||||
// Output literal byte.
|
||||
firstByte = (byte)code;
|
||||
len = 1;
|
||||
dst[dstPos] = (byte)code;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
len = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
if (codeTab[code].prefixCode == INVALID_CODE || codeTab[code].prefixCode == code)
|
||||
{
|
||||
// Reject invalid codes. Self-referential codes may exist in the table but cannot be used.
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
if (codeTab[code].len != UNKNOWN_LEN)
|
||||
{
|
||||
// Output string with known length (the common case).
|
||||
if (dstCap - dstPos < codeTab[code].len)
|
||||
{
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
CopyFromPrevPos(dst, codeTab[code].lastDstPos, dstPos, codeTab[code].len);
|
||||
firstByte = dst[dstPos];
|
||||
len = codeTab[code].len;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
// Output a string of unknown length.
|
||||
//assert(codeTab[code].len == UNKNOWN_LEN);
|
||||
prefixCode = codeTab[code].prefixCode;
|
||||
// assert(prefixCode > CONTROL_CODE);
|
||||
|
||||
if (prefixCode == queue.codes[queue.nextIdx])
|
||||
{
|
||||
// The prefix code hasn't been added yet, but we were just about to: the KwKwK case.
|
||||
//assert(codeTab[prevCode].prefixCode != INVALID_CODE);
|
||||
codeTab[prefixCode].prefixCode = prevCode;
|
||||
codeTab[prefixCode].extByte = firstByte;
|
||||
codeTab[prefixCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[prefixCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
dst[dstPos] = firstByte;
|
||||
}
|
||||
else if (codeTab[prefixCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// The prefix code is still invalid.
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
// Output the prefix string, then the extension byte.
|
||||
len = codeTab[prefixCode].len + 1;
|
||||
if (dstCap - dstPos < len)
|
||||
if (codeTab[code].len != UNKNOWN_LEN)
|
||||
{
|
||||
// Output string with known length (the common case).
|
||||
if (dstCap - dstPos < codeTab[code].len)
|
||||
{
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
CopyFromPrevPos(dst, codeTab[prefixCode].lastDstPos, dstPos, codeTab[prefixCode].len);
|
||||
dst[dstPos + len - 1] = codeTab[code].extByte;
|
||||
CopyFromPrevPos(dst, codeTab[code].lastDstPos, dstPos, codeTab[code].len);
|
||||
firstByte = dst[dstPos];
|
||||
|
||||
// Update the code table now that the string has a length and pos.
|
||||
//assert(prevCode != code);
|
||||
codeTab[code].len = (ushort)len;
|
||||
codeTab[code].lastDstPos = dstPos;
|
||||
|
||||
len = codeTab[code].len;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
public static UnshrnkStatus Unshrink(
|
||||
byte[] src,
|
||||
int srcLen,
|
||||
out int srcUsed,
|
||||
byte[] dst,
|
||||
int dstCap,
|
||||
out int dstUsed
|
||||
)
|
||||
// Output a string of unknown length.
|
||||
//assert(codeTab[code].len == UNKNOWN_LEN);
|
||||
prefixCode = codeTab[code].prefixCode;
|
||||
// assert(prefixCode > CONTROL_CODE);
|
||||
|
||||
if (prefixCode == queue.codes[queue.nextIdx])
|
||||
{
|
||||
var codeTab = new CodeTabEntry[HASHTAB_SIZE];
|
||||
var queue = new CodeQueue();
|
||||
var stream = new BitStream(src, srcLen);
|
||||
int codeSize,
|
||||
dstPos,
|
||||
len;
|
||||
int currCode,
|
||||
prevCode,
|
||||
newCode;
|
||||
byte firstByte;
|
||||
// The prefix code hasn't been added yet, but we were just about to: the KwKwK case.
|
||||
//assert(codeTab[prevCode].prefixCode != INVALID_CODE);
|
||||
codeTab[prefixCode].prefixCode = prevCode;
|
||||
codeTab[prefixCode].extByte = firstByte;
|
||||
codeTab[prefixCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[prefixCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
dst[dstPos] = firstByte;
|
||||
}
|
||||
else if (codeTab[prefixCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// The prefix code is still invalid.
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
CodeTabInit(codeTab);
|
||||
CodeQueueInit(ref queue);
|
||||
codeSize = MIN_CODE_SIZE;
|
||||
dstPos = 0;
|
||||
// Output the prefix string, then the extension byte.
|
||||
len = codeTab[prefixCode].len + 1;
|
||||
if (dstCap - dstPos < len)
|
||||
{
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
// Handle the first code separately since there is no previous code.
|
||||
if (!ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode))
|
||||
CopyFromPrevPos(dst, codeTab[prefixCode].lastDstPos, dstPos, codeTab[prefixCode].len);
|
||||
dst[dstPos + len - 1] = codeTab[code].extByte;
|
||||
firstByte = dst[dstPos];
|
||||
|
||||
// Update the code table now that the string has a length and pos.
|
||||
//assert(prevCode != code);
|
||||
codeTab[code].len = (ushort)len;
|
||||
codeTab[code].lastDstPos = dstPos;
|
||||
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
public static UnshrnkStatus Unshrink(
|
||||
byte[] src,
|
||||
int srcLen,
|
||||
out int srcUsed,
|
||||
byte[] dst,
|
||||
int dstCap,
|
||||
out int dstUsed
|
||||
)
|
||||
{
|
||||
var codeTab = new CodeTabEntry[HASHTAB_SIZE];
|
||||
var queue = new CodeQueue();
|
||||
var stream = new BitStream(src, srcLen);
|
||||
int codeSize,
|
||||
dstPos,
|
||||
len;
|
||||
int currCode,
|
||||
prevCode,
|
||||
newCode;
|
||||
byte firstByte;
|
||||
|
||||
CodeTabInit(codeTab);
|
||||
CodeQueueInit(ref queue);
|
||||
codeSize = MIN_CODE_SIZE;
|
||||
dstPos = 0;
|
||||
|
||||
// Handle the first code separately since there is no previous code.
|
||||
if (!ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode))
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
//assert(currCode != CONTROL_CODE);
|
||||
if (currCode > byte.MaxValue)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Error; // The first code must be a literal.
|
||||
}
|
||||
|
||||
if (dstPos == dstCap)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
firstByte = (byte)currCode;
|
||||
dst[dstPos] = (byte)currCode;
|
||||
codeTab[currCode].lastDstPos = dstPos;
|
||||
dstPos++;
|
||||
|
||||
prevCode = currCode;
|
||||
while (ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode))
|
||||
{
|
||||
if (currCode == INVALID_CODE)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
//assert(currCode != CONTROL_CODE);
|
||||
if (currCode > byte.MaxValue)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Error; // The first code must be a literal.
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
if (dstPos == dstCap)
|
||||
@@ -279,153 +301,130 @@ namespace SharpCompress.Compressors.Shrink
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
firstByte = (byte)currCode;
|
||||
dst[dstPos] = (byte)currCode;
|
||||
codeTab[currCode].lastDstPos = dstPos;
|
||||
dstPos++;
|
||||
|
||||
prevCode = currCode;
|
||||
while (ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode))
|
||||
// Handle KwKwK: next code used before being added.
|
||||
if (currCode == queue.codes[queue.nextIdx])
|
||||
{
|
||||
if (currCode == INVALID_CODE)
|
||||
if (codeTab[prevCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// The previous code is no longer valid.
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
if (dstPos == dstCap)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
// Handle KwKwK: next code used before being added.
|
||||
if (currCode == queue.codes[queue.nextIdx])
|
||||
{
|
||||
if (codeTab[prevCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// The previous code is no longer valid.
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
// Extend the previous code with its first byte.
|
||||
//assert(currCode != prevCode);
|
||||
codeTab[currCode].prefixCode = prevCode;
|
||||
codeTab[currCode].extByte = firstByte;
|
||||
codeTab[currCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[currCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
//assert(dstPos < dstCap);
|
||||
dst[dstPos] = firstByte;
|
||||
}
|
||||
|
||||
// Output the string represented by the current code.
|
||||
var status = OutputCode(
|
||||
currCode,
|
||||
dst,
|
||||
dstPos,
|
||||
dstCap,
|
||||
prevCode,
|
||||
codeTab,
|
||||
ref queue,
|
||||
out firstByte,
|
||||
out len
|
||||
);
|
||||
if (status != UnshrnkStatus.Ok)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return status;
|
||||
}
|
||||
|
||||
// Verify that the output matches walking the prefixes.
|
||||
var c = currCode;
|
||||
for (var i = 0; i < len; i++)
|
||||
{
|
||||
// assert(codeTab[c].len == len - i);
|
||||
//assert(codeTab[c].extByte == dst[dstPos + len - i - 1]);
|
||||
c = codeTab[c].prefixCode;
|
||||
}
|
||||
|
||||
// Add a new code to the string table if there's room.
|
||||
// The string is the previous code's string extended with the first byte of the current code's string.
|
||||
newCode = CodeQueueRemoveNext(ref queue);
|
||||
if (newCode != INVALID_CODE)
|
||||
{
|
||||
//assert(codeTab[prevCode].lastDstPos < dstPos);
|
||||
codeTab[newCode].prefixCode = prevCode;
|
||||
codeTab[newCode].extByte = firstByte;
|
||||
codeTab[newCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[newCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
|
||||
if (codeTab[prevCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// prevCode was invalidated in a partial clearing. Until that code is re-used, the
|
||||
// string represented by newCode is indeterminate.
|
||||
codeTab[newCode].len = UNKNOWN_LEN;
|
||||
}
|
||||
// If prevCode was invalidated in a partial clearing, it's possible that newCode == prevCode,
|
||||
// in which case it will never be used or cleared.
|
||||
}
|
||||
|
||||
codeTab[currCode].lastDstPos = dstPos;
|
||||
dstPos += len;
|
||||
|
||||
prevCode = currCode;
|
||||
// Extend the previous code with its first byte.
|
||||
//assert(currCode != prevCode);
|
||||
codeTab[currCode].prefixCode = prevCode;
|
||||
codeTab[currCode].extByte = firstByte;
|
||||
codeTab[currCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[currCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
//assert(dstPos < dstCap);
|
||||
dst[dstPos] = firstByte;
|
||||
}
|
||||
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = dstPos;
|
||||
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
public enum UnshrnkStatus
|
||||
{
|
||||
Ok,
|
||||
Full,
|
||||
Error,
|
||||
}
|
||||
|
||||
private struct CodeQueue
|
||||
{
|
||||
public int nextIdx;
|
||||
public ushort[] codes;
|
||||
}
|
||||
|
||||
private static void CodeQueueInit(ref CodeQueue q)
|
||||
{
|
||||
int codeQueueSize;
|
||||
ushort code;
|
||||
|
||||
codeQueueSize = 0;
|
||||
q.codes = new ushort[MAX_CODE - CONTROL_CODE + 2];
|
||||
|
||||
for (code = CONTROL_CODE + 1; code <= MAX_CODE; code++)
|
||||
// Output the string represented by the current code.
|
||||
var status = OutputCode(
|
||||
currCode,
|
||||
dst,
|
||||
dstPos,
|
||||
dstCap,
|
||||
prevCode,
|
||||
codeTab,
|
||||
ref queue,
|
||||
out firstByte,
|
||||
out len
|
||||
);
|
||||
if (status != UnshrnkStatus.Ok)
|
||||
{
|
||||
q.codes[codeQueueSize++] = code;
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return status;
|
||||
}
|
||||
|
||||
//assert(codeQueueSize < q.codes.Length);
|
||||
q.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker.
|
||||
q.nextIdx = 0;
|
||||
}
|
||||
|
||||
private static ushort CodeQueueNext(ref CodeQueue q) =>
|
||||
//assert(q.nextIdx < q.codes.Length);
|
||||
q.codes[q.nextIdx];
|
||||
|
||||
private static ushort CodeQueueRemoveNext(ref CodeQueue q)
|
||||
{
|
||||
var code = CodeQueueNext(ref q);
|
||||
if (code != INVALID_CODE)
|
||||
// Verify that the output matches walking the prefixes.
|
||||
var c = currCode;
|
||||
for (var i = 0; i < len; i++)
|
||||
{
|
||||
q.nextIdx++;
|
||||
// assert(codeTab[c].len == len - i);
|
||||
//assert(codeTab[c].extByte == dst[dstPos + len - i - 1]);
|
||||
c = codeTab[c].prefixCode;
|
||||
}
|
||||
return code;
|
||||
|
||||
// Add a new code to the string table if there's room.
|
||||
// The string is the previous code's string extended with the first byte of the current code's string.
|
||||
newCode = CodeQueueRemoveNext(ref queue);
|
||||
if (newCode != INVALID_CODE)
|
||||
{
|
||||
//assert(codeTab[prevCode].lastDstPos < dstPos);
|
||||
codeTab[newCode].prefixCode = prevCode;
|
||||
codeTab[newCode].extByte = firstByte;
|
||||
codeTab[newCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[newCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
|
||||
if (codeTab[prevCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// prevCode was invalidated in a partial clearing. Until that code is re-used, the
|
||||
// string represented by newCode is indeterminate.
|
||||
codeTab[newCode].len = UNKNOWN_LEN;
|
||||
}
|
||||
// If prevCode was invalidated in a partial clearing, it's possible that newCode == prevCode,
|
||||
// in which case it will never be used or cleared.
|
||||
}
|
||||
|
||||
codeTab[currCode].lastDstPos = dstPos;
|
||||
dstPos += len;
|
||||
|
||||
prevCode = currCode;
|
||||
}
|
||||
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = dstPos;
|
||||
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
public enum UnshrnkStatus
|
||||
{
|
||||
Ok,
|
||||
Full,
|
||||
Error,
|
||||
}
|
||||
|
||||
private struct CodeQueue
|
||||
{
|
||||
public int nextIdx;
|
||||
public ushort[] codes;
|
||||
}
|
||||
|
||||
private static void CodeQueueInit(ref CodeQueue q)
|
||||
{
|
||||
int codeQueueSize;
|
||||
ushort code;
|
||||
|
||||
codeQueueSize = 0;
|
||||
q.codes = new ushort[MAX_CODE - CONTROL_CODE + 2];
|
||||
|
||||
for (code = CONTROL_CODE + 1; code <= MAX_CODE; code++)
|
||||
{
|
||||
q.codes[codeQueueSize++] = code;
|
||||
}
|
||||
|
||||
//assert(codeQueueSize < q.codes.Length);
|
||||
q.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker.
|
||||
q.nextIdx = 0;
|
||||
}
|
||||
|
||||
private static ushort CodeQueueNext(ref CodeQueue q) =>
|
||||
//assert(q.nextIdx < q.codes.Length);
|
||||
q.codes[q.nextIdx];
|
||||
|
||||
private static ushort CodeQueueRemoveNext(ref CodeQueue q)
|
||||
{
|
||||
var code = CodeQueueNext(ref q);
|
||||
if (code != INVALID_CODE)
|
||||
{
|
||||
q.nextIdx++;
|
||||
}
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,139 +7,138 @@ using System.Threading.Tasks;
|
||||
using SharpCompress.Compressors.RLE90;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Squeezed
|
||||
namespace SharpCompress.Compressors.Squeezed;
|
||||
|
||||
public class SqueezeStream : Stream, IStreamStack
|
||||
{
|
||||
public class SqueezeStream : Stream, IStreamStack
|
||||
#if DEBUG_STREAMS
|
||||
long IStreamStack.InstanceId { get; set; }
|
||||
#endif
|
||||
int IStreamStack.DefaultBufferSize { get; set; }
|
||||
|
||||
Stream IStreamStack.BaseStream() => _stream;
|
||||
|
||||
int IStreamStack.BufferSize
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
int IStreamStack.BufferPosition
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
|
||||
void IStreamStack.SetPosition(long position) { }
|
||||
|
||||
private readonly Stream _stream;
|
||||
private readonly int _compressedSize;
|
||||
private const int NUMVALS = 257;
|
||||
private const int SPEOF = 256;
|
||||
private bool _processed = false;
|
||||
|
||||
public SqueezeStream(Stream stream, int compressedSize)
|
||||
{
|
||||
_stream = stream;
|
||||
_compressedSize = compressedSize;
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugConstruct(typeof(SqueezeStream));
|
||||
#endif
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
#if DEBUG_STREAMS
|
||||
long IStreamStack.InstanceId { get; set; }
|
||||
this.DebugDispose(typeof(SqueezeStream));
|
||||
#endif
|
||||
int IStreamStack.DefaultBufferSize { get; set; }
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
Stream IStreamStack.BaseStream() => _stream;
|
||||
public override bool CanRead => true;
|
||||
|
||||
int IStreamStack.BufferSize
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotImplementedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _stream.Position;
|
||||
set => throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Flush() => throw new NotImplementedException();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_processed)
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
return 0;
|
||||
}
|
||||
int IStreamStack.BufferPosition
|
||||
_processed = true;
|
||||
using var binaryReader = new BinaryReader(_stream);
|
||||
|
||||
// Read numnodes (equivalent to convert_u16!(numnodes, buf))
|
||||
var numnodes = binaryReader.ReadUInt16();
|
||||
|
||||
// Validation: numnodes should be within bounds
|
||||
if (numnodes >= NUMVALS)
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
throw new InvalidDataException(
|
||||
$"Invalid number of nodes {numnodes} (max {NUMVALS - 1})"
|
||||
);
|
||||
}
|
||||
|
||||
void IStreamStack.SetPosition(long position) { }
|
||||
|
||||
private readonly Stream _stream;
|
||||
private readonly int _compressedSize;
|
||||
private const int NUMVALS = 257;
|
||||
private const int SPEOF = 256;
|
||||
private bool _processed = false;
|
||||
|
||||
public SqueezeStream(Stream stream, int compressedSize)
|
||||
// Handle the case where no nodes exist
|
||||
if (numnodes == 0)
|
||||
{
|
||||
_stream = stream;
|
||||
_compressedSize = compressedSize;
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugConstruct(typeof(SqueezeStream));
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
// Build dnode (tree of nodes)
|
||||
var dnode = new int[numnodes, 2];
|
||||
for (int j = 0; j < numnodes; j++)
|
||||
{
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugDispose(typeof(SqueezeStream));
|
||||
#endif
|
||||
base.Dispose(disposing);
|
||||
dnode[j, 0] = binaryReader.ReadInt16();
|
||||
dnode[j, 1] = binaryReader.ReadInt16();
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
// Initialize BitReader for reading bits
|
||||
var bitReader = new BitReader(_stream);
|
||||
var decoded = new List<byte>();
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotImplementedException();
|
||||
|
||||
public override long Position
|
||||
int i = 0;
|
||||
// Decode the buffer using the dnode tree
|
||||
while (true)
|
||||
{
|
||||
get => _stream.Position;
|
||||
set => throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Flush() => throw new NotImplementedException();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_processed)
|
||||
i = dnode[i, bitReader.ReadBit() ? 1 : 0];
|
||||
if (i < 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
_processed = true;
|
||||
using var binaryReader = new BinaryReader(_stream);
|
||||
|
||||
// Read numnodes (equivalent to convert_u16!(numnodes, buf))
|
||||
var numnodes = binaryReader.ReadUInt16();
|
||||
|
||||
// Validation: numnodes should be within bounds
|
||||
if (numnodes >= NUMVALS)
|
||||
{
|
||||
throw new InvalidDataException(
|
||||
$"Invalid number of nodes {numnodes} (max {NUMVALS - 1})"
|
||||
);
|
||||
}
|
||||
|
||||
// Handle the case where no nodes exist
|
||||
if (numnodes == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Build dnode (tree of nodes)
|
||||
var dnode = new int[numnodes, 2];
|
||||
for (int j = 0; j < numnodes; j++)
|
||||
{
|
||||
dnode[j, 0] = binaryReader.ReadInt16();
|
||||
dnode[j, 1] = binaryReader.ReadInt16();
|
||||
}
|
||||
|
||||
// Initialize BitReader for reading bits
|
||||
var bitReader = new BitReader(_stream);
|
||||
var decoded = new List<byte>();
|
||||
|
||||
int i = 0;
|
||||
// Decode the buffer using the dnode tree
|
||||
while (true)
|
||||
{
|
||||
i = dnode[i, bitReader.ReadBit() ? 1 : 0];
|
||||
if (i < 0)
|
||||
i = (short)-(i + 1);
|
||||
if (i == SPEOF)
|
||||
{
|
||||
i = (short)-(i + 1);
|
||||
if (i == SPEOF)
|
||||
{
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
decoded.Add((byte)i);
|
||||
i = 0;
|
||||
}
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
decoded.Add((byte)i);
|
||||
i = 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Unpack the decoded buffer using the RLE class
|
||||
var unpacked = RLE.UnpackRLE(decoded.ToArray());
|
||||
unpacked.CopyTo(buffer, 0);
|
||||
return unpacked.Count();
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) =>
|
||||
throw new NotImplementedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotImplementedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
throw new NotImplementedException();
|
||||
// Unpack the decoded buffer using the RLE class
|
||||
var unpacked = RLE.UnpackRLE(decoded.ToArray());
|
||||
unpacked.CopyTo(buffer, 0);
|
||||
return unpacked.Count();
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) =>
|
||||
throw new NotImplementedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotImplementedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
@@ -22,7 +22,9 @@ public class XZFooter
|
||||
|
||||
public static XZFooter FromStream(Stream stream)
|
||||
{
|
||||
var footer = new XZFooter(new BinaryReader(stream, Encoding.UTF8, true));
|
||||
var footer = new XZFooter(
|
||||
new BinaryReader(SharpCompressStream.Create(stream, leaveOpen: true), Encoding.UTF8)
|
||||
);
|
||||
footer.Process();
|
||||
return footer;
|
||||
}
|
||||
|
||||
@@ -18,7 +18,9 @@ public class XZHeader
|
||||
|
||||
public static XZHeader FromStream(Stream stream)
|
||||
{
|
||||
var header = new XZHeader(new BinaryReader(stream, Encoding.UTF8, true));
|
||||
var header = new XZHeader(
|
||||
new BinaryReader(SharpCompressStream.Create(stream, leaveOpen: true), Encoding.UTF8)
|
||||
);
|
||||
header.Process();
|
||||
return header;
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ public class XZIndex
|
||||
public static XZIndex FromStream(Stream stream, bool indexMarkerAlreadyVerified)
|
||||
{
|
||||
var index = new XZIndex(
|
||||
new BinaryReader(stream, Encoding.UTF8, true),
|
||||
new BinaryReader(SharpCompressStream.Create(stream, leaveOpen: true), Encoding.UTF8),
|
||||
indexMarkerAlreadyVerified
|
||||
);
|
||||
index.Process();
|
||||
|
||||
311
src/SharpCompress/Compressors/ZStandard/BitOperations.cs
Normal file
311
src/SharpCompress/Compressors/ZStandard/BitOperations.cs
Normal file
@@ -0,0 +1,311 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
|
||||
#if !NETCOREAPP3_0_OR_GREATER
|
||||
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
// Some routines inspired by the Stanford Bit Twiddling Hacks by Sean Eron Anderson:
|
||||
// http://graphics.stanford.edu/~seander/bithacks.html
|
||||
|
||||
namespace System.Numerics
|
||||
{
|
||||
/// <summary>
|
||||
/// Utility methods for intrinsic bit-twiddling operations.
|
||||
/// The methods use hardware intrinsics when available on the underlying platform,
|
||||
/// otherwise they use optimized software fallbacks.
|
||||
/// </summary>
|
||||
public static unsafe class BitOperations
|
||||
{
|
||||
// hack: should be public because of inline
|
||||
public static readonly byte* TrailingZeroCountDeBruijn = GetArrayPointer(
|
||||
new byte[]
|
||||
{
|
||||
00,
|
||||
01,
|
||||
28,
|
||||
02,
|
||||
29,
|
||||
14,
|
||||
24,
|
||||
03,
|
||||
30,
|
||||
22,
|
||||
20,
|
||||
15,
|
||||
25,
|
||||
17,
|
||||
04,
|
||||
08,
|
||||
31,
|
||||
27,
|
||||
13,
|
||||
23,
|
||||
21,
|
||||
19,
|
||||
16,
|
||||
07,
|
||||
26,
|
||||
12,
|
||||
18,
|
||||
06,
|
||||
11,
|
||||
05,
|
||||
10,
|
||||
09,
|
||||
}
|
||||
);
|
||||
|
||||
// hack: should be public because of inline
|
||||
public static readonly byte* Log2DeBruijn = GetArrayPointer(
|
||||
new byte[]
|
||||
{
|
||||
00,
|
||||
09,
|
||||
01,
|
||||
10,
|
||||
13,
|
||||
21,
|
||||
02,
|
||||
29,
|
||||
11,
|
||||
14,
|
||||
16,
|
||||
18,
|
||||
22,
|
||||
25,
|
||||
03,
|
||||
30,
|
||||
08,
|
||||
12,
|
||||
20,
|
||||
28,
|
||||
15,
|
||||
17,
|
||||
24,
|
||||
07,
|
||||
19,
|
||||
27,
|
||||
23,
|
||||
06,
|
||||
26,
|
||||
05,
|
||||
04,
|
||||
31,
|
||||
}
|
||||
);
|
||||
|
||||
/// <summary>
|
||||
/// Returns the integer (floor) log of the specified value, base 2.
|
||||
/// Note that by convention, input value 0 returns 0 since log(0) is undefined.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int Log2(uint value)
|
||||
{
|
||||
// The 0->0 contract is fulfilled by setting the LSB to 1.
|
||||
// Log(1) is 0, and setting the LSB for values > 1 does not change the log2 result.
|
||||
value |= 1;
|
||||
|
||||
// value lzcnt actual expected
|
||||
// ..0001 31 31-31 0
|
||||
// ..0010 30 31-30 1
|
||||
// 0010.. 2 31-2 29
|
||||
// 0100.. 1 31-1 30
|
||||
// 1000.. 0 31-0 31
|
||||
|
||||
// Fallback contract is 0->0
|
||||
// No AggressiveInlining due to large method size
|
||||
// Has conventional contract 0->0 (Log(0) is undefined)
|
||||
|
||||
// Fill trailing zeros with ones, eg 00010010 becomes 00011111
|
||||
value |= value >> 01;
|
||||
value |= value >> 02;
|
||||
value |= value >> 04;
|
||||
value |= value >> 08;
|
||||
value |= value >> 16;
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return Log2DeBruijn[
|
||||
// Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_1100_0100_1010_1100_1101_1101u
|
||||
(int)((value * 0x07C4ACDDu) >> 27)
|
||||
];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns the integer (floor) log of the specified value, base 2.
|
||||
/// Note that by convention, input value 0 returns 0 since log(0) is undefined.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int Log2(ulong value)
|
||||
{
|
||||
value |= 1;
|
||||
|
||||
uint hi = (uint)(value >> 32);
|
||||
|
||||
if (hi == 0)
|
||||
{
|
||||
return Log2((uint)value);
|
||||
}
|
||||
|
||||
return 32 + Log2(hi);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in an integer value.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(int value) => TrailingZeroCount((uint)value);
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in an integer value.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(uint value)
|
||||
{
|
||||
// Unguarded fallback contract is 0->0, BSF contract is 0->undefined
|
||||
if (value == 0)
|
||||
{
|
||||
return 32;
|
||||
}
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return TrailingZeroCountDeBruijn[
|
||||
// Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_0111_1100_1011_0101_0011_0001u
|
||||
(int)(((value & (uint)-(int)value) * 0x077CB531u) >> 27)
|
||||
]; // Multi-cast mitigates redundant conv.u8
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(long value) => TrailingZeroCount((ulong)value);
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(ulong value)
|
||||
{
|
||||
uint lo = (uint)value;
|
||||
|
||||
if (lo == 0)
|
||||
{
|
||||
return 32 + TrailingZeroCount((uint)(value >> 32));
|
||||
}
|
||||
|
||||
return TrailingZeroCount(lo);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value left by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROL.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..31] is treated as congruent mod 32.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static uint RotateLeft(uint value, int offset) =>
|
||||
(value << offset) | (value >> (32 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value left by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROL.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..63] is treated as congruent mod 64.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static ulong RotateLeft(ulong value, int offset) =>
|
||||
(value << offset) | (value >> (64 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value right by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROR.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..31] is treated as congruent mod 32.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static uint RotateRight(uint value, int offset) =>
|
||||
(value >> offset) | (value << (32 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value right by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROR.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..63] is treated as congruent mod 64.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static ulong RotateRight(ulong value, int offset) =>
|
||||
(value >> offset) | (value << (64 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of leading zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction LZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int LeadingZeroCount(uint value)
|
||||
{
|
||||
// Unguarded fallback contract is 0->31, BSR contract is 0->undefined
|
||||
if (value == 0)
|
||||
{
|
||||
return 32;
|
||||
}
|
||||
|
||||
// No AggressiveInlining due to large method size
|
||||
// Has conventional contract 0->0 (Log(0) is undefined)
|
||||
|
||||
// Fill trailing zeros with ones, eg 00010010 becomes 00011111
|
||||
value |= value >> 01;
|
||||
value |= value >> 02;
|
||||
value |= value >> 04;
|
||||
value |= value >> 08;
|
||||
value |= value >> 16;
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return 31
|
||||
^ Log2DeBruijn[
|
||||
// uint|long -> IntPtr cast on 32-bit platforms does expensive overflow checks not needed here
|
||||
(int)((value * 0x07C4ACDDu) >> 27)
|
||||
];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of leading zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction LZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int LeadingZeroCount(ulong value)
|
||||
{
|
||||
uint hi = (uint)(value >> 32);
|
||||
|
||||
if (hi == 0)
|
||||
{
|
||||
return 32 + LeadingZeroCount((uint)value);
|
||||
}
|
||||
|
||||
return LeadingZeroCount(hi);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
301
src/SharpCompress/Compressors/ZStandard/CompressionStream.cs
Normal file
301
src/SharpCompress/Compressors/ZStandard/CompressionStream.cs
Normal file
@@ -0,0 +1,301 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
public class CompressionStream : Stream
|
||||
{
|
||||
private readonly Stream innerStream;
|
||||
private readonly byte[] outputBuffer;
|
||||
private readonly bool preserveCompressor;
|
||||
private readonly bool leaveOpen;
|
||||
private Compressor? compressor;
|
||||
private ZSTD_outBuffer_s output;
|
||||
|
||||
public CompressionStream(
|
||||
Stream stream,
|
||||
int level = Compressor.DefaultCompressionLevel,
|
||||
int bufferSize = 0,
|
||||
bool leaveOpen = true
|
||||
)
|
||||
: this(stream, new Compressor(level), bufferSize, false, leaveOpen) { }
|
||||
|
||||
public CompressionStream(
|
||||
Stream stream,
|
||||
Compressor compressor,
|
||||
int bufferSize = 0,
|
||||
bool preserveCompressor = true,
|
||||
bool leaveOpen = true
|
||||
)
|
||||
{
|
||||
if (stream == null)
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
|
||||
if (!stream.CanWrite)
|
||||
throw new ArgumentException("Stream is not writable", nameof(stream));
|
||||
|
||||
if (bufferSize < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(bufferSize));
|
||||
|
||||
innerStream = stream;
|
||||
this.compressor = compressor;
|
||||
this.preserveCompressor = preserveCompressor;
|
||||
this.leaveOpen = leaveOpen;
|
||||
|
||||
var outputBufferSize =
|
||||
bufferSize > 0
|
||||
? bufferSize
|
||||
: (int)Unsafe.Methods.ZSTD_CStreamOutSize().EnsureZstdSuccess();
|
||||
outputBuffer = ArrayPool<byte>.Shared.Rent(outputBufferSize);
|
||||
output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)outputBufferSize };
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_cParameter parameter, int value)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.NotNull().SetParameter(parameter, value);
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_cParameter parameter)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
return compressor.NotNull().GetParameter(parameter);
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.NotNull().LoadDictionary(dict);
|
||||
}
|
||||
|
||||
~CompressionStream() => Dispose(false);
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override async ValueTask DisposeAsync()
|
||||
#else
|
||||
public async Task DisposeAsync()
|
||||
#endif
|
||||
{
|
||||
if (compressor == null)
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_end).ConfigureAwait(false);
|
||||
}
|
||||
finally
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (compressor == null)
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
if (disposing)
|
||||
FlushInternal(ZSTD_EndDirective.ZSTD_e_end);
|
||||
}
|
||||
finally
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
}
|
||||
}
|
||||
|
||||
private void ReleaseUnmanagedResources()
|
||||
{
|
||||
if (!preserveCompressor)
|
||||
{
|
||||
compressor.NotNull().Dispose();
|
||||
}
|
||||
compressor = null;
|
||||
|
||||
if (outputBuffer != null)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(outputBuffer);
|
||||
}
|
||||
|
||||
if (!leaveOpen)
|
||||
{
|
||||
innerStream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
public override void Flush() => FlushInternal(ZSTD_EndDirective.ZSTD_e_flush);
|
||||
|
||||
public override async Task FlushAsync(CancellationToken cancellationToken) =>
|
||||
await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_flush, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
private void FlushInternal(ZSTD_EndDirective directive) => WriteInternal(null, directive);
|
||||
|
||||
private async Task FlushInternalAsync(
|
||||
ZSTD_EndDirective directive,
|
||||
CancellationToken cancellationToken = default
|
||||
) => await WriteInternalAsync(null, directive, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
Write(new ReadOnlySpan<byte>(buffer, offset, count));
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override void Write(ReadOnlySpan<byte> buffer) =>
|
||||
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue);
|
||||
#else
|
||||
public void Write(ReadOnlySpan<byte> buffer) =>
|
||||
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue);
|
||||
#endif
|
||||
|
||||
private void WriteInternal(ReadOnlySpan<byte> buffer, ZSTD_EndDirective directive)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
var input = new ZSTD_inBuffer_s
|
||||
{
|
||||
pos = 0,
|
||||
size = buffer != null ? (nuint)buffer.Length : 0,
|
||||
};
|
||||
nuint remaining;
|
||||
do
|
||||
{
|
||||
output.pos = 0;
|
||||
remaining = CompressStream(ref input, buffer, directive);
|
||||
|
||||
var written = (int)output.pos;
|
||||
if (written > 0)
|
||||
innerStream.Write(outputBuffer, 0, written);
|
||||
} while (
|
||||
directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0
|
||||
);
|
||||
}
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
private async ValueTask WriteInternalAsync(
|
||||
ReadOnlyMemory<byte>? buffer,
|
||||
ZSTD_EndDirective directive,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
#else
|
||||
private async Task WriteInternalAsync(
|
||||
ReadOnlyMemory<byte>? buffer,
|
||||
ZSTD_EndDirective directive,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
#endif
|
||||
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
var input = new ZSTD_inBuffer_s
|
||||
{
|
||||
pos = 0,
|
||||
size = buffer.HasValue ? (nuint)buffer.Value.Length : 0,
|
||||
};
|
||||
nuint remaining;
|
||||
do
|
||||
{
|
||||
output.pos = 0;
|
||||
remaining = CompressStream(
|
||||
ref input,
|
||||
buffer.HasValue ? buffer.Value.Span : null,
|
||||
directive
|
||||
);
|
||||
|
||||
var written = (int)output.pos;
|
||||
if (written > 0)
|
||||
await innerStream
|
||||
.WriteAsync(outputBuffer, 0, written, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
} while (
|
||||
directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0
|
||||
);
|
||||
}
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
|
||||
public override Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
) => WriteAsync(new ReadOnlyMemory<byte>(buffer, offset, count), cancellationToken).AsTask();
|
||||
|
||||
public override async ValueTask WriteAsync(
|
||||
ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
) =>
|
||||
await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
#else
|
||||
|
||||
public override Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
) => WriteAsync(new ReadOnlyMemory<byte>(buffer, offset, count), cancellationToken);
|
||||
|
||||
public async Task WriteAsync(
|
||||
ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
) =>
|
||||
await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
#endif
|
||||
|
||||
internal unsafe nuint CompressStream(
|
||||
ref ZSTD_inBuffer_s input,
|
||||
ReadOnlySpan<byte> inputBuffer,
|
||||
ZSTD_EndDirective directive
|
||||
)
|
||||
{
|
||||
fixed (byte* inputBufferPtr = inputBuffer)
|
||||
fixed (byte* outputBufferPtr = outputBuffer)
|
||||
{
|
||||
input.src = inputBufferPtr;
|
||||
output.dst = outputBufferPtr;
|
||||
return compressor
|
||||
.NotNull()
|
||||
.CompressStream(ref input, ref output, directive)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanRead => false;
|
||||
public override bool CanSeek => false;
|
||||
public override bool CanWrite => true;
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => throw new NotSupportedException();
|
||||
set => throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count) =>
|
||||
throw new NotSupportedException();
|
||||
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (compressor == null)
|
||||
throw new ObjectDisposedException(nameof(CompressionStream));
|
||||
}
|
||||
|
||||
public void SetPledgedSrcSize(ulong pledgedSrcSize)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.NotNull().SetPledgedSrcSize(pledgedSrcSize);
|
||||
}
|
||||
}
|
||||
204
src/SharpCompress/Compressors/ZStandard/Compressor.cs
Normal file
204
src/SharpCompress/Compressors/ZStandard/Compressor.cs
Normal file
@@ -0,0 +1,204 @@
|
||||
using System;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/// <summary>
/// High-level wrapper around an unmanaged zstd compression context (ZSTD_CCtx).
/// Not thread-safe: a single instance must not be used from multiple threads
/// concurrently, since all operations share one context.
/// </summary>
public unsafe class Compressor : IDisposable
{
    /// <summary>
    /// Minimum negative compression level allowed
    /// </summary>
    public static int MinCompressionLevel => Unsafe.Methods.ZSTD_minCLevel();

    /// <summary>
    /// Maximum compression level available
    /// </summary>
    public static int MaxCompressionLevel => Unsafe.Methods.ZSTD_maxCLevel();

    /// <summary>
    /// Default compression level
    /// </summary>
    /// <see cref="Unsafe.Methods.ZSTD_defaultCLevel"/>
    public const int DefaultCompressionLevel = 3;

    // Cached level so the setter can skip a redundant native call when unchanged.
    private int level = DefaultCompressionLevel;

    // Owns the unmanaged ZSTD_CCtx; released via SafeHandle semantics.
    private readonly SafeCctxHandle handle;

    /// <summary>
    /// Gets or sets the compression level. Setting a new value forwards it to the
    /// native context; setting the same value again is a no-op.
    /// </summary>
    public int Level
    {
        get => level;
        set
        {
            if (level != value)
            {
                level = value;
                SetParameter(ZSTD_cParameter.ZSTD_c_compressionLevel, value);
            }
        }
    }

    /// <summary>Sets an advanced compression parameter on the native context.</summary>
    /// <exception cref="ZstdException">The parameter or value is rejected by zstd.</exception>
    public void SetParameter(ZSTD_cParameter parameter, int value)
    {
        using var cctx = handle.Acquire();
        Unsafe.Methods.ZSTD_CCtx_setParameter(cctx, parameter, value).EnsureZstdSuccess();
    }

    /// <summary>Reads back the current value of an advanced compression parameter.</summary>
    public int GetParameter(ZSTD_cParameter parameter)
    {
        using var cctx = handle.Acquire();
        int value;
        Unsafe.Methods.ZSTD_CCtx_getParameter(cctx, parameter, &value).EnsureZstdSuccess();
        return value;
    }

    /// <summary>Loads a compression dictionary from a byte array.</summary>
    public void LoadDictionary(byte[] dict)
    {
        var dictReadOnlySpan = new ReadOnlySpan<byte>(dict);
        LoadDictionary(dictReadOnlySpan);
    }

    /// <summary>
    /// Loads a compression dictionary into the context; it applies to subsequent
    /// compression calls.
    /// </summary>
    public void LoadDictionary(ReadOnlySpan<byte> dict)
    {
        using var cctx = handle.Acquire();
        fixed (byte* dictPtr = dict)
            Unsafe
                .Methods.ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length)
                .EnsureZstdSuccess();
    }

    /// <summary>Creates a compressor with the given compression level.</summary>
    /// <exception cref="ZstdException">The native context could not be created.</exception>
    public Compressor(int level = DefaultCompressionLevel)
    {
        handle = SafeCctxHandle.Create();
        Level = level;
    }

    /// <summary>Worst-case compressed size for an input of <paramref name="length"/> bytes.</summary>
    public static int GetCompressBound(int length) =>
        (int)Unsafe.Methods.ZSTD_compressBound((nuint)length);

    /// <summary>
    /// Worst-case compressed size for a large input.
    /// NOTE(review): the (nuint)length cast truncates on 32-bit processes for
    /// lengths above uint.MaxValue — confirm intended platform support.
    /// </summary>
    public static ulong GetCompressBoundLong(ulong length) =>
        Unsafe.Methods.ZSTD_compressBound((nuint)length);

    /// <summary>Compresses <paramref name="src"/> into a freshly allocated buffer.</summary>
    /// <returns>A span over the compressed bytes (backed by a new array).</returns>
    public Span<byte> Wrap(ReadOnlySpan<byte> src)
    {
        var dest = new byte[GetCompressBound(src.Length)];
        var length = Wrap(src, dest);
        return new Span<byte>(dest, 0, length);
    }

    /// <summary>Compresses into <paramref name="dest"/> starting at <paramref name="offset"/>.</summary>
    public int Wrap(byte[] src, byte[] dest, int offset) =>
        Wrap(src, new Span<byte>(dest, offset, dest.Length - offset));

    /// <summary>
    /// Compresses <paramref name="src"/> into <paramref name="dest"/>.
    /// </summary>
    /// <returns>The number of bytes written to <paramref name="dest"/>.</returns>
    /// <exception cref="ZstdException">Compression failed, e.g. dest is too small.</exception>
    public int Wrap(ReadOnlySpan<byte> src, Span<byte> dest)
    {
        fixed (byte* srcPtr = src)
        fixed (byte* destPtr = dest)
        {
            using var cctx = handle.Acquire();
            return (int)
                Unsafe
                    .Methods.ZSTD_compress2(
                        cctx,
                        destPtr,
                        (nuint)dest.Length,
                        srcPtr,
                        (nuint)src.Length
                    )
                    .EnsureZstdSuccess();
        }
    }

    public int Wrap(ArraySegment<byte> src, ArraySegment<byte> dest) =>
        Wrap((ReadOnlySpan<byte>)src, dest);

    public int Wrap(
        byte[] src,
        int srcOffset,
        int srcLength,
        byte[] dst,
        int dstOffset,
        int dstLength
    ) =>
        Wrap(
            new ReadOnlySpan<byte>(src, srcOffset, srcLength),
            new Span<byte>(dst, dstOffset, dstLength)
        );

    /// <summary>Non-throwing variant of <see cref="Wrap(byte[],byte[],int)"/>.</summary>
    public bool TryWrap(byte[] src, byte[] dest, int offset, out int written) =>
        TryWrap(src, new Span<byte>(dest, offset, dest.Length - offset), out written);

    /// <summary>
    /// Compresses <paramref name="src"/> into <paramref name="dest"/> without throwing
    /// when the destination is too small.
    /// </summary>
    /// <returns>false if dest was too small; true with <paramref name="written"/> set otherwise.</returns>
    /// <exception cref="ZstdException">Any zstd error other than dstSize_tooSmall.</exception>
    public bool TryWrap(ReadOnlySpan<byte> src, Span<byte> dest, out int written)
    {
        fixed (byte* srcPtr = src)
        fixed (byte* destPtr = dest)
        {
            nuint returnValue;
            using (var cctx = handle.Acquire())
            {
                returnValue = Unsafe.Methods.ZSTD_compress2(
                    cctx,
                    destPtr,
                    (nuint)dest.Length,
                    srcPtr,
                    (nuint)src.Length
                );
            }

            // zstd encodes errors as (0 - errorCode); dstSize_tooSmall is the one
            // "expected" failure this Try-method converts into a false return.
            if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))
            {
                written = default;
                return false;
            }

            returnValue.EnsureZstdSuccess();
            written = (int)returnValue;
            return true;
        }
    }

    public bool TryWrap(ArraySegment<byte> src, ArraySegment<byte> dest, out int written) =>
        TryWrap((ReadOnlySpan<byte>)src, dest, out written);

    public bool TryWrap(
        byte[] src,
        int srcOffset,
        int srcLength,
        byte[] dst,
        int dstOffset,
        int dstLength,
        out int written
    ) =>
        TryWrap(
            new ReadOnlySpan<byte>(src, srcOffset, srcLength),
            new Span<byte>(dst, dstOffset, dstLength),
            out written
        );

    /// <summary>Releases the unmanaged compression context.</summary>
    public void Dispose()
    {
        handle.Dispose();
        GC.SuppressFinalize(this);
    }

    /// <summary>
    /// One streaming-compression step; advances <paramref name="input"/>/<paramref name="output"/>
    /// positions in place. Returns the zstd hint (0 means the frame/flush is complete).
    /// </summary>
    internal nuint CompressStream(
        ref ZSTD_inBuffer_s input,
        ref ZSTD_outBuffer_s output,
        ZSTD_EndDirective directive
    )
    {
        fixed (ZSTD_inBuffer_s* inputPtr = &input)
        fixed (ZSTD_outBuffer_s* outputPtr = &output)
        {
            using var cctx = handle.Acquire();
            return Unsafe
                .Methods.ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive)
                .EnsureZstdSuccess();
        }
    }

    /// <summary>
    /// Declares the total source size of the next frame, allowing zstd to write it
    /// into the frame header.
    /// </summary>
    public void SetPledgedSrcSize(ulong pledgedSrcSize)
    {
        using var cctx = handle.Acquire();
        Unsafe.Methods.ZSTD_CCtx_setPledgedSrcSize(cctx, pledgedSrcSize).EnsureZstdSuccess();
    }
}
|
||||
8
src/SharpCompress/Compressors/ZStandard/Constants.cs
Normal file
8
src/SharpCompress/Compressors/ZStandard/Constants.cs
Normal file
@@ -0,0 +1,8 @@
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/// <summary>
/// Shared numeric constants for the ZStandard wrapper.
/// </summary>
/// <remarks>
/// Declared <c>static</c>: the type only holds constants and must never be instantiated.
/// </remarks>
internal static class Constants
{
    //NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks
    //NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27
    // Largest byte[] the CLR can allocate; used to reject decompressed sizes that
    // cannot fit in a single managed array.
    public const ulong MaxByteArrayLength = 0x7FFFFFC7;
}
|
||||
293
src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs
Normal file
293
src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs
Normal file
@@ -0,0 +1,293 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/// <summary>
/// Read-only <see cref="Stream"/> that decompresses zstd-compressed data from an
/// inner stream on the fly. Not thread-safe.
/// </summary>
public class DecompressionStream : Stream
{
    private readonly Stream innerStream;
    // Rented from ArrayPool; may be larger than inputBufferSize.
    private readonly byte[] inputBuffer;
    private readonly int inputBufferSize;
    // When true, the caller owns the Decompressor and we must not dispose it.
    private readonly bool preserveDecompressor;
    private readonly bool leaveOpen;
    private readonly bool checkEndOfStream;
    // null once disposed; doubles as the disposed flag.
    private Decompressor? decompressor;
    private ZSTD_inBuffer_s input;
    // Last ZSTD_decompressStream result that made progress; non-zero at EOF means
    // the stream ended inside a frame.
    private nuint lastDecompressResult = 0;
    // False while the native context may still hold buffered output to flush.
    private bool contextDrained = true;

    /// <summary>
    /// Creates a decompression stream that owns a private <see cref="Decompressor"/>.
    /// </summary>
    /// <param name="stream">Readable stream containing zstd-compressed data.</param>
    /// <param name="bufferSize">Input buffer size; 0 uses zstd's recommended size.</param>
    /// <param name="checkEndOfStream">Throw if the source ends mid-frame.</param>
    /// <param name="leaveOpen">Leave <paramref name="stream"/> open on dispose.</param>
    public DecompressionStream(
        Stream stream,
        int bufferSize = 0,
        bool checkEndOfStream = true,
        bool leaveOpen = true
    )
        : this(stream, new Decompressor(), bufferSize, checkEndOfStream, false, leaveOpen) { }

    /// <summary>
    /// Creates a decompression stream over an existing <see cref="Decompressor"/>.
    /// </summary>
    /// <exception cref="ArgumentNullException">stream or decompressor is null.</exception>
    /// <exception cref="ArgumentException">stream is not readable.</exception>
    /// <exception cref="ArgumentOutOfRangeException">bufferSize is negative.</exception>
    public DecompressionStream(
        Stream stream,
        Decompressor decompressor,
        int bufferSize = 0,
        bool checkEndOfStream = true,
        bool preserveDecompressor = true,
        bool leaveOpen = true
    )
    {
        if (stream == null)
            throw new ArgumentNullException(nameof(stream));

        // Fail fast instead of a NullReferenceException on the first Read.
        if (decompressor == null)
            throw new ArgumentNullException(nameof(decompressor));

        if (!stream.CanRead)
            throw new ArgumentException("Stream is not readable", nameof(stream));

        if (bufferSize < 0)
            throw new ArgumentOutOfRangeException(nameof(bufferSize));

        innerStream = stream;
        this.decompressor = decompressor;
        this.preserveDecompressor = preserveDecompressor;
        this.leaveOpen = leaveOpen;
        this.checkEndOfStream = checkEndOfStream;

        inputBufferSize =
            bufferSize > 0
                ? bufferSize
                : (int)Unsafe.Methods.ZSTD_DStreamInSize().EnsureZstdSuccess();
        inputBuffer = ArrayPool<byte>.Shared.Rent(inputBufferSize);
        // pos == size marks the buffer as fully consumed so the first Read refills it.
        input = new ZSTD_inBuffer_s { pos = (nuint)inputBufferSize, size = (nuint)inputBufferSize };
    }

    /// <summary>Sets an advanced decompression parameter on the underlying context.</summary>
    public void SetParameter(ZSTD_dParameter parameter, int value)
    {
        EnsureNotDisposed();
        decompressor.NotNull().SetParameter(parameter, value);
    }

    /// <summary>Reads an advanced decompression parameter from the underlying context.</summary>
    public int GetParameter(ZSTD_dParameter parameter)
    {
        EnsureNotDisposed();
        return decompressor.NotNull().GetParameter(parameter);
    }

    /// <summary>Loads a decompression dictionary.</summary>
    public void LoadDictionary(byte[] dict)
    {
        EnsureNotDisposed();
        decompressor.NotNull().LoadDictionary(dict);
    }

    ~DecompressionStream() => Dispose(false);

    protected override void Dispose(bool disposing)
    {
        if (decompressor == null)
            return;

        // Only touch other managed objects on the explicit Dispose path. From the
        // finalizer they may already be finalized; the unmanaged context is
        // reclaimed by the Decompressor's own SafeHandle regardless.
        if (disposing)
        {
            if (!preserveDecompressor)
            {
                decompressor.Dispose();
            }

            // inputBuffer can be null if the constructor threw after assigning
            // decompressor but before renting the buffer.
            if (inputBuffer != null)
            {
                ArrayPool<byte>.Shared.Return(inputBuffer);
            }

            if (!leaveOpen)
            {
                innerStream.Dispose();
            }
        }

        decompressor = null;
    }

    public override int Read(byte[] buffer, int offset, int count) =>
        Read(new Span<byte>(buffer, offset, count));

#if !NETSTANDARD2_0 && !NETFRAMEWORK
    public override int Read(Span<byte> buffer)
#else
    public int Read(Span<byte> buffer)
#endif
    {
        EnsureNotDisposed();

        // Guard against infinite loop (output.pos would never become non-zero)
        if (buffer.Length == 0)
        {
            return 0;
        }

        var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length };
        while (true)
        {
            // If there is still input available, or there might be data buffered in the decompressor context, flush that out
            while (input.pos < input.size || !contextDrained)
            {
                nuint oldInputPos = input.pos;
                nuint result = DecompressStream(ref output, buffer);
                if (output.pos > 0 || oldInputPos != input.pos)
                {
                    // Keep result from last decompress call that made some progress, so we know if we're at end of frame
                    lastDecompressResult = result;
                }
                // If decompression filled the output buffer, there might still be data buffered in the decompressor context
                contextDrained = output.pos < output.size;
                // If we have data to return, return it immediately, so we won't stall on Read
                if (output.pos > 0)
                {
                    return (int)output.pos;
                }
            }

            // Otherwise, read some more input
            int bytesRead;
            if ((bytesRead = innerStream.Read(inputBuffer, 0, inputBufferSize)) == 0)
            {
                if (checkEndOfStream && lastDecompressResult != 0)
                {
                    throw new EndOfStreamException("Premature end of stream");
                }

                return 0;
            }

            input.size = (nuint)bytesRead;
            input.pos = 0;
        }
    }

#if !NETSTANDARD2_0 && !NETFRAMEWORK
    public override Task<int> ReadAsync(
        byte[] buffer,
        int offset,
        int count,
        CancellationToken cancellationToken
    ) => ReadAsync(new Memory<byte>(buffer, offset, count), cancellationToken).AsTask();

    public override async ValueTask<int> ReadAsync(
        Memory<byte> buffer,
        CancellationToken cancellationToken = default
    )
#else

    public override Task<int> ReadAsync(
        byte[] buffer,
        int offset,
        int count,
        CancellationToken cancellationToken
    ) => ReadAsync(new Memory<byte>(buffer, offset, count), cancellationToken);

    public async Task<int> ReadAsync(
        Memory<byte> buffer,
        CancellationToken cancellationToken = default
    )
#endif
    {
        EnsureNotDisposed();

        // Guard against infinite loop (output.pos would never become non-zero)
        if (buffer.Length == 0)
        {
            return 0;
        }

        var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length };
        while (true)
        {
            // If there is still input available, or there might be data buffered in the decompressor context, flush that out
            while (input.pos < input.size || !contextDrained)
            {
                nuint oldInputPos = input.pos;
                nuint result = DecompressStream(ref output, buffer.Span);
                if (output.pos > 0 || oldInputPos != input.pos)
                {
                    // Keep result from last decompress call that made some progress, so we know if we're at end of frame
                    lastDecompressResult = result;
                }
                // If decompression filled the output buffer, there might still be data buffered in the decompressor context
                contextDrained = output.pos < output.size;
                // If we have data to return, return it immediately, so we won't stall on Read
                if (output.pos > 0)
                {
                    return (int)output.pos;
                }
            }

            // Otherwise, read some more input
            int bytesRead;
            if (
                (
                    bytesRead = await innerStream
                        .ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken)
                        .ConfigureAwait(false)
                ) == 0
            )
            {
                if (checkEndOfStream && lastDecompressResult != 0)
                {
                    throw new EndOfStreamException("Premature end of stream");
                }

                return 0;
            }

            input.size = (nuint)bytesRead;
            input.pos = 0;
        }
    }

    // Pins the input/output buffers for the duration of one native decompress step.
    // Pointers are re-fixed on every call because the GC may move the arrays between calls.
    private unsafe nuint DecompressStream(ref ZSTD_outBuffer_s output, Span<byte> outputBuffer)
    {
        fixed (byte* inputBufferPtr = inputBuffer)
        fixed (byte* outputBufferPtr = outputBuffer)
        {
            input.src = inputBufferPtr;
            output.dst = outputBufferPtr;
            return decompressor.NotNull().DecompressStream(ref input, ref output);
        }
    }

    public override bool CanRead => true;
    public override bool CanSeek => false;
    public override bool CanWrite => false;

    public override long Length => throw new NotSupportedException();

    public override long Position
    {
        get => throw new NotSupportedException();
        set => throw new NotSupportedException();
    }

    public override void Flush() => throw new NotSupportedException();

    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();

    public override void SetLength(long value) => throw new NotSupportedException();

    public override void Write(byte[] buffer, int offset, int count) =>
        throw new NotSupportedException();

    private void EnsureNotDisposed()
    {
        if (decompressor == null)
            throw new ObjectDisposedException(nameof(DecompressionStream));
    }

#if NETSTANDARD2_0 || NETFRAMEWORK
    public virtual Task DisposeAsync()
    {
        try
        {
            Dispose();
            return Task.CompletedTask;
        }
        catch (Exception exc)
        {
            return Task.FromException(exc);
        }
    }
#endif
}
|
||||
176
src/SharpCompress/Compressors/ZStandard/Decompressor.cs
Normal file
176
src/SharpCompress/Compressors/ZStandard/Decompressor.cs
Normal file
@@ -0,0 +1,176 @@
|
||||
using System;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/// <summary>
/// High-level wrapper around an unmanaged zstd decompression context (ZSTD_DCtx).
/// Not thread-safe: all operations share one native context.
/// </summary>
public unsafe class Decompressor : IDisposable
{
    // Owns the unmanaged ZSTD_DCtx; released via SafeHandle semantics.
    private readonly SafeDctxHandle handle;

    /// <summary>Creates a decompressor with a fresh native context.</summary>
    /// <exception cref="ZstdException">The native context could not be created.</exception>
    public Decompressor()
    {
        handle = SafeDctxHandle.Create();
    }

    /// <summary>Sets an advanced decompression parameter on the native context.</summary>
    /// <exception cref="ZstdException">The parameter or value is rejected by zstd.</exception>
    public void SetParameter(ZSTD_dParameter parameter, int value)
    {
        using var dctx = handle.Acquire();
        Unsafe.Methods.ZSTD_DCtx_setParameter(dctx, parameter, value).EnsureZstdSuccess();
    }

    /// <summary>Reads back the current value of an advanced decompression parameter.</summary>
    public int GetParameter(ZSTD_dParameter parameter)
    {
        using var dctx = handle.Acquire();
        int value;
        Unsafe.Methods.ZSTD_DCtx_getParameter(dctx, parameter, &value).EnsureZstdSuccess();
        return value;
    }

    /// <summary>Loads a decompression dictionary from a byte array.</summary>
    public void LoadDictionary(byte[] dict)
    {
        var dictReadOnlySpan = new ReadOnlySpan<byte>(dict);
        this.LoadDictionary(dictReadOnlySpan);
    }

    /// <summary>
    /// Loads a decompression dictionary into the context; it applies to subsequent
    /// decompression calls.
    /// </summary>
    public void LoadDictionary(ReadOnlySpan<byte> dict)
    {
        using var dctx = handle.Acquire();
        fixed (byte* dictPtr = dict)
            Unsafe
                .Methods.ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length)
                .EnsureZstdSuccess();
    }

    /// <summary>
    /// Returns the upper bound of the decompressed size encoded in the frame header(s)
    /// of <paramref name="src"/>.
    /// </summary>
    /// <exception cref="ZstdException">The size is unknown or cannot be determined.</exception>
    public static ulong GetDecompressedSize(ReadOnlySpan<byte> src)
    {
        fixed (byte* srcPtr = src)
            return Unsafe
                .Methods.ZSTD_decompressBound(srcPtr, (nuint)src.Length)
                .EnsureContentSizeOk();
    }

    public static ulong GetDecompressedSize(ArraySegment<byte> src) =>
        GetDecompressedSize((ReadOnlySpan<byte>)src);

    public static ulong GetDecompressedSize(byte[] src, int srcOffset, int srcLength) =>
        GetDecompressedSize(new ReadOnlySpan<byte>(src, srcOffset, srcLength));

    /// <summary>
    /// Decompresses <paramref name="src"/> into a freshly allocated buffer sized from
    /// the frame header.
    /// </summary>
    /// <param name="maxDecompressedSize">Upper limit to guard against decompression bombs.</param>
    /// <exception cref="ZstdException">The declared size exceeds the limit or the CLR array maximum.</exception>
    public Span<byte> Unwrap(ReadOnlySpan<byte> src, int maxDecompressedSize = int.MaxValue)
    {
        var expectedDstSize = GetDecompressedSize(src);
        if (expectedDstSize > (ulong)maxDecompressedSize)
            throw new ZstdException(
                ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall,
                $"Decompressed content size {expectedDstSize} is greater than {nameof(maxDecompressedSize)} {maxDecompressedSize}"
            );
        if (expectedDstSize > Constants.MaxByteArrayLength)
            throw new ZstdException(
                ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall,
                $"Decompressed content size {expectedDstSize} is greater than max possible byte array size {Constants.MaxByteArrayLength}"
            );

        var dest = new byte[expectedDstSize];
        var length = Unwrap(src, dest);
        return new Span<byte>(dest, 0, length);
    }

    /// <summary>Decompresses into <paramref name="dest"/> starting at <paramref name="offset"/>.</summary>
    public int Unwrap(byte[] src, byte[] dest, int offset) =>
        Unwrap(src, new Span<byte>(dest, offset, dest.Length - offset));

    /// <summary>
    /// Decompresses <paramref name="src"/> into <paramref name="dest"/>.
    /// </summary>
    /// <returns>The number of bytes written to <paramref name="dest"/>.</returns>
    /// <exception cref="ZstdException">Decompression failed, e.g. dest is too small.</exception>
    public int Unwrap(ReadOnlySpan<byte> src, Span<byte> dest)
    {
        fixed (byte* srcPtr = src)
        fixed (byte* destPtr = dest)
        {
            using var dctx = handle.Acquire();
            return (int)
                Unsafe
                    .Methods.ZSTD_decompressDCtx(
                        dctx,
                        destPtr,
                        (nuint)dest.Length,
                        srcPtr,
                        (nuint)src.Length
                    )
                    .EnsureZstdSuccess();
        }
    }

    public int Unwrap(
        byte[] src,
        int srcOffset,
        int srcLength,
        byte[] dst,
        int dstOffset,
        int dstLength
    ) =>
        Unwrap(
            new ReadOnlySpan<byte>(src, srcOffset, srcLength),
            new Span<byte>(dst, dstOffset, dstLength)
        );

    /// <summary>Non-throwing variant of <see cref="Unwrap(byte[],byte[],int)"/>.</summary>
    public bool TryUnwrap(byte[] src, byte[] dest, int offset, out int written) =>
        TryUnwrap(src, new Span<byte>(dest, offset, dest.Length - offset), out written);

    /// <summary>
    /// Decompresses without throwing when the destination is too small.
    /// </summary>
    /// <returns>false if dest was too small; true with <paramref name="written"/> set otherwise.</returns>
    /// <exception cref="ZstdException">Any zstd error other than dstSize_tooSmall.</exception>
    public bool TryUnwrap(ReadOnlySpan<byte> src, Span<byte> dest, out int written)
    {
        fixed (byte* srcPtr = src)
        fixed (byte* destPtr = dest)
        {
            nuint returnValue;
            using (var dctx = handle.Acquire())
            {
                returnValue = Unsafe.Methods.ZSTD_decompressDCtx(
                    dctx,
                    destPtr,
                    (nuint)dest.Length,
                    srcPtr,
                    (nuint)src.Length
                );
            }

            // zstd encodes errors as (0 - errorCode); dstSize_tooSmall becomes false.
            if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))
            {
                written = default;
                return false;
            }

            returnValue.EnsureZstdSuccess();
            written = (int)returnValue;
            return true;
        }
    }

    public bool TryUnwrap(
        byte[] src,
        int srcOffset,
        int srcLength,
        byte[] dst,
        int dstOffset,
        int dstLength,
        out int written
    ) =>
        TryUnwrap(
            new ReadOnlySpan<byte>(src, srcOffset, srcLength),
            new Span<byte>(dst, dstOffset, dstLength),
            out written
        );

    /// <summary>Releases the unmanaged decompression context.</summary>
    public void Dispose()
    {
        handle.Dispose();
        GC.SuppressFinalize(this);
    }

    /// <summary>
    /// One streaming-decompression step; advances <paramref name="input"/>/<paramref name="output"/>
    /// positions in place. Returns the zstd hint (0 means a frame is complete).
    /// </summary>
    internal nuint DecompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s output)
    {
        fixed (ZSTD_inBuffer_s* inputPtr = &input)
        fixed (ZSTD_outBuffer_s* outputPtr = &output)
        {
            using var dctx = handle.Acquire();
            return Unsafe
                .Methods.ZSTD_decompressStream(dctx, outputPtr, inputPtr)
                .EnsureZstdSuccess();
        }
    }
}
|
||||
141
src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs
Normal file
141
src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs
Normal file
@@ -0,0 +1,141 @@
|
||||
using System;
|
||||
using System.Collections.Concurrent;
|
||||
using System.Collections.Generic;
|
||||
using System.Threading;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/// <summary>
/// Minimal thread pool that executes unmanaged function-pointer jobs, backing zstd's
/// multi-threaded compression. Jobs are (function, opaque) pairs invoked as
/// <c>delegate* managed&lt;void*, void&gt;</c>.
/// </summary>
internal unsafe class JobThreadPool : IDisposable
{
    // Current worker count; writes are guarded by lock(threads) in Resize.
    private int numThreads;
    private readonly List<JobThread> threads;
    // Bounded queue; capacity is queueSize + 1 (see constructor).
    private readonly BlockingCollection<Job> queue;

    // A queued unit of work: an unmanaged function pointer plus its opaque argument.
    private struct Job
    {
        public void* function;
        public void* opaque;
    }

    // Pairs a worker Thread with its per-thread cancellation source so individual
    // workers can be stopped when the pool shrinks.
    private class JobThread
    {
        private Thread Thread { get; }
        public CancellationTokenSource CancellationTokenSource { get; }

        public JobThread(Thread thread)
        {
            CancellationTokenSource = new CancellationTokenSource();
            Thread = thread;
        }

        public void Start()
        {
            // Passes itself as the thread argument so Worker can find its token.
            Thread.Start(this);
        }

        public void Cancel()
        {
            CancellationTokenSource.Cancel();
        }

        public void Join()
        {
            Thread.Join();
        }
    }

    // Worker loop: take jobs until the queue is completed or this worker is cancelled.
    private void Worker(object? obj)
    {
        if (obj is not JobThread poolThread)
            return;

        var cancellationToken = poolThread.CancellationTokenSource.Token;
        while (!queue.IsCompleted && !cancellationToken.IsCancellationRequested)
        {
            try
            {
                // -1 = infinite timeout; wakes on a job, CompleteAdding, or cancellation.
                if (queue.TryTake(out var job, -1, cancellationToken))
                    ((delegate* managed<void*, void>)job.function)(job.opaque);
            }
            // Thrown by TryTake when the collection is completed/disposed mid-wait.
            catch (InvalidOperationException) { }
            catch (OperationCanceledException) { }
        }
    }

    /// <summary>Creates a pool with <paramref name="num"/> workers and a bounded job queue.</summary>
    public JobThreadPool(int num, int queueSize)
    {
        numThreads = num;
        queue = new BlockingCollection<Job>(queueSize + 1);
        threads = new List<JobThread>(num);
        for (var i = 0; i < numThreads; i++)
            CreateThread();
    }

    // Creates, registers and starts one worker thread.
    private void CreateThread()
    {
        var poolThread = new JobThread(new Thread(Worker));
        threads.Add(poolThread);
        poolThread.Start();
    }

    /// <summary>
    /// Grows or shrinks the pool to <paramref name="num"/> workers. Shrinking cancels
    /// surplus workers cooperatively; it does not wait for them to exit.
    /// </summary>
    public void Resize(int num)
    {
        lock (threads)
        {
            if (num < numThreads)
            {
                for (var i = numThreads - 1; i >= num; i--)
                {
                    threads[i].Cancel();
                    threads.RemoveAt(i);
                }
            }
            else
            {
                for (var i = numThreads; i < num; i++)
                    CreateThread();
            }
        }

        numThreads = num;
    }

    /// <summary>Enqueues a job, blocking if the queue is at capacity.</summary>
    public void Add(void* function, void* opaque)
    {
        queue.Add(new Job { function = function, opaque = opaque });
    }

    /// <summary>Enqueues a job without blocking; returns false if the queue is full.</summary>
    public bool TryAdd(void* function, void* opaque)
    {
        return queue.TryAdd(new Job { function = function, opaque = opaque });
    }

    /// <summary>
    /// Completes the queue and waits for all workers to finish. With
    /// <paramref name="cancel"/> true, workers are also cancelled so they stop
    /// without draining remaining jobs.
    /// </summary>
    public void Join(bool cancel = true)
    {
        queue.CompleteAdding();
        List<JobThread> jobThreads;
        // Snapshot under the lock so Resize cannot mutate the list while we iterate.
        lock (threads)
            jobThreads = new List<JobThread>(threads);

        if (cancel)
        {
            foreach (var thread in jobThreads)
                thread.Cancel();
        }

        foreach (var thread in jobThreads)
            thread.Join();
    }

    // NOTE(review): Dispose only disposes the queue and does not cancel/join the
    // workers — callers appear expected to call Join() first; confirm against call
    // sites before relying on Dispose alone.
    public void Dispose()
    {
        queue.Dispose();
    }

    /// <summary>Number of queued-but-unstarted jobs; currently always 0 (not implemented).</summary>
    public int Size()
    {
        // todo not implemented
        // https://github.com/dotnet/runtime/issues/24200
        return 0;
    }
}
|
||||
163
src/SharpCompress/Compressors/ZStandard/SafeHandles.cs
Normal file
163
src/SharpCompress/Compressors/ZStandard/SafeHandles.cs
Normal file
@@ -0,0 +1,163 @@
|
||||
using System;
|
||||
using System.Runtime.InteropServices;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/// <summary>
|
||||
/// Provides the base class for ZstdSharp <see cref="SafeHandle"/> implementations.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Even though ZstdSharp is a managed library, its internals are using unmanaged
|
||||
/// memory and we are using safe handles in the library's high-level API to ensure
|
||||
/// proper disposal of unmanaged resources and increase safety.
|
||||
/// </remarks>
|
||||
/// <seealso cref="SafeCctxHandle"/>
|
||||
/// <seealso cref="SafeDctxHandle"/>
|
||||
internal abstract unsafe class SafeZstdHandle : SafeHandle
{
    /// <summary>
    /// Parameterless constructor is hidden. Use the static <c>Create</c> factory
    /// method to create a new safe handle instance.
    /// </summary>
    protected SafeZstdHandle()
        // ownsHandle: true — the derived class's ReleaseHandle frees the native context.
        : base(IntPtr.Zero, true) { }

    // A zero pointer is the only invalid value for zstd contexts.
    public sealed override bool IsInvalid => handle == IntPtr.Zero;
}
|
||||
|
||||
/// <summary>
|
||||
/// Safely wraps an unmanaged Zstd compression context.
|
||||
/// </summary>
|
||||
internal sealed unsafe class SafeCctxHandle : SafeZstdHandle
{
    /// <inheritdoc/>
    private SafeCctxHandle() { }

    /// <summary>
    /// Creates a new instance of <see cref="SafeCctxHandle"/>.
    /// </summary>
    /// <returns></returns>
    /// <exception cref="ZstdException">Creation failed.</exception>
    public static SafeCctxHandle Create()
    {
        var safeHandle = new SafeCctxHandle();
        bool success = false;
        try
        {
            var cctx = Unsafe.Methods.ZSTD_createCCtx();
            if (cctx == null)
                throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create cctx");
            safeHandle.SetHandle((IntPtr)cctx);
            success = true;
        }
        finally
        {
            // Mark the wrapper invalid on any failure so its finalizer will not
            // attempt to free a handle that was never assigned.
            if (!success)
            {
                safeHandle.SetHandleAsInvalid();
            }
        }
        return safeHandle;
    }

    /// <summary>
    /// Acquires a reference to the safe handle.
    /// </summary>
    /// <returns>
    /// A <see cref="SafeHandleHolder{T}"/> instance that can be implicitly converted to a pointer
    /// to <see cref="ZSTD_CCtx_s"/>.
    /// </returns>
    public SafeHandleHolder<ZSTD_CCtx_s> Acquire() => new(this);

    // Frees the native compression context; true when zstd reports success (0).
    protected override bool ReleaseHandle()
    {
        return Unsafe.Methods.ZSTD_freeCCtx((ZSTD_CCtx_s*)handle) == 0;
    }
}
|
||||
|
||||
/// <summary>
|
||||
/// Safely wraps an unmanaged Zstd decompression context.
|
||||
/// </summary>
|
||||
internal sealed unsafe class SafeDctxHandle : SafeZstdHandle
{
    /// <inheritdoc/>
    private SafeDctxHandle() { }

    /// <summary>
    /// Creates a new instance of <see cref="SafeDctxHandle"/>.
    /// </summary>
    /// <returns></returns>
    /// <exception cref="ZstdException">Creation failed.</exception>
    public static SafeDctxHandle Create()
    {
        var safeHandle = new SafeDctxHandle();
        bool success = false;
        try
        {
            var dctx = Unsafe.Methods.ZSTD_createDCtx();
            if (dctx == null)
                throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create dctx");
            safeHandle.SetHandle((IntPtr)dctx);
            success = true;
        }
        finally
        {
            // Mark the wrapper invalid on any failure so its finalizer will not
            // attempt to free a handle that was never assigned.
            if (!success)
            {
                safeHandle.SetHandleAsInvalid();
            }
        }
        return safeHandle;
    }

    /// <summary>
    /// Acquires a reference to the safe handle.
    /// </summary>
    /// <returns>
    /// A <see cref="SafeHandleHolder{T}"/> instance that can be implicitly converted to a pointer
    /// to <see cref="ZSTD_DCtx_s"/>.
    /// </returns>
    public SafeHandleHolder<ZSTD_DCtx_s> Acquire() => new(this);

    // Frees the native decompression context; true when zstd reports success (0).
    protected override bool ReleaseHandle()
    {
        return Unsafe.Methods.ZSTD_freeDCtx((ZSTD_DCtx_s*)handle) == 0;
    }
}
|
||||
|
||||
/// <summary>
|
||||
/// Provides a convenient interface to safely acquire pointers of a specific type
|
||||
/// from a <see cref="SafeHandle"/>, by utilizing <see langword="using"/> blocks.
|
||||
/// </summary>
|
||||
/// <typeparam name="T">The type of pointers to return.</typeparam>
|
||||
/// <remarks>
|
||||
/// Safe handle holders can be <see cref="Dispose"/>d to decrement the safe handle's
|
||||
/// reference count, and can be implicitly converted to pointers to <see cref="T"/>.
|
||||
/// </remarks>
|
||||
internal unsafe ref struct SafeHandleHolder<T>
    where T : unmanaged
{
    // The SafeHandle whose reference count this holder participates in.
    private readonly SafeHandle _owner;

    // True while this holder still owes the handle a DangerousRelease.
    private bool _acquired;

    /// <summary>
    /// Increments the reference count of <paramref name="safeHandle"/> for the
    /// lifetime of this holder. Dispose (typically via a <see langword="using"/>
    /// block) releases the reference again.
    /// </summary>
    public SafeHandleHolder(SafeHandle safeHandle)
    {
        _owner = safeHandle;
        _acquired = false;
        safeHandle.DangerousAddRef(ref _acquired);
    }

    /// <summary>
    /// Exposes the wrapped handle as a typed pointer while the holder keeps it alive.
    /// </summary>
    public static implicit operator T*(SafeHandleHolder<T> holder) =>
        (T*)holder._owner.DangerousGetHandle();

    /// <summary>
    /// Releases the reference taken in the constructor. Safe to call more than once;
    /// only the first call decrements the count.
    /// </summary>
    public void Dispose()
    {
        if (!_acquired)
        {
            return;
        }

        _acquired = false;
        _owner.DangerousRelease();
    }
}
|
||||
@@ -0,0 +1,22 @@
|
||||
using System.Threading;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/// <summary>
/// Exposes <see cref="Monitor"/>-based locking through <c>void**</c> slots so that
/// ported zstd code using pthread-style mutex/condition APIs can synchronize on a
/// managed object. <see cref="Init"/> stores a GCHandle-wrapped object in the slot;
/// <see cref="Free"/> must be called to release it.
/// </summary>
internal static unsafe class SynchronizationWrapper
{
    // Recovers the managed lock object previously stored in the slot by Init.
    private static object UnwrapObject(void** obj) => UnmanagedObject.Unwrap<object>(*obj);

    /// <summary>Allocates a new lock object and stores it in <paramref name="obj"/>.</summary>
    public static void Init(void** obj) => *obj = UnmanagedObject.Wrap(new object());

    /// <summary>Releases the GCHandle behind the slot; the slot must not be used afterwards.</summary>
    public static void Free(void** obj) => UnmanagedObject.Free(*obj);

    /// <summary>pthread_mutex_lock equivalent.</summary>
    public static void Enter(void** obj) => Monitor.Enter(UnwrapObject(obj));

    /// <summary>pthread_mutex_unlock equivalent.</summary>
    public static void Exit(void** obj) => Monitor.Exit(UnwrapObject(obj));

    /// <summary>Wakes one waiter; caller must hold the lock.</summary>
    public static void Pulse(void** obj) => Monitor.Pulse(UnwrapObject(obj));

    /// <summary>Wakes all waiters; caller must hold the lock.</summary>
    public static void PulseAll(void** obj) => Monitor.PulseAll(UnwrapObject(obj));

    /// <summary>Releases the lock and blocks until pulsed; caller must hold the lock.</summary>
    public static void Wait(void** mutex) => Monitor.Wait(UnwrapObject(mutex));
}
|
||||
48
src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs
Normal file
48
src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs
Normal file
@@ -0,0 +1,48 @@
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/// <summary>
/// Extension helpers that convert zstd/zdict native return codes into
/// <see cref="ZstdException"/>s. zstd encodes an error as <c>0 - errorCode</c>
/// in an otherwise-valid size return value.
/// </summary>
public static unsafe class ThrowHelper
{
    // Sentinels returned by ZSTD_getFrameContentSize/ZSTD_decompressBound.
    private const ulong ZSTD_CONTENTSIZE_UNKNOWN = unchecked(0UL - 1);
    private const ulong ZSTD_CONTENTSIZE_ERROR = unchecked(0UL - 2);

    /// <summary>Throws <see cref="ZstdException"/> if <paramref name="returnValue"/> is a zstd error; otherwise passes it through.</summary>
    public static nuint EnsureZstdSuccess(this nuint returnValue)
    {
        if (Unsafe.Methods.ZSTD_isError(returnValue))
            ThrowException(returnValue, Unsafe.Methods.ZSTD_getErrorName(returnValue));

        return returnValue;
    }

    /// <summary>Throws <see cref="ZstdException"/> if <paramref name="returnValue"/> is a zdict error; otherwise passes it through.</summary>
    public static nuint EnsureZdictSuccess(this nuint returnValue)
    {
        if (Unsafe.Methods.ZDICT_isError(returnValue))
            ThrowException(returnValue, Unsafe.Methods.ZDICT_getErrorName(returnValue));

        return returnValue;
    }

    /// <summary>
    /// Validates a content-size result, rejecting the UNKNOWN and ERROR sentinels.
    /// </summary>
    public static ulong EnsureContentSizeOk(this ulong returnValue)
    {
        if (returnValue == ZSTD_CONTENTSIZE_UNKNOWN)
            throw new ZstdException(
                ZSTD_ErrorCode.ZSTD_error_GENERIC,
                "Decompressed content size is not specified"
            );

        if (returnValue == ZSTD_CONTENTSIZE_ERROR)
            throw new ZstdException(
                ZSTD_ErrorCode.ZSTD_error_GENERIC,
                "Decompressed content size cannot be determined (e.g. invalid magic number, srcSize too small)"
            );

        return returnValue;
    }

    // Recovers the positive error code from the negated return value and throws.
    private static void ThrowException(nuint returnValue, string message)
    {
        var code = 0 - returnValue;
        throw new ZstdException((ZSTD_ErrorCode)code, message);
    }
}
|
||||
18
src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs
Normal file
18
src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs
Normal file
@@ -0,0 +1,18 @@
|
||||
using System;
|
||||
using System.Runtime.InteropServices;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/*
|
||||
* Wrap object to void* to make it unmanaged
|
||||
*/
|
||||
/*
 * Bridges managed objects to void* so they can flow through unmanaged-style APIs.
 * Wrap allocates a GCHandle that keeps the object alive until Free is called.
 */
internal static unsafe class UnmanagedObject
{
    /// <summary>Stores <paramref name="obj"/> behind a GCHandle and returns it as an opaque pointer.</summary>
    public static void* Wrap(object obj)
    {
        var gcHandle = GCHandle.Alloc(obj);
        return (void*)GCHandle.ToIntPtr(gcHandle);
    }

    /// <summary>Recovers the original object from a pointer produced by <see cref="Wrap"/>.</summary>
    public static T Unwrap<T>(void* value)
    {
        var gcHandle = GCHandle.FromIntPtr((IntPtr)value);
        return (T)gcHandle.Target!;
    }

    /// <summary>Releases the GCHandle behind a pointer produced by <see cref="Wrap"/>.</summary>
    public static void Free(void* value)
    {
        GCHandle.FromIntPtr((IntPtr)value).Free();
    }
}
|
||||
@@ -0,0 +1,52 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* Custom-allocator shims: route zstd's internal allocations through a
 * user-supplied ZSTD_customMem when one is provided, mirroring the C API. */
public static unsafe partial class Methods
{
    /* custom memory allocation functions */

    /* ZSTD_customMalloc() :
     * Allocates `size` bytes via the user's customAlloc function pointer when
     * present, otherwise via the default malloc(). */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void* ZSTD_customMalloc(nuint size, ZSTD_customMem customMem)
    {
        if (customMem.customAlloc != null)
            // customAlloc is an opaque function pointer: (opaque, size) -> ptr
            return ((delegate* managed<void*, nuint, void*>)customMem.customAlloc)(
                customMem.opaque,
                size
            );
        return malloc(size);
    }

    /* ZSTD_customCalloc() :
     * Zero-initialized allocation. With a custom allocator there is no calloc
     * counterpart, so it degrades to customAlloc + memset. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void* ZSTD_customCalloc(nuint size, ZSTD_customMem customMem)
    {
        if (customMem.customAlloc != null)
        {
            /* calloc implemented as malloc+memset;
             * not as efficient as calloc, but next best guess for custom malloc */
            void* ptr = ((delegate* managed<void*, nuint, void*>)customMem.customAlloc)(
                customMem.opaque,
                size
            );
            // NOTE(review): size is narrowed to uint here — fine for the
            // block-sized buffers zstd requests, but would under-clear an
            // allocation > 4 GiB; confirm memset's size parameter type.
            memset(ptr, 0, (uint)size);
            return ptr;
        }

        return calloc(1, size);
    }

    /* ZSTD_customFree() :
     * Releases ptr through the matching custom free function when one exists,
     * otherwise through the default free(). null pointers are ignored. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
    {
        if (ptr != null)
        {
            if (customMem.customFree != null)
                // customFree signature: (opaque, ptr) -> void
                ((delegate* managed<void*, void*, void>)customMem.customFree)(
                    customMem.opaque,
                    ptr
                );
            else
                free(ptr);
        }
    }
}
|
||||
@@ -0,0 +1,14 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* bitStream can mix input from multiple sources.
 * A critical property of these streams is that they encode and decode in **reverse** direction.
 * So the first bit sequence you add will be the last to be read, like a LIFO stack.
 */
public unsafe struct BIT_CStream_t
{
    public nuint bitContainer; // bit accumulator; flushed to memory once enough bits collect
    public uint bitPos; // number of bits currently held in bitContainer
    public sbyte* startPtr; // beginning of the destination buffer
    public sbyte* ptr; // current write position within the destination buffer
    public sbyte* endPtr; // startPtr + capacity - sizeof(nuint): last safe full-register write
}
|
||||
@@ -0,0 +1,16 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* Result of BIT_reloadDStream(): how much of the bitstream remains available. */
public enum BIT_DStream_status
{
    /* fully refilled */
    BIT_DStream_unfinished = 0,

    /* still some bits left in bitstream */
    BIT_DStream_endOfBuffer = 1,

    /* bitstream entirely consumed, bit-exact */
    BIT_DStream_completed = 2,

    /* user requested more bits than present in bitstream */
    BIT_DStream_overflow = 3,
}
|
||||
@@ -0,0 +1,13 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-********************************************
 * bitStream decoding API (read backward)
 **********************************************/
public unsafe struct BIT_DStream_t
{
    public nuint bitContainer; // local register holding the most recently loaded bits
    public uint bitsConsumed; // how many bits of bitContainer have already been consumed
    public sbyte* ptr; // current read position (moves backward toward start)
    public sbyte* start; // first byte of the source buffer
    public sbyte* limitPtr; // start + sizeof(nuint): below this, a full-register reload would under-run
}
|
||||
60
src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs
Normal file
60
src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs
Normal file
@@ -0,0 +1,60 @@
|
||||
using System;
|
||||
using System.Numerics;
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* Thin wrappers over System.Numerics.BitOperations that preserve the names
 * and contracts of zstd's bit-counting primitives. All of them require a
 * non-zero input (asserted), matching the C originals. */
public static unsafe partial class Methods
{
    /* Count of zero bits below the lowest set bit of a non-zero 32-bit value. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static uint ZSTD_countTrailingZeros32(uint val)
    {
        assert(val != 0);
        return (uint)BitOperations.TrailingZeroCount(val);
    }

    /* Count of zero bits above the highest set bit of a non-zero 32-bit value. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static uint ZSTD_countLeadingZeros32(uint val)
    {
        assert(val != 0);
        return (uint)BitOperations.LeadingZeroCount(val);
    }

    /* 64-bit variant of ZSTD_countTrailingZeros32. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static uint ZSTD_countTrailingZeros64(ulong val)
    {
        assert(val != 0);
        return (uint)BitOperations.TrailingZeroCount(val);
    }

    /* 64-bit variant of ZSTD_countLeadingZeros32. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static uint ZSTD_countLeadingZeros64(ulong val)
    {
        assert(val != 0);
        return (uint)BitOperations.LeadingZeroCount(val);
    }

    /* Number of whole bytes shared by two words whose XOR is `val`:
     * counts zero bits from the architecture's first-compared end
     * (trailing on little-endian, leading on big-endian), divided by 8. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static uint ZSTD_NbCommonBytes(nuint val)
    {
        assert(val != 0);
        if (BitConverter.IsLittleEndian)
        {
            if (MEM_64bits)
            {
                return (uint)BitOperations.TrailingZeroCount(val) >> 3;
            }

            return (uint)BitOperations.TrailingZeroCount((uint)val) >> 3;
        }

        if (MEM_64bits)
        {
            return (uint)BitOperations.LeadingZeroCount(val) >> 3;
        }

        return (uint)BitOperations.LeadingZeroCount((uint)val) >> 3;
    }

    /* Index of the highest set bit of a non-zero value, i.e. floor(log2(val)). */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static uint ZSTD_highbit32(uint val)
    {
        assert(val != 0);
        return (uint)BitOperations.Log2(val);
    }
}
|
||||
739
src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs
Normal file
739
src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs
Normal file
@@ -0,0 +1,739 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
#if NETCOREAPP3_0_OR_GREATER
|
||||
using System.Runtime.Intrinsics.X86;
|
||||
#endif
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* Bitstream reader/writer primitives ported from zstd's bitstream.h.
 * Streams are written forward but *read backward* (LIFO); see BIT_CStream_t.
 * Two parallel method families exist: one taking BIT_DStream_t* and one
 * taking the struct's fields as ref/value parameters — presumably the
 * "deconstructed" overloads help the JIT enregister hot-loop state
 * (TODO confirm against the generator). */
public static unsafe partial class Methods
{
#if NET7_0_OR_GREATER
    // BIT_mask[n] == (1 << n) - 1 for n in [0, 31].
    // On .NET 7+ the table is a constant ReadOnlySpan; AsPointer over its
    // reference relies on the compiler emitting the constant array into
    // static (non-moving) data — TODO confirm this holds for uint spans.
    private static ReadOnlySpan<uint> Span_BIT_mask =>
        new uint[32]
        {
            0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F,
            0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF,
            0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
            0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF,
        };

    private static uint* BIT_mask =>
        (uint*)
            System.Runtime.CompilerServices.Unsafe.AsPointer(
                ref MemoryMarshal.GetReference(Span_BIT_mask)
            );
#else
    // Pre-.NET 7 fallback: the table lives in unmanaged memory obtained once
    // via GetArrayPointer, so BIT_mask stays a stable raw pointer.
    private static readonly uint* BIT_mask = GetArrayPointer(
        new uint[32]
        {
            0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F,
            0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF,
            0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
            0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF,
        }
    );
#endif

    /*-**************************************************************
     * bitStream encoding
     ****************************************************************/
    /*! BIT_initCStream() :
     * `dstCapacity` must be > sizeof(size_t)
     * @return : 0 if success,
     *           otherwise an error code (can be tested using ERR_isError()) */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_initCStream(ref BIT_CStream_t bitC, void* startPtr, nuint dstCapacity)
    {
        bitC.bitContainer = 0;
        bitC.bitPos = 0;
        bitC.startPtr = (sbyte*)startPtr;
        bitC.ptr = bitC.startPtr;
        // endPtr marks the last position where a full register can be written.
        bitC.endPtr = bitC.startPtr + dstCapacity - sizeof(nuint);
        if (dstCapacity <= (nuint)sizeof(nuint))
            // Errors are returned C-style as the negated error code.
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
        return 0;
    }

    /* Returns the low `nbBits` bits of bitContainer; uses BMI2 BZHI when available. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits)
    {
        // Literal translation of C's assert(nbBits < BIT_MASK_SIZE); the
        // expression evaluates to 32 (the BIT_mask table length).
        assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
#if NETCOREAPP3_1_OR_GREATER
        if (Bmi2.X64.IsSupported)
        {
            return (nuint)Bmi2.X64.ZeroHighBits(bitContainer, nbBits);
        }

        if (Bmi2.IsSupported)
        {
            return Bmi2.ZeroHighBits((uint)bitContainer, nbBits);
        }
#endif

        return bitContainer & BIT_mask[nbBits];
    }

    /*! BIT_addBits() :
     * can add up to 31 bits into `bitC`.
     * Note : does not check for register overflow ! */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void BIT_addBits(
        ref nuint bitC_bitContainer,
        ref uint bitC_bitPos,
        nuint value,
        uint nbBits
    )
    {
        assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
        assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8));
        bitC_bitContainer |= BIT_getLowerBits(value, nbBits) << (int)bitC_bitPos;
        bitC_bitPos += nbBits;
    }

    /*! BIT_addBitsFast() :
     * works only if `value` is _clean_,
     * meaning all high bits above nbBits are 0 */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void BIT_addBitsFast(
        ref nuint bitC_bitContainer,
        ref uint bitC_bitPos,
        nuint value,
        uint nbBits
    )
    {
        assert(value >> (int)nbBits == 0);
        assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8));
        bitC_bitContainer |= value << (int)bitC_bitPos;
        bitC_bitPos += nbBits;
    }

    /*! BIT_flushBitsFast() :
     * assumption : bitContainer has not overflowed
     * unsafe version; does not check buffer overflow */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void BIT_flushBitsFast(
        ref nuint bitC_bitContainer,
        ref uint bitC_bitPos,
        ref sbyte* bitC_ptr,
        sbyte* bitC_endPtr
    )
    {
        nuint nbBytes = bitC_bitPos >> 3;
        assert(bitC_bitPos < (uint)(sizeof(nuint) * 8));
        assert(bitC_ptr <= bitC_endPtr);
        // Write the whole register; only the flushed bytes advance the pointer.
        MEM_writeLEST(bitC_ptr, bitC_bitContainer);
        bitC_ptr += nbBytes;
        bitC_bitPos &= 7;
        bitC_bitContainer >>= (int)(nbBytes * 8);
    }

    /*! BIT_flushBits() :
     * assumption : bitContainer has not overflowed
     * safe version; check for buffer overflow, and prevents it.
     * note : does not signal buffer overflow.
     * overflow will be revealed later on using BIT_closeCStream() */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void BIT_flushBits(
        ref nuint bitC_bitContainer,
        ref uint bitC_bitPos,
        ref sbyte* bitC_ptr,
        sbyte* bitC_endPtr
    )
    {
        nuint nbBytes = bitC_bitPos >> 3;
        assert(bitC_bitPos < (uint)(sizeof(nuint) * 8));
        assert(bitC_ptr <= bitC_endPtr);
        MEM_writeLEST(bitC_ptr, bitC_bitContainer);
        bitC_ptr += nbBytes;
        // Clamp instead of failing; BIT_closeCStream() detects the overflow.
        if (bitC_ptr > bitC_endPtr)
            bitC_ptr = bitC_endPtr;
        bitC_bitPos &= 7;
        bitC_bitContainer >>= (int)(nbBytes * 8);
    }

    /*! BIT_closeCStream() :
     * @return : size of CStream, in bytes,
     *           or 0 if it could not fit into dstBuffer */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_closeCStream(
        ref nuint bitC_bitContainer,
        ref uint bitC_bitPos,
        sbyte* bitC_ptr,
        sbyte* bitC_endPtr,
        sbyte* bitC_startPtr
    )
    {
        // Append the terminating 1-bit marker, then flush the remainder.
        BIT_addBitsFast(ref bitC_bitContainer, ref bitC_bitPos, 1, 1);
        BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
        // ptr clamped at endPtr means BIT_flushBits() overflowed earlier.
        if (bitC_ptr >= bitC_endPtr)
            return 0;
        return (nuint)(bitC_ptr - bitC_startPtr) + (nuint)(bitC_bitPos > 0 ? 1 : 0);
    }

    /*-********************************************************
     * bitStream decoding
     **********************************************************/
    /*! BIT_initDStream() :
     * Initialize a BIT_DStream_t.
     * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
     * `srcSize` must be the *exact* size of the bitStream, in bytes.
     * @return : size of stream (== srcSize), or an errorCode if a problem is detected
     */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint srcSize)
    {
        if (srcSize < 1)
        {
            *bitD = new BIT_DStream_t();
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
        }

        bitD->start = (sbyte*)srcBuffer;
        bitD->limitPtr = bitD->start + sizeof(nuint);
        if (srcSize >= (nuint)sizeof(nuint))
        {
            /* normal case: load a full register from the tail of the buffer */
            bitD->ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint);
            bitD->bitContainer = MEM_readLEST(bitD->ptr);
            {
                // The last byte carries the end-of-stream marker bit; its
                // highest set bit tells how many padding bits to skip.
                byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
                bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
                if (lastByte == 0)
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            }
        }
        else
        {
            /* input too small to fill a register: assemble it byte by byte */
            bitD->ptr = bitD->start;
            bitD->bitContainer = *(byte*)bitD->start;
            switch (srcSize)
            {
                case 7:
                    bitD->bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16;
                    goto case 6;
                case 6:
                    bitD->bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24;
                    goto case 5;
                case 5:
                    bitD->bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32;
                    goto case 4;
                case 4:
                    bitD->bitContainer += (nuint)((byte*)srcBuffer)[3] << 24;
                    goto case 3;
                case 3:
                    bitD->bitContainer += (nuint)((byte*)srcBuffer)[2] << 16;
                    goto case 2;
                case 2:
                    bitD->bitContainer += (nuint)((byte*)srcBuffer)[1] << 8;
                    goto default;
                default:
                    break;
            }

            {
                byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
                bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
                if (lastByte == 0)
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            }

            // Account for the register bytes that were never filled.
            bitD->bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8;
        }

        return srcSize;
    }

    /* Bits of bitContainer at or above position `start`. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_getUpperBits(nuint bitContainer, uint start)
    {
        return bitContainer >> (int)start;
    }

    /* `nbBits` bits of bitContainer starting at bit position `start`;
     * uses BMI2 BZHI when available. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBits)
    {
        uint regMask = (uint)(sizeof(nuint) * 8 - 1);
        assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
#if NETCOREAPP3_1_OR_GREATER
        if (Bmi2.X64.IsSupported)
        {
            return (nuint)Bmi2.X64.ZeroHighBits(bitContainer >> (int)(start & regMask), nbBits);
        }

        if (Bmi2.IsSupported)
        {
            return Bmi2.ZeroHighBits((uint)(bitContainer >> (int)(start & regMask)), nbBits);
        }
#endif

        return (nuint)(bitContainer >> (int)(start & regMask) & ((ulong)1 << (int)nbBits) - 1);
    }

    /*! BIT_lookBits() :
     *  Provides next n bits from local register.
     *  local register is not modified.
     *  On 32-bits, maxNbBits==24.
     *  On 64-bits, maxNbBits==56.
     * @return : value extracted */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_lookBits(BIT_DStream_t* bitD, uint nbBits)
    {
        return BIT_getMiddleBits(
            bitD->bitContainer,
            (uint)(sizeof(nuint) * 8) - bitD->bitsConsumed - nbBits,
            nbBits
        );
    }

    /*! BIT_lookBitsFast() :
     *  unsafe version; only works if nbBits >= 1 */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_lookBitsFast(BIT_DStream_t* bitD, uint nbBits)
    {
        uint regMask = (uint)(sizeof(nuint) * 8 - 1);
        assert(nbBits >= 1);
        // Shift consumed bits off the top, then drop everything below nbBits.
        return bitD->bitContainer
            << (int)(bitD->bitsConsumed & regMask)
            >> (int)(regMask + 1 - nbBits & regMask);
    }

    /* Marks `nbBits` as consumed without extracting them. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void BIT_skipBits(BIT_DStream_t* bitD, uint nbBits)
    {
        bitD->bitsConsumed += nbBits;
    }

    /*! BIT_readBits() :
     *  Read (consume) next n bits from local register and update.
     *  Pay attention to not read more than nbBits contained into local register.
     * @return : extracted value. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_readBits(BIT_DStream_t* bitD, uint nbBits)
    {
        nuint value = BIT_lookBits(bitD, nbBits);
        BIT_skipBits(bitD, nbBits);
        return value;
    }

    /*! BIT_readBitsFast() :
     *  unsafe version; only works if nbBits >= 1 */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_readBitsFast(BIT_DStream_t* bitD, uint nbBits)
    {
        nuint value = BIT_lookBitsFast(bitD, nbBits);
        assert(nbBits >= 1);
        BIT_skipBits(bitD, nbBits);
        return value;
    }

    /*! BIT_reloadDStream_internal() :
     *  Simple variant of BIT_reloadDStream(), with two conditions:
     *  1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
     *  2. look window is valid after shifted down : bitD->ptr >= bitD->start
     */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD)
    {
        assert(bitD->bitsConsumed <= (uint)(sizeof(nuint) * 8));
        // Step back over whole consumed bytes and re-read a full register.
        bitD->ptr -= bitD->bitsConsumed >> 3;
        assert(bitD->ptr >= bitD->start);
        bitD->bitsConsumed &= 7;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        return BIT_DStream_status.BIT_DStream_unfinished;
    }

    /*! BIT_reloadDStreamFast() :
     *  Similar to BIT_reloadDStream(), but with two differences:
     *  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
     *  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
     *     point you must use BIT_reloadDStream() to reload.
     */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
    {
        if (bitD->ptr < bitD->limitPtr)
            return BIT_DStream_status.BIT_DStream_overflow;
        return BIT_reloadDStream_internal(bitD);
    }

#if NET7_0_OR_GREATER
    // Zeroed register-sized scratch area the overflow path can safely point at.
    private static ReadOnlySpan<byte> Span_static_zeroFilled =>
        new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 };
    private static nuint* static_zeroFilled =>
        (nuint*)
            System.Runtime.CompilerServices.Unsafe.AsPointer(
                ref MemoryMarshal.GetReference(Span_static_zeroFilled)
            );
#else
    // Pre-.NET 7: same zero scratch area, allocated once in unmanaged memory.
    private static readonly nuint* static_zeroFilled = (nuint*)GetArrayPointer(
        new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }
    );
#endif

    /*! BIT_reloadDStream() :
     *  Refill `bitD` from buffer previously set in BIT_initDStream() .
     *  This function is safe, it guarantees it will never read beyond src buffer.
     * @return : status of `BIT_DStream_t` internal register.
     *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
    {
        if (bitD->bitsConsumed > (uint)(sizeof(nuint) * 8))
        {
            /* overflowed: park ptr on a static zero buffer so later reads stay safe */
            bitD->ptr = (sbyte*)&static_zeroFilled[0];
            return BIT_DStream_status.BIT_DStream_overflow;
        }

        assert(bitD->ptr >= bitD->start);
        if (bitD->ptr >= bitD->limitPtr)
        {
            return BIT_reloadDStream_internal(bitD);
        }

        if (bitD->ptr == bitD->start)
        {
            if (bitD->bitsConsumed < (uint)(sizeof(nuint) * 8))
                return BIT_DStream_status.BIT_DStream_endOfBuffer;
            return BIT_DStream_status.BIT_DStream_completed;
        }

        {
            /* between start and limitPtr: partial reload, clamped at start */
            uint nbBytes = bitD->bitsConsumed >> 3;
            BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished;
            if (bitD->ptr - nbBytes < bitD->start)
            {
                nbBytes = (uint)(bitD->ptr - bitD->start);
                result = BIT_DStream_status.BIT_DStream_endOfBuffer;
            }

            bitD->ptr -= nbBytes;
            bitD->bitsConsumed -= nbBytes * 8;
            bitD->bitContainer = MEM_readLEST(bitD->ptr);
            return result;
        }
    }

    /*! BIT_endOfDStream() :
     * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
     */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static uint BIT_endOfDStream(BIT_DStream_t* DStream)
    {
        return DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8)
            ? 1U
            : 0U;
    }

    /*-********************************************************
     * bitStream decoding (ref/value overload family)
     **********************************************************/
    /*! BIT_initDStream() :
     * Initialize a BIT_DStream_t.
     * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
     * `srcSize` must be the *exact* size of the bitStream, in bytes.
     * @return : size of stream (== srcSize), or an errorCode if a problem is detected
     */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nuint srcSize)
    {
        if (srcSize < 1)
        {
            bitD = new BIT_DStream_t();
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
        }

        bitD.start = (sbyte*)srcBuffer;
        bitD.limitPtr = bitD.start + sizeof(nuint);
        if (srcSize >= (nuint)sizeof(nuint))
        {
            /* normal case: load a full register from the tail of the buffer */
            bitD.ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint);
            bitD.bitContainer = MEM_readLEST(bitD.ptr);
            {
                byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
                bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
                if (lastByte == 0)
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            }
        }
        else
        {
            /* input too small to fill a register: assemble it byte by byte */
            bitD.ptr = bitD.start;
            bitD.bitContainer = *(byte*)bitD.start;
            switch (srcSize)
            {
                case 7:
                    bitD.bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16;
                    goto case 6;
                case 6:
                    bitD.bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24;
                    goto case 5;
                case 5:
                    bitD.bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32;
                    goto case 4;
                case 4:
                    bitD.bitContainer += (nuint)((byte*)srcBuffer)[3] << 24;
                    goto case 3;
                case 3:
                    bitD.bitContainer += (nuint)((byte*)srcBuffer)[2] << 16;
                    goto case 2;
                case 2:
                    bitD.bitContainer += (nuint)((byte*)srcBuffer)[1] << 8;
                    goto default;
                default:
                    break;
            }

            {
                byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
                bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
                if (lastByte == 0)
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            }

            // Account for the register bytes that were never filled.
            bitD.bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8;
        }

        return srcSize;
    }

    /*! BIT_lookBits() :
     *  Provides next n bits from local register.
     *  local register is not modified.
     *  On 32-bits, maxNbBits==24.
     *  On 64-bits, maxNbBits==56.
     * @return : value extracted */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_lookBits(nuint bitD_bitContainer, uint bitD_bitsConsumed, uint nbBits)
    {
        return BIT_getMiddleBits(
            bitD_bitContainer,
            (uint)(sizeof(nuint) * 8) - bitD_bitsConsumed - nbBits,
            nbBits
        );
    }

    /*! BIT_lookBitsFast() :
     *  unsafe version; only works if nbBits >= 1 */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_lookBitsFast(
        nuint bitD_bitContainer,
        uint bitD_bitsConsumed,
        uint nbBits
    )
    {
        uint regMask = (uint)(sizeof(nuint) * 8 - 1);
        assert(nbBits >= 1);
        return bitD_bitContainer
            << (int)(bitD_bitsConsumed & regMask)
            >> (int)(regMask + 1 - nbBits & regMask);
    }

    /* Marks `nbBits` as consumed without extracting them. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void BIT_skipBits(ref uint bitD_bitsConsumed, uint nbBits)
    {
        bitD_bitsConsumed += nbBits;
    }

    /*! BIT_readBits() :
     *  Read (consume) next n bits from local register and update.
     *  Pay attention to not read more than nbBits contained into local register.
     * @return : extracted value. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_readBits(
        nuint bitD_bitContainer,
        ref uint bitD_bitsConsumed,
        uint nbBits
    )
    {
        nuint value = BIT_lookBits(bitD_bitContainer, bitD_bitsConsumed, nbBits);
        BIT_skipBits(ref bitD_bitsConsumed, nbBits);
        return value;
    }

    /*! BIT_readBitsFast() :
     *  unsafe version; only works if nbBits >= 1 */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint BIT_readBitsFast(
        nuint bitD_bitContainer,
        ref uint bitD_bitsConsumed,
        uint nbBits
    )
    {
        nuint value = BIT_lookBitsFast(bitD_bitContainer, bitD_bitsConsumed, nbBits);
        assert(nbBits >= 1);
        BIT_skipBits(ref bitD_bitsConsumed, nbBits);
        return value;
    }

    /*! BIT_reloadDStreamFast() :
     *  Similar to BIT_reloadDStream(), but with two differences:
     *  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
     *  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
     *     point you must use BIT_reloadDStream() to reload.
     */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static BIT_DStream_status BIT_reloadDStreamFast(
        ref nuint bitD_bitContainer,
        ref uint bitD_bitsConsumed,
        ref sbyte* bitD_ptr,
        sbyte* bitD_start,
        sbyte* bitD_limitPtr
    )
    {
        if (bitD_ptr < bitD_limitPtr)
            return BIT_DStream_status.BIT_DStream_overflow;
        return BIT_reloadDStream_internal(
            ref bitD_bitContainer,
            ref bitD_bitsConsumed,
            ref bitD_ptr,
            bitD_start
        );
    }

    /*! BIT_reloadDStream() :
     *  Refill `bitD` from buffer previously set in BIT_initDStream() .
     *  This function is safe, it guarantees it will never read beyond src buffer.
     * @return : status of `BIT_DStream_t` internal register.
     *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static BIT_DStream_status BIT_reloadDStream(
        ref nuint bitD_bitContainer,
        ref uint bitD_bitsConsumed,
        ref sbyte* bitD_ptr,
        sbyte* bitD_start,
        sbyte* bitD_limitPtr
    )
    {
        if (bitD_bitsConsumed > (uint)(sizeof(nuint) * 8))
        {
            /* overflowed: park ptr on a static zero buffer so later reads stay safe */
            bitD_ptr = (sbyte*)&static_zeroFilled[0];
            return BIT_DStream_status.BIT_DStream_overflow;
        }

        assert(bitD_ptr >= bitD_start);
        if (bitD_ptr >= bitD_limitPtr)
        {
            return BIT_reloadDStream_internal(
                ref bitD_bitContainer,
                ref bitD_bitsConsumed,
                ref bitD_ptr,
                bitD_start
            );
        }

        if (bitD_ptr == bitD_start)
        {
            if (bitD_bitsConsumed < (uint)(sizeof(nuint) * 8))
                return BIT_DStream_status.BIT_DStream_endOfBuffer;
            return BIT_DStream_status.BIT_DStream_completed;
        }

        {
            /* between start and limitPtr: partial reload, clamped at start */
            uint nbBytes = bitD_bitsConsumed >> 3;
            BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished;
            if (bitD_ptr - nbBytes < bitD_start)
            {
                nbBytes = (uint)(bitD_ptr - bitD_start);
                result = BIT_DStream_status.BIT_DStream_endOfBuffer;
            }

            bitD_ptr -= nbBytes;
            bitD_bitsConsumed -= nbBytes * 8;
            bitD_bitContainer = MEM_readLEST(bitD_ptr);
            return result;
        }
    }

    /*! BIT_reloadDStream_internal() :
     *  Simple variant of BIT_reloadDStream(), with two conditions:
     *  1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
     *  2. look window is valid after shifted down : bitD->ptr >= bitD->start
     */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static BIT_DStream_status BIT_reloadDStream_internal(
        ref nuint bitD_bitContainer,
        ref uint bitD_bitsConsumed,
        ref sbyte* bitD_ptr,
        sbyte* bitD_start
    )
    {
        assert(bitD_bitsConsumed <= (uint)(sizeof(nuint) * 8));
        bitD_ptr -= bitD_bitsConsumed >> 3;
        assert(bitD_ptr >= bitD_start);
        bitD_bitsConsumed &= 7;
        bitD_bitContainer = MEM_readLEST(bitD_ptr);
        return BIT_DStream_status.BIT_DStream_unfinished;
    }

    /*! BIT_endOfDStream() :
     * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
     */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static uint BIT_endOfDStream(
        uint DStream_bitsConsumed,
        sbyte* DStream_ptr,
        sbyte* DStream_start
    )
    {
        return DStream_ptr == DStream_start && DStream_bitsConsumed == (uint)(sizeof(nuint) * 8)
            ? 1U
            : 0U;
    }
}
|
||||
@@ -0,0 +1,8 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* Per-block accounting triple — presumably returned while iterating the
 * blocks of a frame; exact producer not visible here (TODO confirm caller). */
public struct BlockSummary
{
    public nuint nbSequences; // number of sequences in the block
    public nuint blockSize; // size of the block, in bytes
    public nuint litSize; // size of the literals section, in bytes
}
|
||||
@@ -0,0 +1,20 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
 * COVER_best_t is used for two purposes:
 * 1. Synchronizing threads.
 * 2. Saving the best parameters and dictionary.
 *
 * All of the methods except COVER_best_init() are thread safe if zstd is
 * compiled with multithreaded support.
 */
public unsafe struct COVER_best_s
{
    public void* mutex; // opaque lock guarding the fields below
    public void* cond; // opaque condition variable for liveJobs signalling
    public nuint liveJobs; // number of parameter-search jobs still running
    public void* dict; // best dictionary found so far
    public nuint dictSize; // size of `dict`, in bytes
    public ZDICT_cover_params_t parameters; // parameters that produced the best dictionary
    public nuint compressedSize; // compressed size achieved by the best dictionary
}
|
||||
@@ -0,0 +1,19 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-*************************************
 * Context
 * Working state for the COVER dictionary trainer: the concatenated training
 * samples plus the suffix/frequency tables built over their d-mers.
 ***************************************/
public unsafe struct COVER_ctx_t
{
    public byte* samples; // concatenated training samples
    public nuint* offsets; // start offset of each sample within `samples`
    public nuint* samplesSizes; // size of each sample, in bytes
    public nuint nbSamples; // total number of samples
    public nuint nbTrainSamples; // samples used for training
    public nuint nbTestSamples; // samples held out for evaluation
    public uint* suffix; // suffix table over the samples
    public nuint suffixSize; // number of entries in `suffix`
    public uint* freqs; // frequency table (shares storage lifecycle with suffix data)
    public uint* dmerAt; // d-mer lookup table
    public uint d; // d-mer length parameter
}
|
||||
@@ -0,0 +1,11 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* Struct used for the dictionary selection function.
|
||||
*/
|
||||
public unsafe struct COVER_dictSelection
|
||||
{
|
||||
public byte* dictContent;
|
||||
public nuint dictSize;
|
||||
public nuint totalCompressedSize;
|
||||
}
|
||||
@@ -0,0 +1,10 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
*Number of epochs and size of each epoch.
|
||||
*/
|
||||
public struct COVER_epoch_info_t
|
||||
{
|
||||
public uint num;
|
||||
public uint size;
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public struct COVER_map_pair_t_s
|
||||
{
|
||||
public uint key;
|
||||
public uint value;
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public unsafe struct COVER_map_s
|
||||
{
|
||||
public COVER_map_pair_t_s* data;
|
||||
public uint sizeLog;
|
||||
public uint size;
|
||||
public uint sizeMask;
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* A segment is a range in the source as well as the score of the segment.
|
||||
*/
|
||||
public struct COVER_segment_t
|
||||
{
|
||||
public uint begin;
|
||||
public uint end;
|
||||
public uint score;
|
||||
}
|
||||
@@ -0,0 +1,12 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* Parameters for COVER_tryParameters().
|
||||
*/
|
||||
public unsafe struct COVER_tryParameters_data_s
|
||||
{
|
||||
public COVER_ctx_t* ctx;
|
||||
public COVER_best_s* best;
|
||||
public nuint dictBufferCapacity;
|
||||
public ZDICT_cover_params_t parameters;
|
||||
}
|
||||
849
src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs
Normal file
849
src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs
Normal file
@@ -0,0 +1,849 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
private static readonly ZSTD_compressionParameters[][] ZSTD_defaultCParameters =
|
||||
new ZSTD_compressionParameters[4][]
|
||||
{
|
||||
new ZSTD_compressionParameters[23]
|
||||
{
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 19,
|
||||
chainLog: 12,
|
||||
hashLog: 13,
|
||||
searchLog: 1,
|
||||
minMatch: 6,
|
||||
targetLength: 1,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 19,
|
||||
chainLog: 13,
|
||||
hashLog: 14,
|
||||
searchLog: 1,
|
||||
minMatch: 7,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 20,
|
||||
chainLog: 15,
|
||||
hashLog: 16,
|
||||
searchLog: 1,
|
||||
minMatch: 6,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 18,
|
||||
hashLog: 18,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 3,
|
||||
minMatch: 5,
|
||||
targetLength: 2,
|
||||
strategy: ZSTD_strategy.ZSTD_greedy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 3,
|
||||
minMatch: 5,
|
||||
targetLength: 4,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 19,
|
||||
hashLog: 20,
|
||||
searchLog: 4,
|
||||
minMatch: 5,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 19,
|
||||
hashLog: 20,
|
||||
searchLog: 4,
|
||||
minMatch: 5,
|
||||
targetLength: 16,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 20,
|
||||
hashLog: 21,
|
||||
searchLog: 4,
|
||||
minMatch: 5,
|
||||
targetLength: 16,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 21,
|
||||
hashLog: 22,
|
||||
searchLog: 5,
|
||||
minMatch: 5,
|
||||
targetLength: 16,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 21,
|
||||
hashLog: 22,
|
||||
searchLog: 6,
|
||||
minMatch: 5,
|
||||
targetLength: 16,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 22,
|
||||
hashLog: 23,
|
||||
searchLog: 6,
|
||||
minMatch: 5,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 22,
|
||||
hashLog: 22,
|
||||
searchLog: 4,
|
||||
minMatch: 5,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 22,
|
||||
hashLog: 23,
|
||||
searchLog: 5,
|
||||
minMatch: 5,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 23,
|
||||
hashLog: 23,
|
||||
searchLog: 6,
|
||||
minMatch: 5,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 22,
|
||||
hashLog: 22,
|
||||
searchLog: 5,
|
||||
minMatch: 5,
|
||||
targetLength: 48,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 23,
|
||||
chainLog: 23,
|
||||
hashLog: 22,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 64,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 23,
|
||||
chainLog: 23,
|
||||
hashLog: 22,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 64,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 23,
|
||||
chainLog: 24,
|
||||
hashLog: 22,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 25,
|
||||
chainLog: 25,
|
||||
hashLog: 23,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 26,
|
||||
chainLog: 26,
|
||||
hashLog: 24,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 27,
|
||||
chainLog: 27,
|
||||
hashLog: 25,
|
||||
searchLog: 9,
|
||||
minMatch: 3,
|
||||
targetLength: 999,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
},
|
||||
new ZSTD_compressionParameters[23]
|
||||
{
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 12,
|
||||
hashLog: 13,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 1,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 13,
|
||||
hashLog: 14,
|
||||
searchLog: 1,
|
||||
minMatch: 6,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 16,
|
||||
hashLog: 16,
|
||||
searchLog: 1,
|
||||
minMatch: 4,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 3,
|
||||
minMatch: 5,
|
||||
targetLength: 2,
|
||||
strategy: ZSTD_strategy.ZSTD_greedy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 17,
|
||||
hashLog: 18,
|
||||
searchLog: 5,
|
||||
minMatch: 5,
|
||||
targetLength: 2,
|
||||
strategy: ZSTD_strategy.ZSTD_greedy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 3,
|
||||
minMatch: 5,
|
||||
targetLength: 4,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 4,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 6,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 12,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 7,
|
||||
minMatch: 4,
|
||||
targetLength: 12,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 16,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 4,
|
||||
minMatch: 3,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 128,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 128,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 8,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 128,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 8,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 10,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 12,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 13,
|
||||
minMatch: 3,
|
||||
targetLength: 999,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
},
|
||||
new ZSTD_compressionParameters[23]
|
||||
{
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 12,
|
||||
hashLog: 12,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 1,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 12,
|
||||
hashLog: 13,
|
||||
searchLog: 1,
|
||||
minMatch: 6,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 13,
|
||||
hashLog: 15,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 15,
|
||||
hashLog: 16,
|
||||
searchLog: 2,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 17,
|
||||
hashLog: 17,
|
||||
searchLog: 2,
|
||||
minMatch: 4,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 2,
|
||||
strategy: ZSTD_strategy.ZSTD_greedy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 4,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 6,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 17,
|
||||
hashLog: 17,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 7,
|
||||
minMatch: 4,
|
||||
targetLength: 12,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 12,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 4,
|
||||
minMatch: 3,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 128,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 8,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 10,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 5,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 9,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 11,
|
||||
minMatch: 3,
|
||||
targetLength: 999,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
},
|
||||
new ZSTD_compressionParameters[23]
|
||||
{
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 12,
|
||||
hashLog: 13,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 1,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 15,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 15,
|
||||
searchLog: 1,
|
||||
minMatch: 4,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 15,
|
||||
searchLog: 2,
|
||||
minMatch: 4,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 2,
|
||||
strategy: ZSTD_strategy.ZSTD_greedy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 4,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 6,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 8,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 14,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 14,
|
||||
searchLog: 9,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 14,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 12,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 14,
|
||||
searchLog: 4,
|
||||
minMatch: 3,
|
||||
targetLength: 24,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 14,
|
||||
searchLog: 5,
|
||||
minMatch: 3,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 64,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 5,
|
||||
minMatch: 3,
|
||||
targetLength: 48,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 128,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 8,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 8,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 9,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 10,
|
||||
minMatch: 3,
|
||||
targetLength: 999,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
},
|
||||
};
|
||||
}
|
||||
61
src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs
Normal file
61
src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs
Normal file
@@ -0,0 +1,61 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/* @return 1 if @u is a 2^n value, 0 otherwise
|
||||
* useful to check a value is valid for alignment restrictions */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static int ZSTD_isPower2(nuint u)
|
||||
{
|
||||
return (u & u - 1) == 0 ? 1 : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to perform a wrapped pointer difference without triggering
|
||||
* UBSAN.
|
||||
*
|
||||
* @returns lhs - rhs with wrapping
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nint ZSTD_wrappedPtrDiff(byte* lhs, byte* rhs)
|
||||
{
|
||||
return (nint)(lhs - rhs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to perform a wrapped pointer add without triggering UBSAN.
|
||||
*
|
||||
* @return ptr + add with wrapping
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte* ZSTD_wrappedPtrAdd(byte* ptr, nint add)
|
||||
{
|
||||
return ptr + add;
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to perform a wrapped pointer subtraction without triggering
|
||||
* UBSAN.
|
||||
*
|
||||
* @return ptr - sub with wrapping
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte* ZSTD_wrappedPtrSub(byte* ptr, nint sub)
|
||||
{
|
||||
return ptr - sub;
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to add to a pointer that works around C's undefined behavior
|
||||
* of adding 0 to NULL.
|
||||
*
|
||||
* @returns `ptr + add` except it defines `NULL + 0 == NULL`.
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte* ZSTD_maybeNullPtrAdd(byte* ptr, nint add)
|
||||
{
|
||||
return add > 0 ? ptr + add : ptr;
|
||||
}
|
||||
}
|
||||
444
src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs
Normal file
444
src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs
Normal file
@@ -0,0 +1,444 @@
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
private static int g_displayLevel = 0;
|
||||
|
||||
/**
|
||||
* Returns the sum of the sample sizes.
|
||||
*/
|
||||
private static nuint COVER_sum(nuint* samplesSizes, uint nbSamples)
|
||||
{
|
||||
nuint sum = 0;
|
||||
uint i;
|
||||
for (i = 0; i < nbSamples; ++i)
|
||||
{
|
||||
sum += samplesSizes[i];
|
||||
}
|
||||
|
||||
return sum;
|
||||
}
|
||||
|
||||
/**
|
||||
* Warns the user when their corpus is too small.
|
||||
*/
|
||||
private static void COVER_warnOnSmallCorpus(nuint maxDictSize, nuint nbDmers, int displayLevel)
|
||||
{
|
||||
double ratio = nbDmers / (double)maxDictSize;
|
||||
if (ratio >= 10)
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes the number of epochs and the size of each epoch.
|
||||
* We will make sure that each epoch gets at least 10 * k bytes.
|
||||
*
|
||||
* The COVER algorithms divide the data up into epochs of equal size and
|
||||
* select one segment from each epoch.
|
||||
*
|
||||
* @param maxDictSize The maximum allowed dictionary size.
|
||||
* @param nbDmers The number of dmers we are training on.
|
||||
* @param k The parameter k (segment size).
|
||||
* @param passes The target number of passes over the dmer corpus.
|
||||
* More passes means a better dictionary.
|
||||
*/
|
||||
private static COVER_epoch_info_t COVER_computeEpochs(
|
||||
uint maxDictSize,
|
||||
uint nbDmers,
|
||||
uint k,
|
||||
uint passes
|
||||
)
|
||||
{
|
||||
uint minEpochSize = k * 10;
|
||||
COVER_epoch_info_t epochs;
|
||||
epochs.num = 1 > maxDictSize / k / passes ? 1 : maxDictSize / k / passes;
|
||||
epochs.size = nbDmers / epochs.num;
|
||||
if (epochs.size >= minEpochSize)
|
||||
{
|
||||
assert(epochs.size * epochs.num <= nbDmers);
|
||||
return epochs;
|
||||
}
|
||||
|
||||
epochs.size = minEpochSize < nbDmers ? minEpochSize : nbDmers;
|
||||
epochs.num = nbDmers / epochs.size;
|
||||
assert(epochs.size * epochs.num <= nbDmers);
|
||||
return epochs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks total compressed size of a dictionary
|
||||
*/
|
||||
private static nuint COVER_checkTotalCompressedSize(
|
||||
ZDICT_cover_params_t parameters,
|
||||
nuint* samplesSizes,
|
||||
byte* samples,
|
||||
nuint* offsets,
|
||||
nuint nbTrainSamples,
|
||||
nuint nbSamples,
|
||||
byte* dict,
|
||||
nuint dictBufferCapacity
|
||||
)
|
||||
{
|
||||
nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
/* Pointers */
|
||||
ZSTD_CCtx_s* cctx;
|
||||
ZSTD_CDict_s* cdict;
|
||||
void* dst;
|
||||
/* Local variables */
|
||||
nuint dstCapacity;
|
||||
nuint i;
|
||||
{
|
||||
nuint maxSampleSize = 0;
|
||||
i = parameters.splitPoint < 1 ? nbTrainSamples : 0;
|
||||
for (; i < nbSamples; ++i)
|
||||
{
|
||||
maxSampleSize = samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize;
|
||||
}
|
||||
|
||||
dstCapacity = ZSTD_compressBound(maxSampleSize);
|
||||
dst = malloc(dstCapacity);
|
||||
}
|
||||
|
||||
cctx = ZSTD_createCCtx();
|
||||
cdict = ZSTD_createCDict(dict, dictBufferCapacity, parameters.zParams.compressionLevel);
|
||||
if (dst == null || cctx == null || cdict == null)
|
||||
{
|
||||
goto _compressCleanup;
|
||||
}
|
||||
|
||||
totalCompressedSize = dictBufferCapacity;
|
||||
i = parameters.splitPoint < 1 ? nbTrainSamples : 0;
|
||||
for (; i < nbSamples; ++i)
|
||||
{
|
||||
nuint size = ZSTD_compress_usingCDict(
|
||||
cctx,
|
||||
dst,
|
||||
dstCapacity,
|
||||
samples + offsets[i],
|
||||
samplesSizes[i],
|
||||
cdict
|
||||
);
|
||||
if (ERR_isError(size))
|
||||
{
|
||||
totalCompressedSize = size;
|
||||
goto _compressCleanup;
|
||||
}
|
||||
|
||||
totalCompressedSize += size;
|
||||
}
|
||||
|
||||
_compressCleanup:
|
||||
ZSTD_freeCCtx(cctx);
|
||||
ZSTD_freeCDict(cdict);
|
||||
if (dst != null)
|
||||
{
|
||||
free(dst);
|
||||
}
|
||||
|
||||
return totalCompressedSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the `COVER_best_t`.
|
||||
*/
|
||||
private static void COVER_best_init(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
return;
|
||||
SynchronizationWrapper.Init(&best->mutex);
|
||||
best->liveJobs = 0;
|
||||
best->dict = null;
|
||||
best->dictSize = 0;
|
||||
best->compressedSize = unchecked((nuint)(-1));
|
||||
best->parameters = new ZDICT_cover_params_t();
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait until liveJobs == 0.
|
||||
*/
|
||||
private static void COVER_best_wait(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
SynchronizationWrapper.Enter(&best->mutex);
|
||||
while (best->liveJobs != 0)
|
||||
{
|
||||
SynchronizationWrapper.Wait(&best->mutex);
|
||||
}
|
||||
|
||||
SynchronizationWrapper.Exit(&best->mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* Call COVER_best_wait() and then destroy the COVER_best_t.
|
||||
*/
|
||||
private static void COVER_best_destroy(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
COVER_best_wait(best);
|
||||
if (best->dict != null)
|
||||
{
|
||||
free(best->dict);
|
||||
}
|
||||
|
||||
SynchronizationWrapper.Free(&best->mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when a thread is about to be launched.
|
||||
* Increments liveJobs.
|
||||
*/
|
||||
private static void COVER_best_start(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
SynchronizationWrapper.Enter(&best->mutex);
|
||||
++best->liveJobs;
|
||||
SynchronizationWrapper.Exit(&best->mutex);
|
||||
}
|
||||
|
||||
/**
 * Called when a thread finishes executing, both on error or success.
 * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far save it and its parameters.
 *
 * The candidate is copied (not adopted): `selection.dictContent` remains
 * owned by the caller, who is expected to release it via
 * COVER_dictSelectionFree().
 */
private static void COVER_best_finish(
    COVER_best_s* best,
    ZDICT_cover_params_t parameters,
    COVER_dictSelection selection
)
{
    void* dict = selection.dictContent;
    nuint compressedSize = selection.totalCompressedSize;
    nuint dictSize = selection.dictSize;
    if (best == null)
    {
        return;
    }

    {
        nuint liveJobs;
        SynchronizationWrapper.Enter(&best->mutex);
        --best->liveJobs;
        liveJobs = best->liveJobs;
        // Smaller compressed size == better dictionary; note the initial
        // sentinel set by COVER_best_init() makes the first result win.
        if (compressedSize < best->compressedSize)
        {
            // (Re)allocate the stored buffer only when the existing one is
            // absent or too small for the new candidate.
            if (best->dict == null || best->dictSize < dictSize)
            {
                if (best->dict != null)
                {
                    free(best->dict);
                }

                best->dict = malloc(dictSize);
                if (best->dict == null)
                {
                    // Allocation failure: record a generic error as the
                    // "compressed size" so callers detect it, wake waiters,
                    // and bail out while still releasing the lock.
                    best->compressedSize = unchecked(
                        (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)
                    );
                    best->dictSize = 0;
                    SynchronizationWrapper.Pulse(&best->mutex);
                    SynchronizationWrapper.Exit(&best->mutex);
                    return;
                }
            }

            if (dict != null)
            {
                memcpy(best->dict, dict, (uint)dictSize);
                best->dictSize = dictSize;
                best->parameters = parameters;
                best->compressedSize = compressedSize;
            }
        }

        // Last job out wakes everyone blocked in COVER_best_wait().
        if (liveJobs == 0)
        {
            SynchronizationWrapper.PulseAll(&best->mutex);
        }

        SynchronizationWrapper.Exit(&best->mutex);
    }
}
|
||||
|
||||
/**
 * Bundles a dictionary buffer, its size, and the total compressed size
 * measured with it into a COVER_dictSelection value.
 */
private static COVER_dictSelection setDictSelection(byte* buf, nuint s, nuint csz)
{
    var selection = new COVER_dictSelection
    {
        dictContent = buf,
        dictSize = s,
        totalCompressedSize = csz,
    };
    return selection;
}
|
||||
|
||||
/**
 * Error function for COVER_selectDict function. Returns a struct where
 * return.totalCompressedSize is a ZSTD error.
 */
private static COVER_dictSelection COVER_dictSelectionError(nuint error) =>
    setDictSelection(null, 0, error);
|
||||
|
||||
/**
 * Error function for COVER_selectDict function. Checks if the return
 * value is an error. Returns 1 for error, 0 otherwise (C-style flag).
 */
private static uint COVER_dictSelectionIsError(COVER_dictSelection selection)
{
    // Either an explicit error code or a missing buffer counts as failure.
    if (ERR_isError(selection.totalCompressedSize) || selection.dictContent == null)
    {
        return 1U;
    }
    return 0U;
}
|
||||
|
||||
/**
 * Always call after selectDict is called to free up used memory from
 * newly created dictionary. Safe on an error selection (null content).
 */
private static void COVER_dictSelectionFree(COVER_dictSelection selection) =>
    free(selection.dictContent);
|
||||
|
||||
/**
 * Called to finalize the dictionary and select one based on whether or not
 * the shrink-dict flag was enabled. If enabled the dictionary used is the
 * smallest dictionary within a specified regression of the compressed size
 * from the largest dictionary.
 *
 * Ownership: the returned selection's dictContent is a freshly malloc'ed
 * buffer (either largestDictbuffer or candidateDictBuffer); the other
 * buffer is always freed before returning. Callers must release the result
 * with COVER_dictSelectionFree().
 */
private static COVER_dictSelection COVER_selectDict(
    byte* customDictContent,
    nuint dictBufferCapacity,
    nuint dictContentSize,
    byte* samplesBuffer,
    nuint* samplesSizes,
    uint nbFinalizeSamples,
    nuint nbCheckSamples,
    nuint nbSamples,
    ZDICT_cover_params_t @params,
    nuint* offsets,
    nuint totalCompressedSize
)
{
    nuint largestDict = 0;
    nuint largestCompressed = 0;
    byte* customDictContentEnd = customDictContent + dictContentSize;
    byte* largestDictbuffer = (byte*)malloc(dictBufferCapacity);
    byte* candidateDictBuffer = (byte*)malloc(dictBufferCapacity);
    // shrinkDictMaxRegression is a percentage; e.g. 5 -> tolerance 1.05.
    double regressionTolerance = (double)@params.shrinkDictMaxRegression / 100 + 1;
    if (largestDictbuffer == null || candidateDictBuffer == null)
    {
        // free(null) is a no-op, so both calls are safe regardless of
        // which allocation failed.
        free(largestDictbuffer);
        free(candidateDictBuffer);
        return COVER_dictSelectionError(dictContentSize);
    }

    // First: finalize the full-size ("largest") dictionary.
    memcpy(largestDictbuffer, customDictContent, (uint)dictContentSize);
    dictContentSize = ZDICT_finalizeDictionary(
        largestDictbuffer,
        dictBufferCapacity,
        customDictContent,
        dictContentSize,
        samplesBuffer,
        samplesSizes,
        nbFinalizeSamples,
        @params.zParams
    );
    if (ZDICT_isError(dictContentSize))
    {
        free(largestDictbuffer);
        free(candidateDictBuffer);
        return COVER_dictSelectionError(dictContentSize);
    }

    // Measure the baseline compressed size achieved with the full dictionary.
    totalCompressedSize = COVER_checkTotalCompressedSize(
        @params,
        samplesSizes,
        samplesBuffer,
        offsets,
        nbCheckSamples,
        nbSamples,
        largestDictbuffer,
        dictContentSize
    );
    if (ERR_isError(totalCompressedSize))
    {
        free(largestDictbuffer);
        free(candidateDictBuffer);
        return COVER_dictSelectionError(totalCompressedSize);
    }

    // Shrinking disabled: keep the full dictionary as-is.
    if (@params.shrinkDict == 0)
    {
        free(candidateDictBuffer);
        return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
    }

    largestDict = dictContentSize;
    largestCompressed = totalCompressedSize;
    // Probe candidate sizes 256, 512, 1024, ... (doubling each round) and
    // accept the first one whose compressed size is within the tolerance
    // of the full dictionary's result.
    dictContentSize = 256;
    while (dictContentSize < largestDict)
    {
        memcpy(candidateDictBuffer, largestDictbuffer, (uint)largestDict);
        dictContentSize = ZDICT_finalizeDictionary(
            candidateDictBuffer,
            dictBufferCapacity,
            customDictContentEnd - dictContentSize,
            dictContentSize,
            samplesBuffer,
            samplesSizes,
            nbFinalizeSamples,
            @params.zParams
        );
        if (ZDICT_isError(dictContentSize))
        {
            free(largestDictbuffer);
            free(candidateDictBuffer);
            return COVER_dictSelectionError(dictContentSize);
        }

        totalCompressedSize = COVER_checkTotalCompressedSize(
            @params,
            samplesSizes,
            samplesBuffer,
            offsets,
            nbCheckSamples,
            nbSamples,
            candidateDictBuffer,
            dictContentSize
        );
        if (ERR_isError(totalCompressedSize))
        {
            free(largestDictbuffer);
            free(candidateDictBuffer);
            return COVER_dictSelectionError(totalCompressedSize);
        }

        if (totalCompressedSize <= largestCompressed * regressionTolerance)
        {
            // Candidate is good enough: hand it off and drop the full buffer.
            free(largestDictbuffer);
            return setDictSelection(candidateDictBuffer, dictContentSize, totalCompressedSize);
        }

        dictContentSize *= 2;
    }

    // No acceptable smaller dictionary: fall back to the full one.
    dictContentSize = largestDict;
    totalCompressedSize = largestCompressed;
    free(candidateDictBuffer);
    return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
}
|
||||
}
|
||||
12
src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs
Normal file
12
src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs
Normal file
@@ -0,0 +1,12 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-***************************/
/*  generic DTableDesc       */
/*-***************************/
/// <summary>
/// Compact 4-byte header describing a Huffman decoding table.
/// Mirrors zstd's DTableDesc; typically aliased over the first entry
/// of the table memory.
/// </summary>
public struct DTableDesc
{
    // Maximum table log the table memory can accommodate.
    public byte maxTableLog;
    // Table flavor discriminator (e.g. X1 vs X2 decoding) — TODO confirm
    // exact values against the table-building code.
    public byte tableType;
    // Actual log2 of the number of table entries in use.
    public byte tableLog;
    // Padding / reserved for future use.
    public byte reserved;
}
|
||||
@@ -0,0 +1,13 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/// <summary>
/// Resource bundle used while gathering entropy statistics for
/// dictionary training: a reference dictionary, a working compression
/// context, and a scratch output buffer.
/// </summary>
public unsafe struct EStats_ress_t
{
    /* dictionary */
    public ZSTD_CDict_s* dict;

    /* working context */
    public ZSTD_CCtx_s* zc;

    /* must be ZSTD_BLOCKSIZE_MAX allocated */
    public void* workPlace;
}
|
||||
447
src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs
Normal file
447
src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs
Normal file
@@ -0,0 +1,447 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/*=== Version ===*/
/// Returns the FSE library version encoded as major*10000 + minor*100 + patch.
private static uint FSE_versionNumber()
{
    const uint major = 0;
    const uint minor = 9;
    const uint release = 0;
    return major * 100 * 100 + minor * 100 + release;
}
|
||||
|
||||
/*=== Error Management ===*/
/// FSE-flavored alias for the shared error predicate.
private static bool FSE_isError(nuint code) => ERR_isError(code);
|
||||
|
||||
/// FSE-flavored alias for the shared error-name lookup.
private static string FSE_getErrorName(nuint code) => ERR_getErrorName(code);
|
||||
|
||||
/* Error Management */
/// HUF-flavored alias for the shared error predicate.
private static bool HUF_isError(nuint code) => ERR_isError(code);
|
||||
|
||||
/// HUF-flavored alias for the shared error-name lookup.
private static string HUF_getErrorName(nuint code) => ERR_getErrorName(code);
|
||||
|
||||
/*-**************************************************************
 * FSE NCount encoding-decoding
 ****************************************************************/
/// <summary>
/// Decodes the compactly-encoded normalized counter table (NCount) of an
/// FSE-compressed stream. Fills <paramref name="normalizedCounter"/>,
/// writes the table log to *tableLogPtr and the highest decoded symbol to
/// *maxSVPtr, and returns the number of header bytes consumed — or a
/// negative ZSTD error code cast to nuint (test with FSE_isError).
/// The reader assumes it may always load 4 bytes at `ip`, which is why
/// inputs shorter than 8 bytes go through a zero-padded stack copy first.
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint FSE_readNCount_body(
    short* normalizedCounter,
    uint* maxSVPtr,
    uint* tableLogPtr,
    void* headerBuffer,
    nuint hbSize
)
{
    byte* istart = (byte*)headerBuffer;
    byte* iend = istart + hbSize;
    byte* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    uint bitStream;
    int bitCount;          // bits of the current 32-bit load already consumed
    uint charnum = 0;      // next symbol index to fill
    uint maxSV1 = *maxSVPtr + 1;
    int previous0 = 0;     // flag: previous count was 0, repeat-coding follows
    if (hbSize < 8)
    {
        sbyte* buffer = stackalloc sbyte[8];
        /* This function only works when hbSize >= 8 */
        memset(buffer, 0, sizeof(sbyte) * 8);
        memcpy(buffer, headerBuffer, (uint)hbSize);
        {
            // Recurse on the padded copy, then verify we did not "consume"
            // padding bytes beyond the real input.
            nuint countSize = FSE_readNCount(
                normalizedCounter,
                maxSVPtr,
                tableLogPtr,
                buffer,
                sizeof(sbyte) * 8
            );
            if (FSE_isError(countSize))
                return countSize;
            if (countSize > hbSize)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            return countSize;
        }
    }

    assert(hbSize >= 8);
    memset(normalizedCounter, 0, (*maxSVPtr + 1) * sizeof(short));
    bitStream = MEM_readLE32(ip);
    // Low 4 bits encode (tableLog - 5), per the zstd/FSE header format.
    nbBits = (int)((bitStream & 0xF) + 5);
    if (nbBits > 15)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = (uint)nbBits;
    remaining = (1 << nbBits) + 1;
    threshold = 1 << nbBits;
    nbBits++;
    for (; ; )
    {
        if (previous0 != 0)
        {
            /* Count the number of repeats. Each time the
             * 2-bit repeat code is 0b11 there is another
             * repeat.
             * Avoid UB by setting the high bit to 1.
             */
            int repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1);
            while (repeats >= 12)
            {
                charnum += 3 * 12;
                if (ip <= iend - 7)
                {
                    ip += 3;
                }
                else
                {
                    // Near the end of input: stop advancing the pointer and
                    // track the overshoot in bitCount instead.
                    bitCount -= (int)(8 * (iend - 7 - ip));
                    bitCount &= 31;
                    ip = iend - 4;
                }

                bitStream = MEM_readLE32(ip) >> bitCount;
                repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1);
            }

            charnum += (uint)(3 * repeats);
            bitStream >>= 2 * repeats;
            bitCount += 2 * repeats;
            assert((bitStream & 3) < 3);
            charnum += bitStream & 3;
            bitCount += 2;
            if (charnum >= maxSV1)
                break;
            // Reload window: advance ip by whole consumed bytes when safe,
            // otherwise clamp to iend-4 and carry the residue in bitCount.
            if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4)
            {
                assert(bitCount >> 3 <= 3);
                ip += bitCount >> 3;
                bitCount &= 7;
            }
            else
            {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }

            bitStream = MEM_readLE32(ip) >> bitCount;
        }

        {
            // Decode one count using the variable-threshold scheme: small
            // values take (nbBits-1) bits, larger ones take nbBits bits.
            int max = 2 * threshold - 1 - remaining;
            int count;
            if ((bitStream & (uint)(threshold - 1)) < (uint)max)
            {
                count = (int)(bitStream & (uint)(threshold - 1));
                bitCount += nbBits - 1;
            }
            else
            {
                count = (int)(bitStream & (uint)(2 * threshold - 1));
                if (count >= threshold)
                    count -= max;
                bitCount += nbBits;
            }

            // Stored values are offset by one; -1 denotes a "less than one"
            // probability, which still consumes 1 from `remaining`.
            count--;
            if (count >= 0)
            {
                remaining -= count;
            }
            else
            {
                assert(count == -1);
                remaining += count;
            }

            normalizedCounter[charnum++] = (short)count;
            previous0 = count == 0 ? 1 : 0;
            assert(threshold > 1);
            if (remaining < threshold)
            {
                if (remaining <= 1)
                    break;
                // Shrink the coding threshold as the remaining probability
                // mass decreases. NB: `1 << nbBits - 1` parses as
                // `1 << (nbBits - 1)` — intentional.
                nbBits = (int)(ZSTD_highbit32((uint)remaining) + 1);
                threshold = 1 << nbBits - 1;
            }

            if (charnum >= maxSV1)
                break;
            if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4)
            {
                ip += bitCount >> 3;
                bitCount &= 7;
            }
            else
            {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }

            bitStream = MEM_readLE32(ip) >> bitCount;
        }
    }

    // Final sanity checks: all probability mass must be consumed exactly.
    if (remaining != 1)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    if (charnum > maxSV1)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall));
    if (bitCount > 32)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    *maxSVPtr = charnum - 1;
    // Round the consumed bit count up to whole bytes.
    ip += bitCount + 7 >> 3;
    return (nuint)(ip - istart);
}
|
||||
|
||||
/* Avoids the FORCE_INLINE of the _body() function. */
private static nuint FSE_readNCount_body_default(
    short* normalizedCounter,
    uint* maxSVPtr,
    uint* tableLogPtr,
    void* headerBuffer,
    nuint hbSize
) => FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
|
||||
|
||||
/*! FSE_readNCount_bmi2():
 * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
 * NOTE(review): in this managed port the bmi2 argument is ignored and the
 * default (non-BMI2) body is always used — the parameter is kept only for
 * signature parity with the C sources.
 */
private static nuint FSE_readNCount_bmi2(
    short* normalizedCounter,
    uint* maxSVPtr,
    uint* tableLogPtr,
    void* headerBuffer,
    nuint hbSize,
    int bmi2
)
{
    return FSE_readNCount_body_default(
        normalizedCounter,
        maxSVPtr,
        tableLogPtr,
        headerBuffer,
        hbSize
    );
}
|
||||
|
||||
/*! FSE_readNCount():
    Read compactly saved 'normalizedCounter' from 'rBuffer'.
    @return : size read from 'rBuffer',
              or an errorCode, which can be tested using FSE_isError().
    maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
private static nuint FSE_readNCount(
    short* normalizedCounter,
    uint* maxSVPtr,
    uint* tableLogPtr,
    void* headerBuffer,
    nuint hbSize
) =>
    // Delegate to the bmi2 entry point with acceleration disabled.
    FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, 0);
|
||||
|
||||
/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer.
    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
    @return : size read from `src` , or an error Code .
    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
*/
private static nuint HUF_readStats(
    byte* huffWeight,
    nuint hwSize,
    uint* rankStats,
    uint* nbSymbolsPtr,
    uint* tableLogPtr,
    void* src,
    nuint srcSize
)
{
    // 219 uints = the fixed workspace size expected by
    // HUF_readStats_wksp (mirrors HUF_READ_STATS_WORKSPACE_SIZE_U32
    // in the C sources — TODO confirm against that constant).
    uint* wksp = stackalloc uint[219];
    return HUF_readStats_wksp(
        huffWeight,
        hwSize,
        rankStats,
        nbSymbolsPtr,
        tableLogPtr,
        src,
        srcSize,
        wksp,
        sizeof(uint) * 219,
        0
    );
}
|
||||
|
||||
/// <summary>
/// Decodes the Huffman weight table stored at `src`. Weights are either
/// direct 4-bit pairs (header byte >= 128) or FSE-compressed. Also fills
/// `rankStats` (histogram of weights), derives the implicit last weight,
/// and reports the table log and symbol count. Returns the number of
/// bytes consumed from `src`, or a ZSTD error code cast to nuint.
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint HUF_readStats_body(
    byte* huffWeight,
    nuint hwSize,
    uint* rankStats,
    uint* nbSymbolsPtr,
    uint* tableLogPtr,
    void* src,
    nuint srcSize,
    void* workSpace,
    nuint wkspSize,
    int bmi2
)
{
    uint weightTotal;
    byte* ip = (byte*)src;
    nuint iSize;   // compressed/encoded size of the weight section
    nuint oSize;   // number of explicit weights decoded
    if (srcSize == 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    iSize = ip[0];
    if (iSize >= 128)
    {
        // Direct representation: header byte encodes 127 + number of
        // weights; each input byte holds two 4-bit weights.
        oSize = iSize - 127;
        iSize = (oSize + 1) / 2;
        if (iSize + 1 > srcSize)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
        if (oSize >= hwSize)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        ip += 1;
        {
            uint n;
            for (n = 0; n < oSize; n += 2)
            {
                huffWeight[n] = (byte)(ip[n / 2] >> 4);
                huffWeight[n + 1] = (byte)(ip[n / 2] & 15);
            }
        }
    }
    else
    {
        // FSE-compressed weights: decompress into huffWeight.
        if (iSize + 1 > srcSize)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
        oSize = FSE_decompress_wksp_bmi2(
            huffWeight,
            hwSize - 1,
            ip + 1,
            iSize,
            6,
            workSpace,
            wkspSize,
            bmi2
        );
        if (FSE_isError(oSize))
            return oSize;
    }

    // Histogram the weights and accumulate the total probability mass
    // (each weight w contributes 2^(w-1); weight 0 contributes nothing).
    memset(rankStats, 0, (12 + 1) * sizeof(uint));
    weightTotal = 0;
    {
        uint n;
        for (n = 0; n < oSize; n++)
        {
            if (huffWeight[n] > 12)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            rankStats[huffWeight[n]]++;
            weightTotal += (uint)(1 << huffWeight[n] >> 1);
        }
    }

    if (weightTotal == 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    {
        // tableLog is derived from the accumulated mass; max is 12.
        uint tableLog = ZSTD_highbit32(weightTotal) + 1;
        if (tableLog > 12)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        *tableLogPtr = tableLog;
        {
            // The last symbol's weight is implicit: the remainder needed to
            // reach a full power of two must itself be a power of two.
            uint total = (uint)(1 << (int)tableLog);
            uint rest = total - weightTotal;
            uint verif = (uint)(1 << (int)ZSTD_highbit32(rest));
            uint lastWeight = ZSTD_highbit32(rest) + 1;
            if (verif != rest)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            huffWeight[oSize] = (byte)lastWeight;
            rankStats[lastWeight]++;
        }
    }

    // A valid Huffman tree needs at least two weight-1 leaves, in pairs.
    if (rankStats[1] < 2 || (rankStats[1] & 1) != 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    *nbSymbolsPtr = (uint)(oSize + 1);
    return iSize + 1;
}
|
||||
|
||||
/* Avoids the FORCE_INLINE of the _body() function. */
private static nuint HUF_readStats_body_default(
    byte* huffWeight,
    nuint hwSize,
    uint* rankStats,
    uint* nbSymbolsPtr,
    uint* tableLogPtr,
    void* src,
    nuint srcSize,
    void* workSpace,
    nuint wkspSize
) =>
    HUF_readStats_body(
        huffWeight,
        hwSize,
        rankStats,
        nbSymbolsPtr,
        tableLogPtr,
        src,
        srcSize,
        workSpace,
        wkspSize,
        0
    );
|
||||
|
||||
/// <summary>
/// Workspace-based entry point for HUF_readStats.
/// NOTE(review): the `flags` argument is ignored here — this port always
/// dispatches to the default (non-accelerated) body; kept for signature
/// parity with the C sources.
/// </summary>
private static nuint HUF_readStats_wksp(
    byte* huffWeight,
    nuint hwSize,
    uint* rankStats,
    uint* nbSymbolsPtr,
    uint* tableLogPtr,
    void* src,
    nuint srcSize,
    void* workSpace,
    nuint wkspSize,
    int flags
)
{
    return HUF_readStats_body_default(
        huffWeight,
        hwSize,
        rankStats,
        nbSymbolsPtr,
        tableLogPtr,
        src,
        srcSize,
        workSpace,
        wkspSize
    );
}
|
||||
}
|
||||
110
src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs
Normal file
110
src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs
Normal file
@@ -0,0 +1,110 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/// An error is any nuint value in the top range reserved for negated
/// ZSTD_ErrorCode values (the C convention of returning (size_t)-errcode).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static bool ERR_isError(nuint code) =>
    code > unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode));
|
||||
|
||||
/// <summary>
/// Recovers the ZSTD_ErrorCode from an error-carrying nuint.
/// Non-error inputs map to ZSTD_error_no_error (0).
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ZSTD_ErrorCode ERR_getErrorCode(nuint code)
{
    if (!ERR_isError(code))
        return 0;
    // Errors are stored as the two's-complement negation of the enum value,
    // so negating again (0 - code, wrapping) recovers the positive code.
    return (ZSTD_ErrorCode)(0 - code);
}
|
||||
|
||||
/// Human-readable description for an error-carrying nuint return value.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static string ERR_getErrorName(nuint code) =>
    ERR_getErrorString(ERR_getErrorCode(code));
|
||||
|
||||
/*-****************************************
 *  Error Strings
 ******************************************/
/// <summary>
/// Maps each ZSTD_ErrorCode to its canonical English description,
/// matching the strings in the upstream zstd error_private.c table.
/// Unknown codes (including ZSTD_error_maxCode) fall through to a
/// generic "Unspecified error code" message.
/// </summary>
private static string ERR_getErrorString(ZSTD_ErrorCode code)
{
    const string notErrorCode = "Unspecified error code";
    switch (code)
    {
        case ZSTD_ErrorCode.ZSTD_error_no_error:
            return "No error detected";
        case ZSTD_ErrorCode.ZSTD_error_GENERIC:
            return "Error (generic)";
        case ZSTD_ErrorCode.ZSTD_error_prefix_unknown:
            return "Unknown frame descriptor";
        case ZSTD_ErrorCode.ZSTD_error_version_unsupported:
            return "Version not supported";
        case ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported:
            return "Unsupported frame parameter";
        case ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge:
            return "Frame requires too much memory for decoding";
        case ZSTD_ErrorCode.ZSTD_error_corruption_detected:
            return "Data corruption detected";
        case ZSTD_ErrorCode.ZSTD_error_checksum_wrong:
            return "Restored data doesn't match checksum";
        case ZSTD_ErrorCode.ZSTD_error_literals_headerWrong:
            return "Header of Literals' block doesn't respect format specification";
        case ZSTD_ErrorCode.ZSTD_error_parameter_unsupported:
            return "Unsupported parameter";
        case ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported:
            return "Unsupported combination of parameters";
        case ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound:
            return "Parameter is out of bound";
        case ZSTD_ErrorCode.ZSTD_error_init_missing:
            return "Context should be init first";
        case ZSTD_ErrorCode.ZSTD_error_memory_allocation:
            return "Allocation error : not enough memory";
        case ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall:
            return "workSpace buffer is not large enough";
        case ZSTD_ErrorCode.ZSTD_error_stage_wrong:
            return "Operation not authorized at current processing stage";
        case ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge:
            return "tableLog requires too much memory : unsupported";
        case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge:
            return "Unsupported max Symbol Value : too large";
        case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall:
            return "Specified maxSymbolValue is too small";
        case ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock:
            return "This mode cannot generate an uncompressed block";
        case ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected:
            return "pledged buffer stability condition is not respected";
        case ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted:
            return "Dictionary is corrupted";
        case ZSTD_ErrorCode.ZSTD_error_dictionary_wrong:
            return "Dictionary mismatch";
        case ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed:
            return "Cannot create Dictionary from provided samples";
        case ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall:
            return "Destination buffer is too small";
        case ZSTD_ErrorCode.ZSTD_error_srcSize_wrong:
            return "Src size is incorrect";
        case ZSTD_ErrorCode.ZSTD_error_dstBuffer_null:
            return "Operation on NULL destination buffer";
        case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull:
            return "Operation made no progress over multiple calls, due to output buffer being full";
        case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty:
            return "Operation made no progress over multiple calls, due to input being empty";
        case ZSTD_ErrorCode.ZSTD_error_frameIndex_tooLarge:
            return "Frame index is too large";
        case ZSTD_ErrorCode.ZSTD_error_seekableIO:
            return "An I/O error occurred when reading/seeking";
        case ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong:
            return "Destination buffer is wrong";
        case ZSTD_ErrorCode.ZSTD_error_srcBuffer_wrong:
            return "Source buffer is wrong";
        case ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed:
            return "Block-level external sequence producer returned an error code";
        case ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid:
            return "External sequences are not valid";
        case ZSTD_ErrorCode.ZSTD_error_maxCode:
        default:
            return notErrorCode;
    }
}
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/// <summary>
/// Pair of size estimates produced while sizing a compressed block:
/// the literals section alone, and the whole block.
/// </summary>
public struct EstimatedBlockSize
{
    // Estimated size of the literals section.
    public nuint estLitSize;
    // Estimated size of the entire block.
    public nuint estBlockSize;
}
|
||||
@@ -0,0 +1,19 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-*************************************
 *  Acceleration
 ***************************************/
/// <summary>
/// Acceleration parameters for the fastcover dictionary trainer:
/// trade training accuracy for speed.
/// </summary>
public struct FASTCOVER_accel_t
{
    /* Percentage of training samples used for ZDICT_finalizeDictionary */
    public uint finalize;

    /* Number of dmer skipped between each dmer counted in computeFrequency */
    public uint skip;

    public FASTCOVER_accel_t(uint finalize, uint skip)
    {
        this.finalize = finalize;
        this.skip = skip;
    }
}
|
||||
@@ -0,0 +1,19 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-*************************************
 *  Context
 ***************************************/
/// <summary>
/// Working context for fastcover dictionary training: the flattened
/// sample buffer, per-sample offsets/sizes, train/test split counts,
/// the dmer frequency table, and the (d, f) training parameters.
/// </summary>
public unsafe struct FASTCOVER_ctx_t
{
    // Concatenated sample data.
    public byte* samples;
    // Start offset of each sample within `samples`.
    public nuint* offsets;
    // Size of each sample.
    public nuint* samplesSizes;
    public nuint nbSamples;
    public nuint nbTrainSamples;
    public nuint nbTestSamples;
    // Total number of dmers across the training samples.
    public nuint nbDmers;
    // Frequency count per hashed dmer.
    public uint* freqs;
    // dmer length parameter.
    public uint d;
    // log2 size of the frequency table.
    public uint f;
    public FASTCOVER_accel_t accelParams;
}
|
||||
@@ -0,0 +1,12 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
 * Parameters for FASTCOVER_tryParameters().
 * Bundles everything a single parameter-search job needs: the shared
 * training context, the shared best-result accumulator, the output
 * capacity, and the candidate parameter set to evaluate.
 */
public unsafe struct FASTCOVER_tryParameters_data_s
{
    public FASTCOVER_ctx_t* ctx;
    // Shared best-dictionary tracker (updated via COVER_best_finish).
    public COVER_best_s* best;
    public nuint dictBufferCapacity;
    public ZDICT_cover_params_t parameters;
}
|
||||
@@ -0,0 +1,7 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/// <summary>
/// Pair of fingerprints tracking event statistics over time —
/// presumably past vs. newly observed events for drift detection;
/// TODO confirm against the code that consumes it.
/// </summary>
public struct FPStats
{
    public Fingerprint pastEvents;
    public Fingerprint newEvents;
}
|
||||
@@ -0,0 +1,16 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* *****************************************
 *  FSE symbol compression API
 *******************************************/
/*!
    This API consists of small unitary functions, which highly benefit from being inlined.
    Hence their body are included in next section.
 */
/// <summary>
/// FSE compression (encoding) state: current state value plus pointers
/// into the state-transition and symbol-transform tables.
/// </summary>
public unsafe struct FSE_CState_t
{
    public nint value;
    public void* stateTable;
    public void* symbolTT;
    public uint stateLog;
}
|
||||
@@ -0,0 +1,12 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* *****************************************
 *  FSE symbol decompression API
 *******************************************/
/// <summary>
/// FSE decompression (decoding) state: current state value plus a
/// pointer to the decoding table.
/// </summary>
public unsafe struct FSE_DState_t
{
    public nuint state;

    /* precise table may vary, depending on U16 */
    public void* table;
}
|
||||
@@ -0,0 +1,8 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* ====== Decompression ====== */
/// <summary>
/// Header prepended to an FSE decoding table: the table log and a flag
/// for the fast-decoding variant.
/// </summary>
public struct FSE_DTableHeader
{
    public ushort tableLog;
    // Nonzero selects the fast decoding path — TODO confirm exact
    // semantics against the table-building code.
    public ushort fastMode;
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user