Mirror of https://github.com/adamhathcock/sharpcompress.git (synced 2026-02-09 21:24:08 +00:00)

Compare commits (7 commits): adam/enabl...adam/zstd

| Author | SHA1 | Date |
|---|---|---|
| | b010cce1ca | |
| | ee2cbc8051 | |
| | 906baf18d2 | |
| | 0a7ffd003b | |
| | b545973c55 | |
| | 999af800af | |
| | 5b5336f456 | |
13  .github/COPILOT_AGENT_README.md (vendored)

@@ -1,13 +0,0 @@
# Copilot Coding Agent Configuration

This repository includes a minimal opt-in configuration and CI workflow to allow the GitHub Copilot coding agent to open and validate PRs.

- .copilot-agent.yml: opt-in config for automated agents
- .github/workflows/dotnetcore.yml: CI runs on PRs touching the solution, source, or tests to validate changes
- AGENTS.yml: general information for this project

Maintainers can adjust the allowed paths or disable the agent by editing or removing .copilot-agent.yml.

Notes:
- Do not change any other files in the repository.
- If build/test paths are different, update the workflow accordingly; this workflow targets SharpCompress.sln and the SharpCompress.Tests test project.
7  .github/agents/copilot-agent.yml (vendored)

@@ -1,7 +0,0 @@
enabled: true
agent:
  name: copilot-coding-agent
allow:
  - paths: ["src/**/*", "tests/**/*", "README.md", "AGENTS.md"]
    actions: ["create", "modify"]
require_review_before_merge: true
4  .github/workflows/dotnetcore.yml (vendored)

@@ -14,8 +14,8 @@ jobs:
        os: [windows-latest, ubuntu-latest]

    steps:
      - uses: actions/checkout@v5
      - uses: actions/setup-dotnet@v5
      - uses: actions/checkout@v4
      - uses: actions/setup-dotnet@v4
        with:
          dotnet-version: 8.0.x
      - run: dotnet run --project build/build.csproj
@@ -1,11 +1,10 @@
<Project>
  <ItemGroup>
    <PackageVersion Include="Bullseye" Version="6.0.0" />
    <PackageVersion Include="AwesomeAssertions" Version="9.2.1" />
    <PackageVersion Include="AwesomeAssertions" Version="9.2.0" />
    <PackageVersion Include="Glob" Version="1.1.9" />
    <PackageVersion Include="JetBrains.Profiler.SelfApi" Version="2.5.14" />
    <PackageVersion Include="Microsoft.Bcl.AsyncInterfaces" Version="8.0.0" />
    <PackageVersion Include="Microsoft.NET.Test.Sdk" Version="18.0.0" />
    <PackageVersion Include="Microsoft.NET.Test.Sdk" Version="17.13.0" />
    <PackageVersion Include="Mono.Posix.NETStandard" Version="1.0.0" />
    <PackageVersion Include="SimpleExec" Version="12.0.0" />
    <PackageVersion Include="System.Buffers" Version="4.6.1" />
@@ -13,6 +12,7 @@
    <PackageVersion Include="System.Text.Encoding.CodePages" Version="8.0.0" />
    <PackageVersion Include="xunit" Version="2.9.3" />
    <PackageVersion Include="xunit.runner.visualstudio" Version="3.1.5" />
    <PackageVersion Include="xunit.SkippableFact" Version="1.5.23" />
    <PackageVersion Include="ZstdSharp.Port" Version="0.8.6" />
    <PackageVersion Include="Microsoft.SourceLink.GitHub" Version="8.0.0" />
    <PackageVersion Include="Microsoft.NETFramework.ReferenceAssemblies" Version="1.0.3" />
@@ -23,8 +23,6 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Config", "Config", "{CDB425
		.github\workflows\dotnetcore.yml = .github\workflows\dotnetcore.yml
	EndProjectSection
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SharpCompress.Performance", "tests\SharpCompress.Performance\SharpCompress.Performance.csproj", "{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17}"
EndProject
Global
	GlobalSection(SolutionConfigurationPlatforms) = preSolution
		Debug|Any CPU = Debug|Any CPU
@@ -43,10 +41,6 @@ Global
		{D4D613CB-5E94-47FB-85BE-B8423D20C545}.Debug|Any CPU.Build.0 = Debug|Any CPU
		{D4D613CB-5E94-47FB-85BE-B8423D20C545}.Release|Any CPU.ActiveCfg = Release|Any CPU
		{D4D613CB-5E94-47FB-85BE-B8423D20C545}.Release|Any CPU.Build.0 = Release|Any CPU
		{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
		{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17}.Debug|Any CPU.Build.0 = Debug|Any CPU
		{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17}.Release|Any CPU.ActiveCfg = Release|Any CPU
		{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17}.Release|Any CPU.Build.0 = Release|Any CPU
	EndGlobalSection
	GlobalSection(SolutionProperties) = preSolution
		HideSolutionNode = FALSE
@@ -54,6 +48,5 @@ Global
	GlobalSection(NestedProjects) = preSolution
		{FD19DDD8-72B2-4024-8665-0D1F7A2AA998} = {3C5BE746-03E5-4895-9988-0B57F162F86C}
		{F2B1A1EB-0FA6-40D0-8908-E13247C7226F} = {0F0901FF-E8D9-426A-B5A2-17C7F47C1529}
		{5BDE6DBC-9E5F-4E21-AB71-F138A3E72B17} = {0F0901FF-E8D9-426A-B5A2-17C7F47C1529}
	EndGlobalSection
EndGlobal
@@ -144,12 +144,6 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
    /// <returns></returns>
    public IReader ExtractAllEntries()
    {
        if (!IsSolid && Type != ArchiveType.SevenZip)
        {
            throw new InvalidOperationException(
                "ExtractAllEntries can only be used on solid archives or 7Zip archives (which require random access)."
            );
        }
        ((IArchiveExtractionListener)this).EnsureEntriesLoaded();
        return CreateReaderForSolidExtraction();
    }
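As context for the ExtractAllEntries change above, the sketch below shows one way a caller might drive the IReader it returns. The path handling, the guard on IsSolid, and all names other than the SharpCompress members visible in this diff are illustrative assumptions, not code taken from this compare.

```csharp
using System.IO;
using SharpCompress.Archives;
using SharpCompress.Readers;

internal static class SolidExtractionSketch
{
    // Illustrative sketch: stream every entry of a solid archive through the
    // IReader returned by ExtractAllEntries(). Error handling is simplified.
    public static void ExtractAll(string archivePath, string destination)
    {
        using IArchive archive = ArchiveFactory.Open(archivePath);
        if (!archive.IsSolid)
        {
            // Non-solid archives can be extracted entry-by-entry instead.
            return;
        }

        using IReader reader = archive.ExtractAllEntries();
        while (reader.MoveToNextEntry())
        {
            if (reader.Entry.IsDirectory || reader.Entry.Key is null)
            {
                continue;
            }

            var path = Path.Combine(destination, reader.Entry.Key);
            var dir = Path.GetDirectoryName(path);
            if (!string.IsNullOrEmpty(dir))
            {
                Directory.CreateDirectory(dir);
            }

            using var fs = File.OpenWrite(path);
            reader.WriteEntryTo(fs);
        }
    }
}
```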
@@ -45,7 +45,7 @@ public static class ArchiveFactory
|
||||
/// <param name="options"></param>
|
||||
public static IArchive Open(string filePath, ReaderOptions? options = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
return Open(new FileInfo(filePath), options);
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ public static class ArchiveFactory
|
||||
/// <param name="options"></param>
|
||||
public static IArchive Open(IEnumerable<FileInfo> fileInfos, ReaderOptions? options = null)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var filesArray = fileInfos.ToArray();
|
||||
if (filesArray.Length == 0)
|
||||
{
|
||||
@@ -81,7 +81,7 @@ public static class ArchiveFactory
|
||||
return Open(fileInfo, options);
|
||||
}
|
||||
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
options ??= new ReaderOptions { LeaveStreamOpen = false };
|
||||
|
||||
return FindFactory<IMultiArchiveFactory>(fileInfo).Open(filesArray, options);
|
||||
@@ -94,7 +94,7 @@ public static class ArchiveFactory
|
||||
/// <param name="options"></param>
|
||||
public static IArchive Open(IEnumerable<Stream> streams, ReaderOptions? options = null)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var streamsArray = streams.ToArray();
|
||||
if (streamsArray.Length == 0)
|
||||
{
|
||||
@@ -107,7 +107,7 @@ public static class ArchiveFactory
|
||||
return Open(firstStream, options);
|
||||
}
|
||||
|
||||
firstStream.NotNull(nameof(firstStream));
|
||||
firstStream.CheckNotNull(nameof(firstStream));
|
||||
options ??= new ReaderOptions();
|
||||
|
||||
return FindFactory<IMultiArchiveFactory>(firstStream).Open(streamsArray, options);
|
||||
@@ -129,7 +129,7 @@ public static class ArchiveFactory
|
||||
private static T FindFactory<T>(FileInfo finfo)
|
||||
where T : IFactory
|
||||
{
|
||||
finfo.NotNull(nameof(finfo));
|
||||
finfo.CheckNotNull(nameof(finfo));
|
||||
using Stream stream = finfo.OpenRead();
|
||||
return FindFactory<T>(stream);
|
||||
}
|
||||
@@ -137,7 +137,7 @@ public static class ArchiveFactory
|
||||
private static T FindFactory<T>(Stream stream)
|
||||
where T : IFactory
|
||||
{
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
if (!stream.CanRead || !stream.CanSeek)
|
||||
{
|
||||
throw new ArgumentException("Stream should be readable and seekable");
|
||||
@@ -172,7 +172,7 @@ public static class ArchiveFactory
|
||||
int bufferSize = ReaderOptions.DefaultBufferSize
|
||||
)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
using Stream s = File.OpenRead(filePath);
|
||||
return IsArchive(s, out type, bufferSize);
|
||||
}
|
||||
@@ -184,7 +184,7 @@ public static class ArchiveFactory
|
||||
)
|
||||
{
|
||||
type = null;
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
|
||||
if (!stream.CanRead || !stream.CanSeek)
|
||||
{
|
||||
@@ -215,7 +215,7 @@ public static class ArchiveFactory
|
||||
/// <returns></returns>
|
||||
public static IEnumerable<string> GetFileParts(string part1)
|
||||
{
|
||||
part1.NotNullOrEmpty(nameof(part1));
|
||||
part1.CheckNotNullOrEmpty(nameof(part1));
|
||||
return GetFileParts(new FileInfo(part1)).Select(a => a.FullName);
|
||||
}
|
||||
|
||||
@@ -226,7 +226,7 @@ public static class ArchiveFactory
|
||||
/// <returns></returns>
|
||||
public static IEnumerable<FileInfo> GetFileParts(FileInfo part1)
|
||||
{
|
||||
part1.NotNull(nameof(part1));
|
||||
part1.CheckNotNull(nameof(part1));
|
||||
yield return part1;
|
||||
|
||||
foreach (var factory in Factory.Factories.OfType<IFactory>())
|
||||
|
||||
@@ -21,7 +21,7 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static GZipArchive Open(string filePath, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static GZipArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
return new GZipArchive(
|
||||
new SourceStream(
|
||||
fileInfo,
|
||||
@@ -52,7 +52,7 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var files = fileInfos.ToArray();
|
||||
return new GZipArchive(
|
||||
new SourceStream(
|
||||
@@ -70,7 +70,7 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static GZipArchive Open(IEnumerable<Stream> streams, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var strms = streams.ToArray();
|
||||
return new GZipArchive(
|
||||
new SourceStream(
|
||||
@@ -88,7 +88,7 @@ public class GZipArchive : AbstractWritableArchive<GZipArchiveEntry, GZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static GZipArchive Open(Stream stream, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
|
||||
if (stream is not { CanSeek: true })
|
||||
{
|
||||
|
||||
@@ -25,7 +25,7 @@ public static class IArchiveEntryExtensions
        using (entryStream)
        {
            using Stream s = new ListeningStream(streamListener, entryStream);
            s.CopyTo(streamToWriteTo);
            s.TransferTo(streamToWriteTo);
        }
        streamListener.FireEntryExtractionEnd(archiveEntry);
    }
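The CopyTo/TransferTo swap above acts on an entry stream obtained elsewhere. The standalone sketch below (illustrative, not code from this compare) shows the usual way such a stream is obtained and buffered via OpenEntryStream, the same pattern the TarArchive hunk later in this diff uses.

```csharp
using System.IO;
using SharpCompress.Archives;

internal static class SingleEntrySketch
{
    // Illustrative sketch: buffer one entry's decompressed bytes in memory.
    public static byte[] ReadEntryBytes(IArchiveEntry entry)
    {
        using var entryStream = entry.OpenEntryStream();
        using var memoryStream = new MemoryStream();
        entryStream.CopyTo(memoryStream);
        return memoryStream.ToArray();
    }
}
```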
@@ -45,10 +45,12 @@ public static class IArchiveExtensions
        var seenDirectories = new HashSet<string>();

        // Extract
        foreach (var entry in archive.Entries)
        var entries = archive.ExtractAllEntries();
        while (entries.MoveToNextEntry())
        {
            cancellationToken.ThrowIfCancellationRequested();

            var entry = entries.Entry;
            if (entry.IsDirectory)
            {
                var dirPath = Path.Combine(destination, entry.Key.NotNull("Entry Key is null"));
@@ -75,7 +77,7 @@ public static class IArchiveExtensions

            // Write file
            using var fs = File.OpenWrite(path);
            entry.WriteTo(fs);
            entries.WriteEntryTo(fs);

            // Update progress
            bytesRead += entry.Size;
@@ -95,7 +95,7 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
/// <param name="options"></param>
|
||||
public static RarArchive Open(string filePath, ReaderOptions? options = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
var fileInfo = new FileInfo(filePath);
|
||||
return new RarArchive(
|
||||
new SourceStream(
|
||||
@@ -113,7 +113,7 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
/// <param name="options"></param>
|
||||
public static RarArchive Open(FileInfo fileInfo, ReaderOptions? options = null)
|
||||
{
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
return new RarArchive(
|
||||
new SourceStream(
|
||||
fileInfo,
|
||||
@@ -130,7 +130,7 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
/// <param name="options"></param>
|
||||
public static RarArchive Open(Stream stream, ReaderOptions? options = null)
|
||||
{
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
|
||||
if (stream is not { CanSeek: true })
|
||||
{
|
||||
@@ -150,7 +150,7 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var files = fileInfos.ToArray();
|
||||
return new RarArchive(
|
||||
new SourceStream(
|
||||
@@ -168,7 +168,7 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static RarArchive Open(IEnumerable<Stream> streams, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var strms = streams.ToArray();
|
||||
return new RarArchive(
|
||||
new SourceStream(
|
||||
|
||||
@@ -21,7 +21,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
/// <param name="readerOptions"></param>
|
||||
public static SevenZipArchive Open(string filePath, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty("filePath");
|
||||
filePath.CheckNotNullOrEmpty("filePath");
|
||||
return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
/// <param name="readerOptions"></param>
|
||||
public static SevenZipArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
fileInfo.NotNull("fileInfo");
|
||||
fileInfo.CheckNotNull("fileInfo");
|
||||
return new SevenZipArchive(
|
||||
new SourceStream(
|
||||
fileInfo,
|
||||
@@ -52,7 +52,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var files = fileInfos.ToArray();
|
||||
return new SevenZipArchive(
|
||||
new SourceStream(
|
||||
@@ -73,7 +73,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var strms = streams.ToArray();
|
||||
return new SevenZipArchive(
|
||||
new SourceStream(
|
||||
@@ -91,7 +91,7 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
|
||||
/// <param name="readerOptions"></param>
|
||||
public static SevenZipArchive Open(Stream stream, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
stream.NotNull("stream");
|
||||
stream.CheckNotNull("stream");
|
||||
|
||||
if (stream is not { CanSeek: true })
|
||||
{
|
||||
|
||||
@@ -22,7 +22,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static TarArchive Open(string filePath, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
@@ -33,7 +33,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static TarArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
return new TarArchive(
|
||||
new SourceStream(
|
||||
fileInfo,
|
||||
@@ -53,7 +53,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var files = fileInfos.ToArray();
|
||||
return new TarArchive(
|
||||
new SourceStream(
|
||||
@@ -71,7 +71,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static TarArchive Open(IEnumerable<Stream> streams, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var strms = streams.ToArray();
|
||||
return new TarArchive(
|
||||
new SourceStream(
|
||||
@@ -89,7 +89,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static TarArchive Open(Stream stream, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
|
||||
if (stream is not { CanSeek: true })
|
||||
{
|
||||
@@ -178,7 +178,7 @@ public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
|
||||
using (var entryStream = entry.OpenEntryStream())
|
||||
{
|
||||
using var memoryStream = new MemoryStream();
|
||||
entryStream.CopyTo(memoryStream);
|
||||
entryStream.TransferTo(memoryStream);
|
||||
memoryStream.Position = 0;
|
||||
var bytes = memoryStream.ToArray();
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static ZipArchive Open(string filePath, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
filePath.NotNullOrEmpty(nameof(filePath));
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static ZipArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
fileInfo.NotNull(nameof(fileInfo));
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
return new ZipArchive(
|
||||
new SourceStream(
|
||||
fileInfo,
|
||||
@@ -74,7 +74,7 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
ReaderOptions? readerOptions = null
|
||||
)
|
||||
{
|
||||
fileInfos.NotNull(nameof(fileInfos));
|
||||
fileInfos.CheckNotNull(nameof(fileInfos));
|
||||
var files = fileInfos.ToArray();
|
||||
return new ZipArchive(
|
||||
new SourceStream(
|
||||
@@ -92,7 +92,7 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static ZipArchive Open(IEnumerable<Stream> streams, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
streams.NotNull(nameof(streams));
|
||||
streams.CheckNotNull(nameof(streams));
|
||||
var strms = streams.ToArray();
|
||||
return new ZipArchive(
|
||||
new SourceStream(
|
||||
@@ -110,7 +110,7 @@ public class ZipArchive : AbstractWritableArchive<ZipArchiveEntry, ZipVolume>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static ZipArchive Open(Stream stream, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
stream.NotNull(nameof(stream));
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
|
||||
if (stream is not { CanSeek: true })
|
||||
{
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
[assembly: CLSCompliant(true)]
|
||||
[assembly: CLSCompliant(false)]
|
||||
[assembly: InternalsVisibleTo(
|
||||
"SharpCompress.Test,PublicKey=0024000004800000940000000602000000240000525341310004000001000100158bebf1433f76dffc356733c138babea7a47536c65ed8009b16372c6f4edbb20554db74a62687f56b97c20a6ce8c4b123280279e33c894e7b3aa93ab3c573656fde4db576cfe07dba09619ead26375b25d2c4a8e43f7be257d712b0dd2eb546f67adb09281338618a58ac834fc038dd7e2740a7ab3591826252e4f4516306dc"
|
||||
)]
|
||||
|
||||
@@ -7,54 +7,53 @@ using System.Threading.Tasks;
|
||||
using SharpCompress.Common.GZip;
|
||||
using SharpCompress.Common.Tar;
|
||||
|
||||
namespace SharpCompress.Common.Arc
|
||||
namespace SharpCompress.Common.Arc;
|
||||
|
||||
public class ArcEntry : Entry
|
||||
{
|
||||
public class ArcEntry : Entry
|
||||
private readonly ArcFilePart? _filePart;
|
||||
|
||||
internal ArcEntry(ArcFilePart? filePart)
|
||||
{
|
||||
private readonly ArcFilePart? _filePart;
|
||||
|
||||
internal ArcEntry(ArcFilePart? filePart)
|
||||
{
|
||||
_filePart = filePart;
|
||||
}
|
||||
|
||||
public override long Crc
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_filePart == null)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
return _filePart.Header.Crc16;
|
||||
}
|
||||
}
|
||||
|
||||
public override string? Key => _filePart?.Header.Name;
|
||||
|
||||
public override string? LinkTarget => null;
|
||||
|
||||
public override long CompressedSize => _filePart?.Header.CompressedSize ?? 0;
|
||||
|
||||
public override CompressionType CompressionType =>
|
||||
_filePart?.Header.CompressionMethod ?? CompressionType.Unknown;
|
||||
|
||||
public override long Size => throw new NotImplementedException();
|
||||
|
||||
public override DateTime? LastModifiedTime => null;
|
||||
|
||||
public override DateTime? CreatedTime => null;
|
||||
|
||||
public override DateTime? LastAccessedTime => null;
|
||||
|
||||
public override DateTime? ArchivedTime => null;
|
||||
|
||||
public override bool IsEncrypted => false;
|
||||
|
||||
public override bool IsDirectory => false;
|
||||
|
||||
public override bool IsSplitAfter => false;
|
||||
|
||||
internal override IEnumerable<FilePart> Parts => _filePart.Empty();
|
||||
_filePart = filePart;
|
||||
}
|
||||
|
||||
public override long Crc
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_filePart == null)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
return _filePart.Header.Crc16;
|
||||
}
|
||||
}
|
||||
|
||||
public override string? Key => _filePart?.Header.Name;
|
||||
|
||||
public override string? LinkTarget => null;
|
||||
|
||||
public override long CompressedSize => _filePart?.Header.CompressedSize ?? 0;
|
||||
|
||||
public override CompressionType CompressionType =>
|
||||
_filePart?.Header.CompressionMethod ?? CompressionType.Unknown;
|
||||
|
||||
public override long Size => throw new NotImplementedException();
|
||||
|
||||
public override DateTime? LastModifiedTime => null;
|
||||
|
||||
public override DateTime? CreatedTime => null;
|
||||
|
||||
public override DateTime? LastAccessedTime => null;
|
||||
|
||||
public override DateTime? ArchivedTime => null;
|
||||
|
||||
public override bool IsEncrypted => false;
|
||||
|
||||
public override bool IsDirectory => false;
|
||||
|
||||
public override bool IsSplitAfter => false;
|
||||
|
||||
internal override IEnumerable<FilePart> Parts => _filePart.Empty();
|
||||
}
|
||||
|
||||
@@ -3,74 +3,73 @@ using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Arc
|
||||
namespace SharpCompress.Common.Arc;
|
||||
|
||||
public class ArcEntryHeader
|
||||
{
|
||||
public class ArcEntryHeader
|
||||
public ArchiveEncoding ArchiveEncoding { get; }
|
||||
public CompressionType CompressionMethod { get; private set; }
|
||||
public string? Name { get; private set; }
|
||||
public long CompressedSize { get; private set; }
|
||||
public DateTime DateTime { get; private set; }
|
||||
public int Crc16 { get; private set; }
|
||||
public long OriginalSize { get; private set; }
|
||||
public long DataStartPosition { get; private set; }
|
||||
|
||||
public ArcEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
{
|
||||
public ArchiveEncoding ArchiveEncoding { get; }
|
||||
public CompressionType CompressionMethod { get; private set; }
|
||||
public string? Name { get; private set; }
|
||||
public long CompressedSize { get; private set; }
|
||||
public DateTime DateTime { get; private set; }
|
||||
public int Crc16 { get; private set; }
|
||||
public long OriginalSize { get; private set; }
|
||||
public long DataStartPosition { get; private set; }
|
||||
this.ArchiveEncoding = archiveEncoding;
|
||||
}
|
||||
|
||||
public ArcEntryHeader(ArchiveEncoding archiveEncoding)
|
||||
public ArcEntryHeader? ReadHeader(Stream stream)
|
||||
{
|
||||
byte[] headerBytes = new byte[29];
|
||||
if (stream.Read(headerBytes, 0, headerBytes.Length) != headerBytes.Length)
|
||||
{
|
||||
this.ArchiveEncoding = archiveEncoding;
|
||||
return null;
|
||||
}
|
||||
DataStartPosition = stream.Position;
|
||||
return LoadFrom(headerBytes);
|
||||
}
|
||||
|
||||
public ArcEntryHeader? ReadHeader(Stream stream)
|
||||
public ArcEntryHeader LoadFrom(byte[] headerBytes)
|
||||
{
|
||||
CompressionMethod = GetCompressionType(headerBytes[1]);
|
||||
|
||||
// Read name
|
||||
int nameEnd = Array.IndexOf(headerBytes, (byte)0, 1); // Find null terminator
|
||||
Name = Encoding.UTF8.GetString(headerBytes, 2, nameEnd > 0 ? nameEnd - 2 : 12);
|
||||
|
||||
int offset = 15;
|
||||
CompressedSize = BitConverter.ToUInt32(headerBytes, offset);
|
||||
offset += 4;
|
||||
uint rawDateTime = BitConverter.ToUInt32(headerBytes, offset);
|
||||
DateTime = ConvertToDateTime(rawDateTime);
|
||||
offset += 4;
|
||||
Crc16 = BitConverter.ToUInt16(headerBytes, offset);
|
||||
offset += 2;
|
||||
OriginalSize = BitConverter.ToUInt32(headerBytes, offset);
|
||||
return this;
|
||||
}
|
||||
|
||||
private CompressionType GetCompressionType(byte value)
|
||||
{
|
||||
return value switch
|
||||
{
|
||||
byte[] headerBytes = new byte[29];
|
||||
if (stream.Read(headerBytes, 0, headerBytes.Length) != headerBytes.Length)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
DataStartPosition = stream.Position;
|
||||
return LoadFrom(headerBytes);
|
||||
}
|
||||
1 or 2 => CompressionType.None,
|
||||
3 => CompressionType.RLE90,
|
||||
4 => CompressionType.Squeezed,
|
||||
5 or 6 or 7 or 8 => CompressionType.Crunched,
|
||||
9 => CompressionType.Squashed,
|
||||
10 => CompressionType.Crushed,
|
||||
11 => CompressionType.Distilled,
|
||||
_ => CompressionType.Unknown,
|
||||
};
|
||||
}
|
||||
|
||||
public ArcEntryHeader LoadFrom(byte[] headerBytes)
|
||||
{
|
||||
CompressionMethod = GetCompressionType(headerBytes[1]);
|
||||
|
||||
// Read name
|
||||
int nameEnd = Array.IndexOf(headerBytes, (byte)0, 1); // Find null terminator
|
||||
Name = Encoding.UTF8.GetString(headerBytes, 2, nameEnd > 0 ? nameEnd - 2 : 12);
|
||||
|
||||
int offset = 15;
|
||||
CompressedSize = BitConverter.ToUInt32(headerBytes, offset);
|
||||
offset += 4;
|
||||
uint rawDateTime = BitConverter.ToUInt32(headerBytes, offset);
|
||||
DateTime = ConvertToDateTime(rawDateTime);
|
||||
offset += 4;
|
||||
Crc16 = BitConverter.ToUInt16(headerBytes, offset);
|
||||
offset += 2;
|
||||
OriginalSize = BitConverter.ToUInt32(headerBytes, offset);
|
||||
return this;
|
||||
}
|
||||
|
||||
private CompressionType GetCompressionType(byte value)
|
||||
{
|
||||
return value switch
|
||||
{
|
||||
1 or 2 => CompressionType.None,
|
||||
3 => CompressionType.RLE90,
|
||||
4 => CompressionType.Squeezed,
|
||||
5 or 6 or 7 or 8 => CompressionType.Crunched,
|
||||
9 => CompressionType.Squashed,
|
||||
10 => CompressionType.Crushed,
|
||||
11 => CompressionType.Distilled,
|
||||
_ => CompressionType.Unknown,
|
||||
};
|
||||
}
|
||||
|
||||
public static DateTime ConvertToDateTime(long rawDateTime)
|
||||
{
|
||||
// Convert Unix timestamp to DateTime (UTC)
|
||||
return DateTimeOffset.FromUnixTimeSeconds(rawDateTime).UtcDateTime;
|
||||
}
|
||||
public static DateTime ConvertToDateTime(long rawDateTime)
|
||||
{
|
||||
// Convert Unix timestamp to DateTime (UTC)
|
||||
return DateTimeOffset.FromUnixTimeSeconds(rawDateTime).UtcDateTime;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,63 +13,55 @@ using SharpCompress.Compressors.RLE90;
|
||||
using SharpCompress.Compressors.Squeezed;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Common.Arc
|
||||
namespace SharpCompress.Common.Arc;
|
||||
|
||||
public class ArcFilePart : FilePart
|
||||
{
|
||||
public class ArcFilePart : FilePart
|
||||
private readonly Stream? _stream;
|
||||
|
||||
internal ArcFilePart(ArcEntryHeader localArcHeader, Stream? seekableStream)
|
||||
: base(localArcHeader.ArchiveEncoding)
|
||||
{
|
||||
private readonly Stream? _stream;
|
||||
|
||||
internal ArcFilePart(ArcEntryHeader localArcHeader, Stream? seekableStream)
|
||||
: base(localArcHeader.ArchiveEncoding)
|
||||
{
|
||||
_stream = seekableStream;
|
||||
Header = localArcHeader;
|
||||
}
|
||||
|
||||
internal ArcEntryHeader Header { get; set; }
|
||||
|
||||
internal override string? FilePartName => Header.Name;
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (_stream != null)
|
||||
{
|
||||
Stream compressedStream;
|
||||
switch (Header.CompressionMethod)
|
||||
{
|
||||
case CompressionType.None:
|
||||
compressedStream = new ReadOnlySubStream(
|
||||
_stream,
|
||||
Header.DataStartPosition,
|
||||
Header.CompressedSize
|
||||
);
|
||||
break;
|
||||
case CompressionType.RLE90:
|
||||
compressedStream = new RunLength90Stream(
|
||||
_stream,
|
||||
(int)Header.CompressedSize
|
||||
);
|
||||
break;
|
||||
case CompressionType.Squeezed:
|
||||
compressedStream = new SqueezeStream(_stream, (int)Header.CompressedSize);
|
||||
break;
|
||||
case CompressionType.Crunched:
|
||||
compressedStream = new ArcLzwStream(
|
||||
_stream,
|
||||
(int)Header.CompressedSize,
|
||||
true
|
||||
);
|
||||
break;
|
||||
default:
|
||||
throw new NotSupportedException(
|
||||
"CompressionMethod: " + Header.CompressionMethod
|
||||
);
|
||||
}
|
||||
return compressedStream;
|
||||
}
|
||||
return _stream.NotNull();
|
||||
}
|
||||
|
||||
internal override Stream? GetRawStream() => _stream;
|
||||
_stream = seekableStream;
|
||||
Header = localArcHeader;
|
||||
}
|
||||
|
||||
internal ArcEntryHeader Header { get; set; }
|
||||
|
||||
internal override string? FilePartName => Header.Name;
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
if (_stream != null)
|
||||
{
|
||||
Stream compressedStream;
|
||||
switch (Header.CompressionMethod)
|
||||
{
|
||||
case CompressionType.None:
|
||||
compressedStream = new ReadOnlySubStream(
|
||||
_stream,
|
||||
Header.DataStartPosition,
|
||||
Header.CompressedSize
|
||||
);
|
||||
break;
|
||||
case CompressionType.RLE90:
|
||||
compressedStream = new RunLength90Stream(_stream, (int)Header.CompressedSize);
|
||||
break;
|
||||
case CompressionType.Squeezed:
|
||||
compressedStream = new SqueezeStream(_stream, (int)Header.CompressedSize);
|
||||
break;
|
||||
case CompressionType.Crunched:
|
||||
compressedStream = new ArcLzwStream(_stream, (int)Header.CompressedSize, true);
|
||||
break;
|
||||
default:
|
||||
throw new NotSupportedException(
|
||||
"CompressionMethod: " + Header.CompressionMethod
|
||||
);
|
||||
}
|
||||
return compressedStream;
|
||||
}
|
||||
return _stream.NotNull();
|
||||
}
|
||||
|
||||
internal override Stream? GetRawStream() => _stream;
|
||||
}
|
||||
|
||||
@@ -6,11 +6,10 @@ using System.Text;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
namespace SharpCompress.Common.Arc
|
||||
namespace SharpCompress.Common.Arc;
|
||||
|
||||
public class ArcVolume : Volume
|
||||
{
|
||||
public class ArcVolume : Volume
|
||||
{
|
||||
public ArcVolume(Stream stream, ReaderOptions readerOptions, int index = 0)
|
||||
: base(stream, readerOptions, index) { }
|
||||
}
|
||||
public ArcVolume(Stream stream, ReaderOptions readerOptions, int index = 0)
|
||||
: base(stream, readerOptions, index) { }
|
||||
}
|
||||
|
||||
@@ -91,15 +91,8 @@ internal abstract class ZipFileEntry : ZipHeader

    protected void LoadExtra(byte[] extra)
    {
        for (var i = 0; i < extra.Length; )
        for (var i = 0; i < extra.Length - 4; )
        {
            // Ensure we have at least a header (2-byte ID + 2-byte length)
            if (i + 4 > extra.Length)
            {
                // Incomplete header — stop parsing extras
                break;
            }

            var type = (ExtraDataType)BinaryPrimitives.ReadUInt16LittleEndian(extra.AsSpan(i));
            if (!Enum.IsDefined(typeof(ExtraDataType), type))
            {
@@ -113,17 +106,7 @@ internal abstract class ZipFileEntry : ZipHeader
            if (length > extra.Length)
            {
                // bad extras block
                break; // allow processing optional other blocks
            }
            // Some ZIP files contain vendor-specific or malformed extra fields where the declared
            // data length extends beyond the remaining buffer. This adjustment ensures that
            // we only read data within bounds (i + 4 + length <= extra.Length)
            // The example here is: 41 43 18 00 41 52 43 30 46 EB FF FF 51 29 03 C6 03 00 00 00 00 00 00 00 00
            // No existing zip utility uses 0x4341 ('AC')
            if (i + 4 + length > extra.Length)
            {
                // incomplete or corrupt field
                break; // allow processing optional other blocks
                return;
            }

            var data = new byte[length];
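To make the bounds check above concrete, the standalone snippet below walks the exact byte sequence quoted in the comment. It is an illustration of the check, not library code from this compare.

```csharp
using System;
using System.Buffers.Binary;

internal static class ExtraFieldBoundsCheckExample
{
    public static void Main()
    {
        // The malformed extra block quoted in the comment above (25 bytes total).
        byte[] extra =
        {
            0x41, 0x43, 0x18, 0x00, 0x41, 0x52, 0x43, 0x30, 0x46, 0xEB, 0xFF, 0xFF,
            0x51, 0x29, 0x03, 0xC6, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        };

        var i = 0;
        var id = BinaryPrimitives.ReadUInt16LittleEndian(extra.AsSpan(i));         // 0x4341 ('AC')
        var length = BinaryPrimitives.ReadUInt16LittleEndian(extra.AsSpan(i + 2)); // 0x0018 = 24

        // The declared data length (24) exceeds the 21 bytes remaining after the
        // 4-byte header, so a bounds-checked parser stops instead of overreading.
        var truncated = i + 4 + length > extra.Length; // 28 > 25 => true
        Console.WriteLine($"id=0x{id:X4}, length={length}, truncated={truncated}");
    }
}
```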
@@ -13,8 +13,8 @@ using SharpCompress.Compressors.PPMd;
|
||||
using SharpCompress.Compressors.Reduce;
|
||||
using SharpCompress.Compressors.Shrink;
|
||||
using SharpCompress.Compressors.Xz;
|
||||
using SharpCompress.Compressors.ZStandard;
|
||||
using SharpCompress.IO;
|
||||
using ZstdSharp;
|
||||
|
||||
namespace SharpCompress.Common.Zip;
|
||||
|
||||
|
||||
@@ -1,36 +1,35 @@
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Filters
|
||||
namespace SharpCompress.Compressors.Filters;
|
||||
|
||||
internal class DeltaFilter : Filter
|
||||
{
|
||||
internal class DeltaFilter : Filter
|
||||
private const int DISTANCE_MIN = 1;
|
||||
private const int DISTANCE_MAX = 256;
|
||||
private const int DISTANCE_MASK = DISTANCE_MAX - 1;
|
||||
|
||||
private int _distance;
|
||||
private byte[] _history;
|
||||
private int _position;
|
||||
|
||||
public DeltaFilter(bool isEncoder, Stream baseStream, byte[] info)
|
||||
: base(isEncoder, baseStream, 1)
|
||||
{
|
||||
private const int DISTANCE_MIN = 1;
|
||||
private const int DISTANCE_MAX = 256;
|
||||
private const int DISTANCE_MASK = DISTANCE_MAX - 1;
|
||||
_distance = info[0];
|
||||
_history = new byte[DISTANCE_MAX];
|
||||
_position = 0;
|
||||
}
|
||||
|
||||
private int _distance;
|
||||
private byte[] _history;
|
||||
private int _position;
|
||||
protected override int Transform(byte[] buffer, int offset, int count)
|
||||
{
|
||||
var end = offset + count;
|
||||
|
||||
public DeltaFilter(bool isEncoder, Stream baseStream, byte[] info)
|
||||
: base(isEncoder, baseStream, 1)
|
||||
for (var i = offset; i < end; i++)
|
||||
{
|
||||
_distance = info[0];
|
||||
_history = new byte[DISTANCE_MAX];
|
||||
_position = 0;
|
||||
buffer[i] += _history[(_distance + _position--) & DISTANCE_MASK];
|
||||
_history[_position & DISTANCE_MASK] = buffer[i];
|
||||
}
|
||||
|
||||
protected override int Transform(byte[] buffer, int offset, int count)
|
||||
{
|
||||
var end = offset + count;
|
||||
|
||||
for (var i = offset; i < end; i++)
|
||||
{
|
||||
buffer[i] += _history[(_distance + _position--) & DISTANCE_MASK];
|
||||
_history[_position & DISTANCE_MASK] = buffer[i];
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
#nullable disable
|
||||
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.LZMA.LZ;
|
||||
|
||||
internal class OutWindow : IDisposable
|
||||
internal class OutWindow
|
||||
{
|
||||
private byte[] _buffer;
|
||||
private int _windowSize;
|
||||
@@ -16,22 +15,19 @@ internal class OutWindow : IDisposable
|
||||
private int _pendingDist;
|
||||
private Stream _stream;
|
||||
|
||||
private long _total;
|
||||
private long _limit;
|
||||
|
||||
public long Total => _total;
|
||||
public long _total;
|
||||
public long _limit;
|
||||
|
||||
public void Create(int windowSize)
|
||||
{
|
||||
if (_windowSize != windowSize)
|
||||
{
|
||||
if (_buffer is not null)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(_buffer);
|
||||
}
|
||||
_buffer = ArrayPool<byte>.Shared.Rent(windowSize);
|
||||
_buffer = new byte[windowSize];
|
||||
}
|
||||
else
|
||||
{
|
||||
_buffer[windowSize - 1] = 0;
|
||||
}
|
||||
_buffer[windowSize - 1] = 0;
|
||||
_windowSize = windowSize;
|
||||
_pos = 0;
|
||||
_streamPos = 0;
|
||||
@@ -40,22 +36,7 @@ internal class OutWindow : IDisposable
|
||||
_limit = 0;
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
ReleaseStream();
|
||||
if (_buffer is null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
ArrayPool<byte>.Shared.Return(_buffer);
|
||||
_buffer = null;
|
||||
}
|
||||
|
||||
public void Reset()
|
||||
{
|
||||
ReleaseStream();
|
||||
Create(_windowSize);
|
||||
}
|
||||
public void Reset() => Create(_windowSize);
|
||||
|
||||
public void Init(Stream stream)
|
||||
{
|
||||
@@ -85,7 +66,7 @@ internal class OutWindow : IDisposable
|
||||
_stream = null;
|
||||
}
|
||||
|
||||
private void Flush()
|
||||
public void Flush()
|
||||
{
|
||||
if (_stream is null)
|
||||
{
|
||||
|
||||
@@ -294,7 +294,7 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
|
||||
}
|
||||
else
|
||||
{
|
||||
_outWindow.SetLimit(long.MaxValue - _outWindow.Total);
|
||||
_outWindow.SetLimit(long.MaxValue - _outWindow._total);
|
||||
}
|
||||
|
||||
var rangeDecoder = new RangeCoder.Decoder();
|
||||
@@ -305,7 +305,6 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
|
||||
_outWindow.ReleaseStream();
|
||||
rangeDecoder.ReleaseStream();
|
||||
|
||||
_outWindow.Dispose();
|
||||
_outWindow = null;
|
||||
}
|
||||
|
||||
@@ -317,7 +316,7 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
|
||||
|
||||
while (outWindow.HasSpace)
|
||||
{
|
||||
var posState = (uint)outWindow.Total & _posStateMask;
|
||||
var posState = (uint)outWindow._total & _posStateMask;
|
||||
if (
|
||||
_isMatchDecoders[(_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState]
|
||||
.Decode(rangeDecoder) == 0
|
||||
@@ -329,14 +328,18 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
|
||||
{
|
||||
b = _literalDecoder.DecodeWithMatchByte(
|
||||
rangeDecoder,
|
||||
(uint)outWindow.Total,
|
||||
(uint)outWindow._total,
|
||||
prevByte,
|
||||
outWindow.GetByte((int)_rep0)
|
||||
);
|
||||
}
|
||||
else
|
||||
{
|
||||
b = _literalDecoder.DecodeNormal(rangeDecoder, (uint)outWindow.Total, prevByte);
|
||||
b = _literalDecoder.DecodeNormal(
|
||||
rangeDecoder,
|
||||
(uint)outWindow._total,
|
||||
prevByte
|
||||
);
|
||||
}
|
||||
outWindow.PutByte(b);
|
||||
_state.UpdateChar();
|
||||
@@ -421,7 +424,7 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
|
||||
_rep0 = posSlot;
|
||||
}
|
||||
}
|
||||
if (_rep0 >= outWindow.Total || _rep0 >= dictionarySizeCheck)
|
||||
if (_rep0 >= outWindow._total || _rep0 >= dictionarySizeCheck)
|
||||
{
|
||||
if (_rep0 == 0xFFFFFFFF)
|
||||
{
|
||||
|
||||
@@ -178,7 +178,6 @@ public class LzmaStream : Stream, IStreamStack
|
||||
_position = _encoder.Code(null, true);
|
||||
}
|
||||
_inputStream?.Dispose();
|
||||
_outWindow.Dispose();
|
||||
}
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ using SharpCompress.Compressors.Deflate;
|
||||
using SharpCompress.Compressors.Filters;
|
||||
using SharpCompress.Compressors.LZMA.Utilites;
|
||||
using SharpCompress.Compressors.PPMd;
|
||||
using ZstdSharp;
|
||||
using SharpCompress.Compressors.ZStandard;
|
||||
|
||||
namespace SharpCompress.Compressors.LZMA;
|
||||
|
||||
|
||||
@@ -1,65 +1,64 @@
|
||||
namespace SharpCompress.Compressors.Lzw
|
||||
namespace SharpCompress.Compressors.Lzw;
|
||||
|
||||
/// <summary>
|
||||
/// This class contains constants used for LZW
|
||||
/// </summary>
|
||||
[System.Diagnostics.CodeAnalysis.SuppressMessage(
|
||||
"Naming",
|
||||
"CA1707:Identifiers should not contain underscores",
|
||||
Justification = "kept for backwards compatibility"
|
||||
)]
|
||||
public sealed class LzwConstants
|
||||
{
|
||||
/// <summary>
|
||||
/// This class contains constants used for LZW
|
||||
/// Magic number found at start of LZW header: 0x1f 0x9d
|
||||
/// </summary>
|
||||
[System.Diagnostics.CodeAnalysis.SuppressMessage(
|
||||
"Naming",
|
||||
"CA1707:Identifiers should not contain underscores",
|
||||
Justification = "kept for backwards compatibility"
|
||||
)]
|
||||
public sealed class LzwConstants
|
||||
{
|
||||
/// <summary>
|
||||
/// Magic number found at start of LZW header: 0x1f 0x9d
|
||||
/// </summary>
|
||||
public const int MAGIC = 0x1f9d;
|
||||
public const int MAGIC = 0x1f9d;
|
||||
|
||||
/// <summary>
|
||||
/// Maximum number of bits per code
|
||||
/// </summary>
|
||||
public const int MAX_BITS = 16;
|
||||
/// <summary>
|
||||
/// Maximum number of bits per code
|
||||
/// </summary>
|
||||
public const int MAX_BITS = 16;
|
||||
|
||||
/* 3rd header byte:
|
||||
* bit 0..4 Number of compression bits
|
||||
* bit 5 Extended header
|
||||
* bit 6 Free
|
||||
* bit 7 Block mode
|
||||
*/
|
||||
/* 3rd header byte:
|
||||
* bit 0..4 Number of compression bits
|
||||
* bit 5 Extended header
|
||||
* bit 6 Free
|
||||
* bit 7 Block mode
|
||||
*/
|
||||
|
||||
/// <summary>
|
||||
/// Mask for 'number of compression bits'
|
||||
/// </summary>
|
||||
public const int BIT_MASK = 0x1f;
|
||||
/// <summary>
|
||||
/// Mask for 'number of compression bits'
|
||||
/// </summary>
|
||||
public const int BIT_MASK = 0x1f;
|
||||
|
||||
/// <summary>
|
||||
/// Indicates the presence of a fourth header byte
|
||||
/// </summary>
|
||||
public const int EXTENDED_MASK = 0x20;
|
||||
/// <summary>
|
||||
/// Indicates the presence of a fourth header byte
|
||||
/// </summary>
|
||||
public const int EXTENDED_MASK = 0x20;
|
||||
|
||||
//public const int FREE_MASK = 0x40;
|
||||
//public const int FREE_MASK = 0x40;
|
||||
|
||||
/// <summary>
|
||||
/// Reserved bits
|
||||
/// </summary>
|
||||
public const int RESERVED_MASK = 0x60;
|
||||
/// <summary>
|
||||
/// Reserved bits
|
||||
/// </summary>
|
||||
public const int RESERVED_MASK = 0x60;
|
||||
|
||||
/// <summary>
|
||||
/// Block compression: if table is full and compression rate is dropping,
|
||||
/// clear the dictionary.
|
||||
/// </summary>
|
||||
public const int BLOCK_MODE_MASK = 0x80;
|
||||
/// <summary>
|
||||
/// Block compression: if table is full and compression rate is dropping,
|
||||
/// clear the dictionary.
|
||||
/// </summary>
|
||||
public const int BLOCK_MODE_MASK = 0x80;
|
||||
|
||||
/// <summary>
|
||||
/// LZW file header size (in bytes)
|
||||
/// </summary>
|
||||
public const int HDR_SIZE = 3;
|
||||
/// <summary>
|
||||
/// LZW file header size (in bytes)
|
||||
/// </summary>
|
||||
public const int HDR_SIZE = 3;
|
||||
|
||||
/// <summary>
|
||||
/// Initial number of bits per code
|
||||
/// </summary>
|
||||
public const int INIT_BITS = 9;
|
||||
/// <summary>
|
||||
/// Initial number of bits per code
|
||||
/// </summary>
|
||||
public const int INIT_BITS = 9;
|
||||
|
||||
private LzwConstants() { }
|
||||
}
|
||||
private LzwConstants() { }
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large.
@@ -1,52 +1,51 @@
|
||||
using System.Collections.Generic;
|
||||
using System.Linq;
|
||||
|
||||
namespace SharpCompress.Compressors.RLE90
|
||||
namespace SharpCompress.Compressors.RLE90;
|
||||
|
||||
public static class RLE
|
||||
{
|
||||
public static class RLE
|
||||
private const byte DLE = 0x90;
|
||||
|
||||
/// <summary>
|
||||
/// Unpacks an RLE compressed buffer.
|
||||
/// Format: <char> DLE <count>, where count == 0 -> DLE
|
||||
/// </summary>
|
||||
/// <param name="compressedBuffer">The compressed buffer to unpack.</param>
|
||||
/// <returns>A list of unpacked bytes.</returns>
|
||||
public static List<byte> UnpackRLE(byte[] compressedBuffer)
|
||||
{
|
||||
private const byte DLE = 0x90;
|
||||
var result = new List<byte>(compressedBuffer.Length * 2); // Optimized initial capacity
|
||||
var countMode = false;
|
||||
byte last = 0;
|
||||
|
||||
/// <summary>
|
||||
/// Unpacks an RLE compressed buffer.
|
||||
/// Format: <char> DLE <count>, where count == 0 -> DLE
|
||||
/// </summary>
|
||||
/// <param name="compressedBuffer">The compressed buffer to unpack.</param>
|
||||
/// <returns>A list of unpacked bytes.</returns>
|
||||
public static List<byte> UnpackRLE(byte[] compressedBuffer)
|
||||
foreach (var c in compressedBuffer)
|
||||
{
|
||||
var result = new List<byte>(compressedBuffer.Length * 2); // Optimized initial capacity
|
||||
var countMode = false;
|
||||
byte last = 0;
|
||||
|
||||
foreach (var c in compressedBuffer)
|
||||
if (!countMode)
|
||||
{
|
||||
if (!countMode)
|
||||
if (c == DLE)
|
||||
{
|
||||
if (c == DLE)
|
||||
{
|
||||
countMode = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
result.Add(c);
|
||||
last = c;
|
||||
}
|
||||
countMode = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
countMode = false;
|
||||
if (c == 0)
|
||||
{
|
||||
result.Add(DLE);
|
||||
}
|
||||
else
|
||||
{
|
||||
result.AddRange(Enumerable.Repeat(last, c - 1));
|
||||
}
|
||||
result.Add(c);
|
||||
last = c;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
countMode = false;
|
||||
if (c == 0)
|
||||
{
|
||||
result.Add(DLE);
|
||||
}
|
||||
else
|
||||
{
|
||||
result.AddRange(Enumerable.Repeat(last, c - 1));
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
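The RLE90 hunks above document the format as "<char> DLE <count>, where count == 0 -> DLE". The standalone snippet below (illustrative, not part of this compare) decodes two small inputs with the public RLE.UnpackRLE helper shown in that file: a run of four 'A' bytes and a literal DLE byte.

```csharp
using System;
using SharpCompress.Compressors.RLE90;

internal static class Rle90Example
{
    public static void Main()
    {
        // 'A' (0x41) followed by DLE (0x90) and a count of 4 expands to "AAAA":
        // the literal 'A' is emitted once, then count - 1 more copies.
        var expanded = RLE.UnpackRLE(new byte[] { 0x41, 0x90, 0x04 });
        Console.WriteLine(string.Join(" ", expanded)); // 65 65 65 65

        // DLE followed by a zero count encodes a single literal 0x90 byte.
        var literalDle = RLE.UnpackRLE(new byte[] { 0x90, 0x00 });
        Console.WriteLine(literalDle[0]); // 144
    }
}
```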
@@ -6,91 +6,90 @@ using System.Text;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.RLE90
|
||||
namespace SharpCompress.Compressors.RLE90;
|
||||
|
||||
public class RunLength90Stream : Stream, IStreamStack
|
||||
{
|
||||
public class RunLength90Stream : Stream, IStreamStack
|
||||
#if DEBUG_STREAMS
|
||||
long IStreamStack.InstanceId { get; set; }
|
||||
#endif
|
||||
int IStreamStack.DefaultBufferSize { get; set; }
|
||||
|
||||
Stream IStreamStack.BaseStream() => _stream;
|
||||
|
||||
int IStreamStack.BufferSize
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
int IStreamStack.BufferPosition
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
|
||||
void IStreamStack.SetPosition(long position) { }
|
||||
|
||||
private readonly Stream _stream;
|
||||
private const byte DLE = 0x90;
|
||||
private int _compressedSize;
|
||||
private bool _processed = false;
|
||||
|
||||
public RunLength90Stream(Stream stream, int compressedSize)
|
||||
{
|
||||
_stream = stream;
|
||||
_compressedSize = compressedSize;
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugConstruct(typeof(RunLength90Stream));
|
||||
#endif
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
#if DEBUG_STREAMS
|
||||
long IStreamStack.InstanceId { get; set; }
|
||||
this.DebugDispose(typeof(RunLength90Stream));
|
||||
#endif
|
||||
int IStreamStack.DefaultBufferSize { get; set; }
|
||||
|
||||
Stream IStreamStack.BaseStream() => _stream;
|
||||
|
||||
int IStreamStack.BufferSize
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
int IStreamStack.BufferPosition
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
|
||||
void IStreamStack.SetPosition(long position) { }
|
||||
|
||||
private readonly Stream _stream;
|
||||
private const byte DLE = 0x90;
|
||||
private int _compressedSize;
|
||||
private bool _processed = false;
|
||||
|
||||
public RunLength90Stream(Stream stream, int compressedSize)
|
||||
{
|
||||
_stream = stream;
|
||||
_compressedSize = compressedSize;
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugConstruct(typeof(RunLength90Stream));
|
||||
#endif
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugDispose(typeof(RunLength90Stream));
|
||||
#endif
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotImplementedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _stream.Position;
|
||||
set => throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Flush() => throw new NotImplementedException();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_processed)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
_processed = true;
|
||||
|
||||
using var binaryReader = new BinaryReader(_stream);
|
||||
byte[] compressedBuffer = binaryReader.ReadBytes(_compressedSize);
|
||||
|
||||
var unpacked = RLE.UnpackRLE(compressedBuffer);
|
||||
unpacked.CopyTo(buffer);
|
||||
|
||||
return unpacked.Count;
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) =>
|
||||
throw new NotImplementedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotImplementedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
throw new NotImplementedException();
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotImplementedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _stream.Position;
|
||||
set => throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Flush() => throw new NotImplementedException();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_processed)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
_processed = true;
|
||||
|
||||
using var binaryReader = new BinaryReader(_stream);
|
||||
byte[] compressedBuffer = binaryReader.ReadBytes(_compressedSize);
|
||||
|
||||
var unpacked = RLE.UnpackRLE(compressedBuffer);
|
||||
unpacked.CopyTo(buffer);
|
||||
|
||||
return unpacked.Count;
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) =>
|
||||
throw new NotImplementedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotImplementedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ using SharpCompress.Compressors.Rar.VM;
|
||||
|
||||
namespace SharpCompress.Compressors.Rar.UnpackV1;
|
||||
|
||||
internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
internal sealed partial class Unpack : BitInput, IRarUnpack, IDisposable
|
||||
{
|
||||
private readonly BitInput Inp;
|
||||
private bool disposed;
|
||||
@@ -22,17 +22,15 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
// to ease in porting Unpack50.cs
|
||||
Inp = this;
|
||||
|
||||
public override void Dispose()
|
||||
public void Dispose()
|
||||
{
|
||||
if (!disposed)
|
||||
{
|
||||
base.Dispose();
|
||||
if (!externalWindow)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(window);
|
||||
window = null;
|
||||
}
|
||||
rarVM.Dispose();
|
||||
disposed = true;
|
||||
}
|
||||
}
|
||||
@@ -576,111 +574,104 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
|
||||
var FilteredDataOffset = Prg.FilteredDataOffset;
|
||||
var FilteredDataSize = Prg.FilteredDataSize;
|
||||
var FilteredData = ArrayPool<byte>.Shared.Rent(FilteredDataSize);
|
||||
try
|
||||
var FilteredData = new byte[FilteredDataSize];
|
||||
|
||||
for (var i = 0; i < FilteredDataSize; i++)
|
||||
{
|
||||
Array.Copy(
|
||||
rarVM.Mem,
|
||||
FilteredDataOffset,
|
||||
FilteredData,
|
||||
0,
|
||||
FilteredDataSize
|
||||
);
|
||||
FilteredData[i] = rarVM.Mem[FilteredDataOffset + i];
|
||||
|
||||
prgStack[I] = null;
|
||||
while (I + 1 < prgStack.Count)
|
||||
// Prg.GlobalData.get(FilteredDataOffset
|
||||
// +
|
||||
// i);
|
||||
}
|
||||
|
||||
prgStack[I] = null;
|
||||
while (I + 1 < prgStack.Count)
|
||||
{
|
||||
var NextFilter = prgStack[I + 1];
|
||||
if (
|
||||
NextFilter is null
|
||||
|| NextFilter.BlockStart != BlockStart
|
||||
|| NextFilter.BlockLength != FilteredDataSize
|
||||
|| NextFilter.NextWindow
|
||||
)
|
||||
{
|
||||
var NextFilter = prgStack[I + 1];
|
||||
if (
|
||||
NextFilter is null
|
||||
|| NextFilter.BlockStart != BlockStart
|
||||
|| NextFilter.BlockLength != FilteredDataSize
|
||||
|| NextFilter.NextWindow
|
||||
)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
// apply several filters to same data block
|
||||
|
||||
rarVM.setMemory(0, FilteredData, 0, FilteredDataSize);
|
||||
|
||||
// .SetMemory(0,FilteredData,FilteredDataSize);
|
||||
|
||||
var pPrg = filters[NextFilter.ParentFilter].Program;
|
||||
var NextPrg = NextFilter.Program;
|
||||
|
||||
if (pPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
|
||||
{
|
||||
// copy global data from previous script execution
|
||||
// if any
|
||||
// NextPrg->GlobalData.Alloc(ParentPrg->GlobalData.Size());
|
||||
NextPrg.GlobalData.SetSize(pPrg.GlobalData.Count);
|
||||
|
||||
// memcpy(&NextPrg->GlobalData[VM_FIXEDGLOBALSIZE],&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],ParentPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE);
|
||||
for (
|
||||
var i = 0;
|
||||
i < pPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
|
||||
i++
|
||||
)
|
||||
{
|
||||
NextPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] =
|
||||
pPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i];
|
||||
}
|
||||
}
|
||||
|
||||
ExecuteCode(NextPrg);
|
||||
|
||||
if (NextPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
|
||||
{
|
||||
// save global data for next script execution
|
||||
if (pPrg.GlobalData.Count < NextPrg.GlobalData.Count)
|
||||
{
|
||||
pPrg.GlobalData.SetSize(NextPrg.GlobalData.Count);
|
||||
}
|
||||
|
||||
// memcpy(&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],&NextPrg->GlobalData[VM_FIXEDGLOBALSIZE],NextPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE);
|
||||
for (
|
||||
var i = 0;
|
||||
i < NextPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
|
||||
i++
|
||||
)
|
||||
{
|
||||
pPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] =
|
||||
NextPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i];
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
pPrg.GlobalData.Clear();
|
||||
}
|
||||
|
||||
FilteredDataOffset = NextPrg.FilteredDataOffset;
|
||||
FilteredDataSize = NextPrg.FilteredDataSize;
|
||||
if (FilteredData.Length < FilteredDataSize)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(FilteredData);
|
||||
FilteredData = ArrayPool<byte>.Shared.Rent(FilteredDataSize);
|
||||
}
|
||||
for (var i = 0; i < FilteredDataSize; i++)
|
||||
{
|
||||
FilteredData[i] = NextPrg.GlobalData[FilteredDataOffset + i];
|
||||
}
|
||||
|
||||
I++;
|
||||
prgStack[I] = null;
|
||||
break;
|
||||
}
|
||||
|
||||
writeStream.Write(FilteredData, 0, FilteredDataSize);
|
||||
writtenFileSize += FilteredDataSize;
|
||||
destUnpSize -= FilteredDataSize;
|
||||
WrittenBorder = BlockEnd;
|
||||
WriteSize = (unpPtr - WrittenBorder) & PackDef.MAXWINMASK;
|
||||
}
|
||||
finally
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(FilteredData);
|
||||
// apply several filters to same data block
|
||||
|
||||
rarVM.setMemory(0, FilteredData, 0, FilteredDataSize);
|
||||
|
||||
// .SetMemory(0,FilteredData,FilteredDataSize);
|
||||
|
||||
var pPrg = filters[NextFilter.ParentFilter].Program;
|
||||
var NextPrg = NextFilter.Program;
|
||||
|
||||
if (pPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
|
||||
{
|
||||
// copy global data from previous script execution
|
||||
// if any
|
||||
// NextPrg->GlobalData.Alloc(ParentPrg->GlobalData.Size());
|
||||
NextPrg.GlobalData.SetSize(pPrg.GlobalData.Count);
|
||||
|
||||
// memcpy(&NextPrg->GlobalData[VM_FIXEDGLOBALSIZE],&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],ParentPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE);
|
||||
for (
|
||||
var i = 0;
|
||||
i < pPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
|
||||
i++
|
||||
)
|
||||
{
|
||||
NextPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] = pPrg.GlobalData[
|
||||
RarVM.VM_FIXEDGLOBALSIZE + i
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
ExecuteCode(NextPrg);
|
||||
|
||||
if (NextPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
|
||||
{
|
||||
// save global data for next script execution
|
||||
if (pPrg.GlobalData.Count < NextPrg.GlobalData.Count)
|
||||
{
|
||||
pPrg.GlobalData.SetSize(NextPrg.GlobalData.Count);
|
||||
}
|
||||
|
||||
// memcpy(&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],&NextPrg->GlobalData[VM_FIXEDGLOBALSIZE],NextPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE);
|
||||
for (
|
||||
var i = 0;
|
||||
i < NextPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
|
||||
i++
|
||||
)
|
||||
{
|
||||
pPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] = NextPrg.GlobalData[
|
||||
RarVM.VM_FIXEDGLOBALSIZE + i
|
||||
];
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
pPrg.GlobalData.Clear();
|
||||
}
|
||||
FilteredDataOffset = NextPrg.FilteredDataOffset;
|
||||
FilteredDataSize = NextPrg.FilteredDataSize;
|
||||
|
||||
FilteredData = new byte[FilteredDataSize];
|
||||
for (var i = 0; i < FilteredDataSize; i++)
|
||||
{
|
||||
FilteredData[i] = NextPrg.GlobalData[FilteredDataOffset + i];
|
||||
}
|
||||
|
||||
I++;
|
||||
prgStack[I] = null;
|
||||
}
|
||||
writeStream.Write(FilteredData, 0, FilteredDataSize);
|
||||
unpSomeRead = true;
|
||||
writtenFileSize += FilteredDataSize;
|
||||
destUnpSize -= FilteredDataSize;
|
||||
WrittenBorder = BlockEnd;
|
||||
WriteSize = (unpPtr - WrittenBorder) & PackDef.MAXWINMASK;
|
||||
}
|
||||
else
|
||||
{
|
||||
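
A note on the hunk above: RAR stacked filters can cover the same decoded block, and each filter consumes the previous filter's output as its input. A minimal standalone sketch of that chaining idea (illustration only; IByteFilter and AddOneFilter are hypothetical, not SharpCompress types):

using System.Collections.Generic;

// Hypothetical filter interface: each filter transforms a block and returns it.
interface IByteFilter
{
    byte[] Apply(byte[] block);
}

sealed class AddOneFilter : IByteFilter
{
    public byte[] Apply(byte[] block)
    {
        for (var i = 0; i < block.Length; i++) block[i] += 1;
        return block;
    }
}

static class FilterChain
{
    // Feed the output of each filter into the next, the way the unpacker walks
    // the program stack when several filters apply to one block range.
    public static byte[] Run(byte[] block, IEnumerable<IByteFilter> filters)
    {
        foreach (var f in filters)
        {
            block = f.Apply(block);
        }
        return block;
    }
}
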
@@ -704,10 +695,15 @@ internal sealed partial class Unpack : BitInput, IRarUnpack

private void UnpWriteArea(int startPtr, int endPtr)
{
if (endPtr != startPtr)
{
unpSomeRead = true;
}
if (endPtr < startPtr)
{
UnpWriteData(window, startPtr, -startPtr & PackDef.MAXWINMASK);
UnpWriteData(window, 0, endPtr);
unpAllBuf = true;
}
else
{
@@ -761,27 +757,19 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
// System.out.println("copyString(" + length + ", " + distance + ")");

var destPtr = unpPtr - distance;
var safeZone = PackDef.MAXWINSIZE - 260;

// Fast path: use Array.Copy for bulk operations when in safe zone
if (destPtr >= 0 && destPtr < safeZone && unpPtr < safeZone && distance >= length)
// System.out.println(unpPtr+":"+distance);
if (destPtr >= 0 && destPtr < PackDef.MAXWINSIZE - 260 && unpPtr < PackDef.MAXWINSIZE - 260)
{
// Non-overlapping copy: can use Array.Copy directly
Array.Copy(window, destPtr, window, unpPtr, length);
unpPtr += length;
}
else if (destPtr >= 0 && destPtr < safeZone && unpPtr < safeZone)
{
// Overlapping copy in safe zone: use byte-by-byte to handle self-referential copies
for (int i = 0; i < length; i++)
window[unpPtr++] = window[destPtr++];

while (--length > 0)
{
window[unpPtr + i] = window[destPtr + i];
window[unpPtr++] = window[destPtr++];
}
unpPtr += length;
}
else
{
// Slow path with wraparound mask
while (length-- != 0)
{
window[unpPtr] = window[destPtr++ & PackDef.MAXWINMASK];
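
Why the overlap distinction above matters: when the match distance is smaller than the match length, the copy reads bytes it has just written, so a bulk Array.Copy (which behaves as if the source were snapshotted first) would give different output than the byte-by-byte loop. A small standalone illustration, not the SharpCompress code itself:

using System;

class OverlapCopyDemo
{
    static void Main()
    {
        // Window contains "AB" and we copy length 6 from distance 2:
        // the LZ semantics are "keep repeating the last two bytes", giving ABABABAB.
        var window = new byte[8];
        window[0] = (byte)'A';
        window[1] = (byte)'B';

        int dest = 2, src = 0, length = 6;
        for (var i = 0; i < length; i++)
        {
            window[dest + i] = window[src + i]; // reads bytes written earlier in this same loop
        }

        Console.WriteLine(System.Text.Encoding.ASCII.GetString(window)); // ABABABAB
    }
}
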
@@ -1040,7 +1028,7 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
vmCode.Add((byte)(GetBits() >> 8));
AddBits(8);
}
return AddVMCode(FirstByte, vmCode);
return (AddVMCode(FirstByte, vmCode, Length));
}

private bool ReadVMCodePPM()
@@ -1085,12 +1073,12 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
}
vmCode.Add((byte)Ch); // VMCode[I]=Ch;
}
return AddVMCode(FirstByte, vmCode);
return (AddVMCode(FirstByte, vmCode, Length));
}

private bool AddVMCode(int firstByte, List<byte> vmCode)
private bool AddVMCode(int firstByte, List<byte> vmCode, int length)
{
using var Inp = new BitInput();
var Inp = new BitInput();
Inp.InitBitInput();

// memcpy(Inp.InBuf,Code,Min(BitInput::MAX_SIZE,CodeSize));
@@ -1098,6 +1086,7 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
{
Inp.InBuf[i] = vmCode[i];
}
rarVM.init();

int FiltPos;
if ((firstByte & 0x80) != 0)
@@ -1210,28 +1199,19 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
{
return (false);
}

var VMCode = ArrayPool<byte>.Shared.Rent(VMCodeSize);
try
Span<byte> VMCode = stackalloc byte[VMCodeSize];
for (var I = 0; I < VMCodeSize; I++)
{
for (var I = 0; I < VMCodeSize; I++)
if (Inp.Overflow(3))
{
if (Inp.Overflow(3))
{
return (false);
}

VMCode[I] = (byte)(Inp.GetBits() >> 8);
Inp.AddBits(8);
return (false);
}
VMCode[I] = (byte)(Inp.GetBits() >> 8);
Inp.AddBits(8);
}

// VM.Prepare(&VMCode[0],VMCodeSize,&Filter->Prg);
rarVM.prepare(VMCode.AsSpan(0, VMCodeSize), Filter.Program);
}
finally
{
ArrayPool<byte>.Shared.Return(VMCode);
}
// VM.Prepare(&VMCode[0],VMCodeSize,&Filter->Prg);
rarVM.prepare(VMCode, VMCodeSize, Filter.Program);
}
StackFilter.Program.AltCommands = Filter.Program.Commands; // StackFilter->Prg.AltCmd=&Filter->Prg.Cmd[0];
StackFilter.Program.CommandCount = Filter.Program.CommandCount;

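
One side of the hunk above rents the scratch buffer from ArrayPool and returns it in a finally block; the other side uses stackalloc. A minimal sketch of the pooled pattern using nothing beyond the BCL (note that a rented array may be longer than requested, so the used length must be tracked separately):

using System;
using System.Buffers;

static class PooledBufferExample
{
    public static int Checksum(ReadOnlySpan<byte> source)
    {
        var buffer = ArrayPool<byte>.Shared.Rent(source.Length); // may return a larger array
        try
        {
            source.CopyTo(buffer);
            var sum = 0;
            for (var i = 0; i < source.Length; i++) // iterate the requested length, not buffer.Length
            {
                sum = (sum + buffer[i]) & 0xFF;
            }
            return sum;
        }
        finally
        {
            ArrayPool<byte>.Shared.Return(buffer); // always hand the array back, even on exceptions
        }
    }
}
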
@@ -19,9 +19,14 @@ internal partial class Unpack

private bool suspended;

internal bool unpAllBuf;

//private ComprDataIO unpIO;
private Stream readStream;
private Stream writeStream;

internal bool unpSomeRead;

private int readTop;

private long destUnpSize;
@@ -803,10 +808,15 @@ internal partial class Unpack

private void oldUnpWriteBuf()
{
if (unpPtr != wrPtr)
{
unpSomeRead = true;
}
if (unpPtr < wrPtr)
{
writeStream.Write(window, wrPtr, -wrPtr & PackDef.MAXWINMASK);
writeStream.Write(window, 0, unpPtr);
unpAllBuf = true;
}
else
{

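
The `-wrPtr & PackDef.MAXWINMASK` expression above is the usual power-of-two ring-buffer trick: for a window of size 2^k, `-p & (2^k - 1)` is the number of bytes from position p to the end of the window. A small standalone check of that identity (window size chosen here purely for illustration):

using System;

class RingFlushDemo
{
    const int WindowSize = 1 << 16;        // must be a power of two
    const int WindowMask = WindowSize - 1;

    static void Main()
    {
        int wrPtr = 0xFFF0;                 // current write-out position in the window
        int bytesToEnd = -wrPtr & WindowMask;
        Console.WriteLine(bytesToEnd);      // 16
        Console.WriteLine(WindowSize - wrPtr); // same value
    }
}
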
@@ -1,5 +1,4 @@
using System;
using System.Runtime.CompilerServices;
using SharpCompress.Compressors.Rar.VM;

namespace SharpCompress.Compressors.Rar.UnpackV1;
@@ -10,15 +9,167 @@ internal static class UnpackUtility
|
||||
internal static uint DecodeNumber(this BitInput input, Decode.Decode dec) =>
|
||||
(uint)input.decodeNumber(dec);
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
internal static int decodeNumber(this BitInput input, Decode.Decode dec)
|
||||
{
|
||||
int bits;
|
||||
long bitField = input.GetBits() & 0xfffe;
|
||||
|
||||
// if (bitField < dec.getDecodeLen()[8]) {
|
||||
// if (bitField < dec.getDecodeLen()[4]) {
|
||||
// if (bitField < dec.getDecodeLen()[2]) {
|
||||
// if (bitField < dec.getDecodeLen()[1]) {
|
||||
// bits = 1;
|
||||
// } else {
|
||||
// bits = 2;
|
||||
// }
|
||||
// } else {
|
||||
// if (bitField < dec.getDecodeLen()[3]) {
|
||||
// bits = 3;
|
||||
// } else {
|
||||
// bits = 4;
|
||||
// }
|
||||
// }
|
||||
// } else {
|
||||
// if (bitField < dec.getDecodeLen()[6]) {
|
||||
// if (bitField < dec.getDecodeLen()[5])
|
||||
// bits = 5;
|
||||
// else
|
||||
// bits = 6;
|
||||
// } else {
|
||||
// if (bitField < dec.getDecodeLen()[7]) {
|
||||
// bits = 7;
|
||||
// } else {
|
||||
// bits = 8;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// } else {
|
||||
// if (bitField < dec.getDecodeLen()[12]) {
|
||||
// if (bitField < dec.getDecodeLen()[10])
|
||||
// if (bitField < dec.getDecodeLen()[9])
|
||||
// bits = 9;
|
||||
// else
|
||||
// bits = 10;
|
||||
// else if (bitField < dec.getDecodeLen()[11])
|
||||
// bits = 11;
|
||||
// else
|
||||
// bits = 12;
|
||||
// } else {
|
||||
// if (bitField < dec.getDecodeLen()[14]) {
|
||||
// if (bitField < dec.getDecodeLen()[13]) {
|
||||
// bits = 13;
|
||||
// } else {
|
||||
// bits = 14;
|
||||
// }
|
||||
// } else {
|
||||
// bits = 15;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// addbits(bits);
|
||||
// int N = dec.getDecodePos()[bits]
|
||||
// + (((int) bitField - dec.getDecodeLen()[bits - 1]) >>> (16 - bits));
|
||||
// if (N >= dec.getMaxNum()) {
|
||||
// N = 0;
|
||||
// }
|
||||
// return (dec.getDecodeNum()[N]);
|
||||
var decodeLen = dec.DecodeLen;
|
||||
|
||||
// Binary search to find the bit length - faster than nested ifs
|
||||
int bits = FindDecodeBits(bitField, decodeLen);
|
||||
|
||||
if (bitField < decodeLen[8])
|
||||
{
|
||||
if (bitField < decodeLen[4])
|
||||
{
|
||||
if (bitField < decodeLen[2])
|
||||
{
|
||||
if (bitField < decodeLen[1])
|
||||
{
|
||||
bits = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 2;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (bitField < decodeLen[3])
|
||||
{
|
||||
bits = 3;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (bitField < decodeLen[6])
|
||||
{
|
||||
if (bitField < decodeLen[5])
|
||||
{
|
||||
bits = 5;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 6;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (bitField < decodeLen[7])
|
||||
{
|
||||
bits = 7;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 8;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (bitField < decodeLen[12])
|
||||
{
|
||||
if (bitField < decodeLen[10])
|
||||
{
|
||||
if (bitField < decodeLen[9])
|
||||
{
|
||||
bits = 9;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 10;
|
||||
}
|
||||
}
|
||||
else if (bitField < decodeLen[11])
|
||||
{
|
||||
bits = 11;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 12;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (bitField < decodeLen[14])
|
||||
{
|
||||
if (bitField < decodeLen[13])
|
||||
{
|
||||
bits = 13;
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 14;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
bits = 15;
|
||||
}
|
||||
}
|
||||
}
|
||||
input.AddBits(bits);
|
||||
var N =
|
||||
dec.DecodePos[bits]
|
||||
@@ -30,52 +181,6 @@ internal static class UnpackUtility
|
||||
return (dec.DecodeNum[N]);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Fast binary search to find which bit length matches the bitField.
|
||||
/// Optimized with cached array access to minimize memory lookups.
|
||||
/// </summary>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static int FindDecodeBits(long bitField, int[] decodeLen)
|
||||
{
|
||||
// Cache critical values to reduce array access overhead
|
||||
long len4 = decodeLen[4];
|
||||
long len8 = decodeLen[8];
|
||||
long len12 = decodeLen[12];
|
||||
|
||||
if (bitField < len8)
|
||||
{
|
||||
if (bitField < len4)
|
||||
{
|
||||
long len2 = decodeLen[2];
|
||||
if (bitField < len2)
|
||||
{
|
||||
return bitField < decodeLen[1] ? 1 : 2;
|
||||
}
|
||||
return bitField < decodeLen[3] ? 3 : 4;
|
||||
}
|
||||
|
||||
long len6 = decodeLen[6];
|
||||
if (bitField < len6)
|
||||
{
|
||||
return bitField < decodeLen[5] ? 5 : 6;
|
||||
}
|
||||
return bitField < decodeLen[7] ? 7 : 8;
|
||||
}
|
||||
|
||||
if (bitField < len12)
|
||||
{
|
||||
long len10 = decodeLen[10];
|
||||
if (bitField < len10)
|
||||
{
|
||||
return bitField < decodeLen[9] ? 9 : 10;
|
||||
}
|
||||
return bitField < decodeLen[11] ? 11 : 12;
|
||||
}
|
||||
|
||||
long len14 = decodeLen[14];
|
||||
return bitField < len14 ? (bitField < decodeLen[13] ? 13 : 14) : 15;
|
||||
}
|
||||
|
||||
internal static void makeDecodeTables(
|
||||
Span<byte> lenTab,
|
||||
int offset,
|
||||
@@ -89,7 +194,8 @@ internal static class UnpackUtility
|
||||
long M,
|
||||
N;
|
||||
|
||||
new Span<int>(dec.DecodeNum).Clear();
|
||||
new Span<int>(dec.DecodeNum).Clear(); // memset(Dec->DecodeNum,0,Size*sizeof(*Dec->DecodeNum));
|
||||
|
||||
for (i = 0; i < size; i++)
|
||||
{
|
||||
lenCount[lenTab[offset + i] & 0xF]++;
|
||||
|
||||
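
Both versions of decodeNumber above compare a left-aligned 16-bit peek against the cumulative DecodeLen thresholds to find how many bits the next canonical-Huffman code occupies, then index DecodePos/DecodeNum. A toy version of the threshold search, assuming decodeLen[b] is a cumulative upper bound for left-aligned codes of length b (the values below are hypothetical):

using System;

static class ThresholdSearchDemo
{
    // Returns the code length whose threshold first exceeds the peeked bit field.
    public static int FindBits(int bitField, int[] decodeLen, int maxBits)
    {
        for (var bits = 1; bits < maxBits; bits++)
        {
            if (bitField < decodeLen[bits])
            {
                return bits;
            }
        }
        return maxBits; // longest code length
    }

    static void Main()
    {
        // Hypothetical thresholds for codes of length 1..4 (index 0 unused).
        var decodeLen = new[] { 0, 0x8000, 0xC000, 0xE000, 0x10000 };
        Console.WriteLine(FindBits(0x7ABC, decodeLen, 4)); // 1
        Console.WriteLine(FindBits(0xD234, decodeLen, 4)); // 3
    }
}
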
@@ -413,7 +413,7 @@ internal partial class Unpack
else
//x memcpy(Mem,Window+BlockStart,BlockLength);
{
Buffer.BlockCopy(Window, (int)BlockStart, Mem, 0, (int)BlockLength);
Utility.Copy(Window, BlockStart, Mem, 0, BlockLength);
}
}
else
@@ -427,21 +427,9 @@ internal partial class Unpack
else
{
//x memcpy(Mem,Window+BlockStart,FirstPartLength);
Buffer.BlockCopy(
Window,
(int)BlockStart,
Mem,
0,
(int)FirstPartLength
);
Utility.Copy(Window, BlockStart, Mem, 0, FirstPartLength);
//x memcpy(Mem+FirstPartLength,Window,BlockEnd);
Buffer.BlockCopy(
Window,
0,
Mem,
(int)FirstPartLength,
(int)BlockEnd
);
Utility.Copy(Window, 0, Mem, FirstPartLength, BlockEnd);
}
}

@@ -1,9 +1,6 @@
using System;
using System.Buffers;

namespace SharpCompress.Compressors.Rar.VM;

internal class BitInput : IDisposable
internal class BitInput
{
/// <summary> the max size of the input</summary>
internal const int MAX_SIZE = 0x8000;
@@ -23,11 +20,9 @@ internal class BitInput : IDisposable
set => inBit = value;
}
public bool ExternalBuffer;
private byte[] _privateBuffer = ArrayPool<byte>.Shared.Rent(MAX_SIZE);
private bool _disposed;

/// <summary> </summary>
internal BitInput() => InBuf = _privateBuffer;
internal BitInput() => InBuf = new byte[MAX_SIZE];

internal byte[] InBuf { get; }

@@ -92,14 +87,4 @@ internal class BitInput : IDisposable
/// <returns> true if an overflow would occur
/// </returns>
internal bool Overflow(int IncPtr) => (inAddr + IncPtr >= MAX_SIZE);

public virtual void Dispose()
{
if (_disposed)
{
return;
}
ArrayPool<byte>.Shared.Return(_privateBuffer);
_disposed = true;
}
}

@@ -1,5 +1,6 @@
#nullable disable

using System;
using System.Buffers;
using System.Buffers.Binary;
using System.Collections.Generic;

@@ -15,9 +16,7 @@ internal sealed class RarVM : BitInput
// Mem.set_Renamed(offset + 3, Byte.valueOf((sbyte) ((Utility.URShift(value_Renamed, 24)) & 0xff)));

//}
internal byte[] Mem => _memory.NotNull();

private byte[]? _memory = ArrayPool<byte>.Shared.Rent(VM_MEMSIZE + 4);
internal byte[] Mem { get; private set; }

public const int VM_MEMSIZE = 0x40000;

@@ -41,18 +40,11 @@ internal sealed class RarVM : BitInput

private int IP;

internal RarVM() { }
internal RarVM() =>
//InitBlock();
Mem = null;

public override void Dispose()
{
base.Dispose();
if (_memory is null)
{
return;
}
ArrayPool<byte>.Shared.Return(_memory);
_memory = null;
}
internal void init() => Mem ??= new byte[VM_MEMSIZE + 4];

private bool IsVMMem(byte[] mem) => Mem == mem;

@@ -784,10 +776,9 @@ internal sealed class RarVM : BitInput
}
}

public void prepare(ReadOnlySpan<byte> code, VMPreparedProgram prg)
public void prepare(ReadOnlySpan<byte> code, int codeSize, VMPreparedProgram prg)
{
InitBitInput();
var codeSize = code.Length;
var cpLength = Math.Min(MAX_SIZE, codeSize);

// memcpy(inBuf,Code,Min(CodeSize,BitInput::MAX_SIZE));
@@ -804,7 +795,7 @@ internal sealed class RarVM : BitInput
prg.CommandCount = 0;
if (xorSum == code[0])
{
var filterType = IsStandardFilter(code);
var filterType = IsStandardFilter(code, codeSize);
if (filterType != VMStandardFilters.VMSF_NONE)
{
var curCmd = new VMPreparedCommand();
@@ -1114,7 +1105,7 @@ internal sealed class RarVM : BitInput
}
}

private VMStandardFilters IsStandardFilter(ReadOnlySpan<byte> code)
private VMStandardFilters IsStandardFilter(ReadOnlySpan<byte> code, int codeSize)
{
VMStandardFilterSignature[] stdList =
{
@@ -1139,7 +1130,6 @@ internal sealed class RarVM : BitInput

private void ExecuteStandardFilter(VMStandardFilters filterType)
{
var mem = Mem;
switch (filterType)
{
case VMStandardFilters.VMSF_E8:
@@ -1158,7 +1148,7 @@ internal sealed class RarVM : BitInput
|
||||
);
|
||||
for (var curPos = 0; curPos < dataSize - 4; )
|
||||
{
|
||||
var curByte = mem[curPos++];
|
||||
var curByte = Mem[curPos++];
|
||||
if (curByte == 0xe8 || curByte == cmpByte2)
|
||||
{
|
||||
// #ifdef PRESENT_INT32
|
||||
@@ -1174,19 +1164,19 @@ internal sealed class RarVM : BitInput
|
||||
// SET_VALUE(false,Data,Addr-Offset);
|
||||
// #else
|
||||
var offset = curPos + fileOffset;
|
||||
long Addr = GetValue(false, mem, curPos);
|
||||
long Addr = GetValue(false, Mem, curPos);
|
||||
if ((Addr & unchecked((int)0x80000000)) != 0)
|
||||
{
|
||||
if (((Addr + offset) & unchecked((int)0x80000000)) == 0)
|
||||
{
|
||||
SetValue(false, mem, curPos, (int)Addr + fileSize);
|
||||
SetValue(false, Mem, curPos, (int)Addr + fileSize);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (((Addr - fileSize) & unchecked((int)0x80000000)) != 0)
|
||||
{
|
||||
SetValue(false, mem, curPos, (int)(Addr - offset));
|
||||
SetValue(false, Mem, curPos, (int)(Addr - offset));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1214,7 +1204,7 @@ internal sealed class RarVM : BitInput
|
||||
|
||||
while (curPos < dataSize - 21)
|
||||
{
|
||||
var Byte = (mem[curPos] & 0x1f) - 0x10;
|
||||
var Byte = (Mem[curPos] & 0x1f) - 0x10;
|
||||
if (Byte >= 0)
|
||||
{
|
||||
var cmdMask = Masks[Byte];
|
||||
@@ -1260,7 +1250,7 @@ internal sealed class RarVM : BitInput
|
||||
var channels = R[0] & unchecked((int)0xFFffFFff);
|
||||
var srcPos = 0;
|
||||
var border = (dataSize * 2) & unchecked((int)0xFFffFFff);
|
||||
SetValue(false, mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
if (dataSize >= VM_GLOBALMEMADDR / 2)
|
||||
{
|
||||
break;
|
||||
@@ -1278,7 +1268,7 @@ internal sealed class RarVM : BitInput
|
||||
destPos += channels
|
||||
)
|
||||
{
|
||||
mem[destPos] = (PrevByte = (byte)(PrevByte - mem[srcPos++]));
|
||||
Mem[destPos] = (PrevByte = (byte)(PrevByte - Mem[srcPos++]));
|
||||
}
|
||||
}
|
||||
}
|
||||
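
The VMSF_DELTA branch above undoes a per-channel delta transform: each output byte is the previous byte of the same channel minus the stored difference. A standalone sketch of that reconstruction, independent of the RAR VM memory layout and loop structure:

using System;

static class DeltaDecodeDemo
{
    // Rebuild interleaved channel data from per-channel deltas, mirroring the
    // "PrevByte - delta" update used in the filter above.
    public static byte[] Decode(byte[] deltas, int channels)
    {
        var output = new byte[deltas.Length];
        var srcPos = 0;
        for (var ch = 0; ch < channels; ch++)
        {
            byte prev = 0;
            for (var destPos = ch; destPos < output.Length; destPos += channels)
            {
                prev = (byte)(prev - deltas[srcPos++]);
                output[destPos] = prev;
            }
        }
        return output;
    }

    static void Main()
    {
        var decoded = Decode(new byte[] { 0xFF, 0xFE, 0xFE, 0xFE }, channels: 2);
        Console.WriteLine(BitConverter.ToString(decoded)); // 01-02-03-04
    }
}
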
@@ -1293,7 +1283,7 @@ internal sealed class RarVM : BitInput
|
||||
var channels = 3;
|
||||
var srcPos = 0;
|
||||
var destDataPos = dataSize;
|
||||
SetValue(false, mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
if (dataSize >= VM_GLOBALMEMADDR / 2 || posR < 0)
|
||||
{
|
||||
break;
|
||||
@@ -1309,8 +1299,8 @@ internal sealed class RarVM : BitInput
|
||||
if (upperPos >= 3)
|
||||
{
|
||||
var upperDataPos = destDataPos + upperPos;
|
||||
var upperByte = mem[upperDataPos] & 0xff;
|
||||
var upperLeftByte = mem[upperDataPos - 3] & 0xff;
|
||||
var upperByte = Mem[upperDataPos] & 0xff;
|
||||
var upperLeftByte = Mem[upperDataPos - 3] & 0xff;
|
||||
predicted = prevByte + upperByte - upperLeftByte;
|
||||
var pa = Math.Abs((int)(predicted - prevByte));
|
||||
var pb = Math.Abs((int)(predicted - upperByte));
|
||||
@@ -1336,15 +1326,15 @@ internal sealed class RarVM : BitInput
|
||||
predicted = prevByte;
|
||||
}
|
||||
|
||||
prevByte = ((predicted - mem[srcPos++]) & 0xff) & 0xff;
|
||||
mem[destDataPos + i] = (byte)(prevByte & 0xff);
|
||||
prevByte = ((predicted - Mem[srcPos++]) & 0xff) & 0xff;
|
||||
Mem[destDataPos + i] = (byte)(prevByte & 0xff);
|
||||
}
|
||||
}
|
||||
for (int i = posR, border = dataSize - 2; i < border; i += 3)
|
||||
{
|
||||
var G = mem[destDataPos + i + 1];
|
||||
mem[destDataPos + i] = (byte)(mem[destDataPos + i] + G);
|
||||
mem[destDataPos + i + 2] = (byte)(mem[destDataPos + i + 2] + G);
|
||||
var G = Mem[destDataPos + i + 1];
|
||||
Mem[destDataPos + i] = (byte)(Mem[destDataPos + i] + G);
|
||||
Mem[destDataPos + i + 2] = (byte)(Mem[destDataPos + i + 2] + G);
|
||||
}
|
||||
}
|
||||
break;
|
||||
@@ -1357,7 +1347,7 @@ internal sealed class RarVM : BitInput
|
||||
var destDataPos = dataSize;
|
||||
|
||||
//byte *SrcData=Mem,*DestData=SrcData+DataSize;
|
||||
SetValue(false, mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
if (dataSize >= VM_GLOBALMEMADDR / 2)
|
||||
{
|
||||
break;
|
||||
@@ -1387,10 +1377,10 @@ internal sealed class RarVM : BitInput
|
||||
var predicted = (8 * prevByte) + (K1 * D1) + (K2 * D2) + (K3 * D3);
|
||||
predicted = Utility.URShift(predicted, 3) & 0xff;
|
||||
|
||||
long curByte = mem[srcPos++];
|
||||
long curByte = Mem[srcPos++];
|
||||
|
||||
predicted -= curByte;
|
||||
mem[destDataPos + i] = (byte)predicted;
|
||||
Mem[destDataPos + i] = (byte)predicted;
|
||||
prevDelta = (byte)(predicted - prevByte);
|
||||
|
||||
//fix java byte
|
||||
@@ -1490,15 +1480,15 @@ internal sealed class RarVM : BitInput
|
||||
}
|
||||
while (srcPos < dataSize)
|
||||
{
|
||||
var curByte = mem[srcPos++];
|
||||
if (curByte == 2 && (curByte = mem[srcPos++]) != 2)
|
||||
var curByte = Mem[srcPos++];
|
||||
if (curByte == 2 && (curByte = Mem[srcPos++]) != 2)
|
||||
{
|
||||
curByte = (byte)(curByte - 32);
|
||||
}
|
||||
mem[destPos++] = curByte;
|
||||
Mem[destPos++] = curByte;
|
||||
}
|
||||
SetValue(false, mem, VM_GLOBALMEMADDR + 0x1c, destPos - dataSize);
|
||||
SetValue(false, mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
SetValue(false, Mem, VM_GLOBALMEMADDR + 0x1c, destPos - dataSize);
|
||||
SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -1538,14 +1528,15 @@ internal sealed class RarVM : BitInput
|
||||
{
|
||||
if (pos < VM_MEMSIZE)
|
||||
{
|
||||
// Use Array.Copy for fast bulk memory operations instead of byte-by-byte loop
|
||||
// Calculate how much data can actually fit in VM memory
|
||||
int copyLength = Math.Min(dataSize, VM_MEMSIZE - pos);
|
||||
copyLength = Math.Min(copyLength, data.Length - offset);
|
||||
|
||||
if (copyLength > 0)
|
||||
//&& data!=Mem+Pos)
|
||||
//memmove(Mem+Pos,Data,Min(DataSize,VM_MEMSIZE-Pos));
|
||||
for (var i = 0; i < Math.Min(data.Length - offset, dataSize); i++)
|
||||
{
|
||||
Array.Copy(data, offset, Mem, pos, copyLength);
|
||||
if ((VM_MEMSIZE - pos) < i)
|
||||
{
|
||||
break;
|
||||
}
|
||||
Mem[pos + i] = data[offset + i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
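
Both versions of setMemory above clamp the copy so it never runs past the end of the fixed VM address space or past the end of the source array; one does it with Math.Min plus Array.Copy, the other with an early-exit loop. The clamping logic in isolation, as a standalone sketch:

using System;

static class BoundedCopyDemo
{
    const int MemSize = 0x40000;

    // Copy at most dataSize bytes into mem at pos, clamped to both buffers.
    public static void SetMemory(byte[] mem, int pos, byte[] data, int offset, int dataSize)
    {
        if (pos < 0 || pos >= MemSize)
        {
            return;
        }
        var copyLength = Math.Min(dataSize, MemSize - pos);
        copyLength = Math.Min(copyLength, data.Length - offset);
        copyLength = Math.Min(copyLength, mem.Length - pos);
        if (copyLength > 0)
        {
            Array.Copy(data, offset, mem, pos, copyLength);
        }
    }
}
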
@@ -1,79 +1,78 @@
|
||||
namespace SharpCompress.Compressors.Shrink
|
||||
namespace SharpCompress.Compressors.Shrink;
|
||||
|
||||
internal class BitStream
|
||||
{
|
||||
internal class BitStream
|
||||
private byte[] _src;
|
||||
private int _srcLen;
|
||||
private int _byteIdx;
|
||||
private int _bitIdx;
|
||||
private int _bitsLeft;
|
||||
private ulong _bitBuffer;
|
||||
private static uint[] _maskBits = new uint[17]
|
||||
{
|
||||
private byte[] _src;
|
||||
private int _srcLen;
|
||||
private int _byteIdx;
|
||||
private int _bitIdx;
|
||||
private int _bitsLeft;
|
||||
private ulong _bitBuffer;
|
||||
private static uint[] _maskBits = new uint[17]
|
||||
{
|
||||
0U,
|
||||
1U,
|
||||
3U,
|
||||
7U,
|
||||
15U,
|
||||
31U,
|
||||
63U,
|
||||
(uint)sbyte.MaxValue,
|
||||
(uint)byte.MaxValue,
|
||||
511U,
|
||||
1023U,
|
||||
2047U,
|
||||
4095U,
|
||||
8191U,
|
||||
16383U,
|
||||
(uint)short.MaxValue,
|
||||
(uint)ushort.MaxValue,
|
||||
};
|
||||
0U,
|
||||
1U,
|
||||
3U,
|
||||
7U,
|
||||
15U,
|
||||
31U,
|
||||
63U,
|
||||
(uint)sbyte.MaxValue,
|
||||
(uint)byte.MaxValue,
|
||||
511U,
|
||||
1023U,
|
||||
2047U,
|
||||
4095U,
|
||||
8191U,
|
||||
16383U,
|
||||
(uint)short.MaxValue,
|
||||
(uint)ushort.MaxValue,
|
||||
};
|
||||
|
||||
public BitStream(byte[] src, int srcLen)
|
||||
public BitStream(byte[] src, int srcLen)
|
||||
{
|
||||
_src = src;
|
||||
_srcLen = srcLen;
|
||||
_byteIdx = 0;
|
||||
_bitIdx = 0;
|
||||
}
|
||||
|
||||
public int BytesRead => (_byteIdx << 3) + _bitIdx;
|
||||
|
||||
private int NextByte()
|
||||
{
|
||||
if (_byteIdx >= _srcLen)
|
||||
{
|
||||
_src = src;
|
||||
_srcLen = srcLen;
|
||||
_byteIdx = 0;
|
||||
_bitIdx = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
public int BytesRead => (_byteIdx << 3) + _bitIdx;
|
||||
return _src[_byteIdx++];
|
||||
}
|
||||
|
||||
private int NextByte()
|
||||
public int NextBits(int nbits)
|
||||
{
|
||||
var result = 0;
|
||||
if (nbits > _bitsLeft)
|
||||
{
|
||||
if (_byteIdx >= _srcLen)
|
||||
int num;
|
||||
while (_bitsLeft <= 24 && (num = NextByte()) != 1234)
|
||||
{
|
||||
return 0;
|
||||
_bitBuffer |= (ulong)num << _bitsLeft;
|
||||
_bitsLeft += 8;
|
||||
}
|
||||
|
||||
return _src[_byteIdx++];
|
||||
}
|
||||
result = (int)((long)_bitBuffer & (long)_maskBits[nbits]);
|
||||
_bitBuffer >>= nbits;
|
||||
_bitsLeft -= nbits;
|
||||
return result;
|
||||
}
|
||||
|
||||
public int NextBits(int nbits)
|
||||
public bool Advance(int count)
|
||||
{
|
||||
if (_byteIdx > _srcLen)
|
||||
{
|
||||
var result = 0;
|
||||
if (nbits > _bitsLeft)
|
||||
{
|
||||
int num;
|
||||
while (_bitsLeft <= 24 && (num = NextByte()) != 1234)
|
||||
{
|
||||
_bitBuffer |= (ulong)num << _bitsLeft;
|
||||
_bitsLeft += 8;
|
||||
}
|
||||
}
|
||||
result = (int)((long)_bitBuffer & (long)_maskBits[nbits]);
|
||||
_bitBuffer >>= nbits;
|
||||
_bitsLeft -= nbits;
|
||||
return result;
|
||||
}
|
||||
|
||||
public bool Advance(int count)
|
||||
{
|
||||
if (_byteIdx > _srcLen)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
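
BitStream above keeps up to 32 bits in a small ulong accumulator and refills it a byte at a time before handing out nbits, masking the result with a lookup table that is simply (1u << n) - 1. A compact standalone equivalent of that refill-and-mask step:

using System;

sealed class TinyBitReader
{
    private readonly byte[] _src;
    private int _byteIdx;
    private ulong _bitBuffer;
    private int _bitsLeft;

    public TinyBitReader(byte[] src) => _src = src;

    public int NextBits(int nbits)
    {
        while (_bitsLeft < nbits && _byteIdx < _src.Length)
        {
            _bitBuffer |= (ulong)_src[_byteIdx++] << _bitsLeft; // append the next byte above the bits we already hold
            _bitsLeft += 8;
        }
        // Past the end of the input the buffer simply reads as zero bits,
        // matching the behavior of NextByte above.
        var result = (int)(_bitBuffer & ((1UL << nbits) - 1));  // same as the _maskBits[nbits] table
        _bitBuffer >>= nbits;
        _bitsLeft -= nbits;
        return result;
    }
}
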
@@ -1,275 +1,297 @@
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Compressors.Shrink
|
||||
namespace SharpCompress.Compressors.Shrink;
|
||||
|
||||
public class HwUnshrink
|
||||
{
|
||||
public class HwUnshrink
|
||||
private const int MIN_CODE_SIZE = 9;
|
||||
private const int MAX_CODE_SIZE = 13;
|
||||
|
||||
private const ushort MAX_CODE = (ushort)((1U << MAX_CODE_SIZE) - 1);
|
||||
private const ushort INVALID_CODE = ushort.MaxValue;
|
||||
private const ushort CONTROL_CODE = 256;
|
||||
private const ushort INC_CODE_SIZE = 1;
|
||||
private const ushort PARTIAL_CLEAR = 2;
|
||||
|
||||
private const int HASH_BITS = MAX_CODE_SIZE + 1; // For a load factor of 0.5.
|
||||
private const int HASHTAB_SIZE = 1 << HASH_BITS;
|
||||
private const ushort UNKNOWN_LEN = ushort.MaxValue;
|
||||
|
||||
private struct CodeTabEntry
|
||||
{
|
||||
private const int MIN_CODE_SIZE = 9;
|
||||
private const int MAX_CODE_SIZE = 13;
|
||||
public int prefixCode; // INVALID_CODE means the entry is invalid.
|
||||
public byte extByte;
|
||||
public ushort len;
|
||||
public int lastDstPos;
|
||||
}
|
||||
|
||||
private const ushort MAX_CODE = (ushort)((1U << MAX_CODE_SIZE) - 1);
|
||||
private const ushort INVALID_CODE = ushort.MaxValue;
|
||||
private const ushort CONTROL_CODE = 256;
|
||||
private const ushort INC_CODE_SIZE = 1;
|
||||
private const ushort PARTIAL_CLEAR = 2;
|
||||
|
||||
private const int HASH_BITS = MAX_CODE_SIZE + 1; // For a load factor of 0.5.
|
||||
private const int HASHTAB_SIZE = 1 << HASH_BITS;
|
||||
private const ushort UNKNOWN_LEN = ushort.MaxValue;
|
||||
|
||||
private struct CodeTabEntry
|
||||
private static void CodeTabInit(CodeTabEntry[] codeTab)
|
||||
{
|
||||
for (var i = 0; i <= byte.MaxValue; i++)
|
||||
{
|
||||
public int prefixCode; // INVALID_CODE means the entry is invalid.
|
||||
public byte extByte;
|
||||
public ushort len;
|
||||
public int lastDstPos;
|
||||
codeTab[i].prefixCode = (ushort)i;
|
||||
codeTab[i].extByte = (byte)i;
|
||||
codeTab[i].len = 1;
|
||||
}
|
||||
|
||||
private static void CodeTabInit(CodeTabEntry[] codeTab)
|
||||
for (var i = byte.MaxValue + 1; i <= MAX_CODE; i++)
|
||||
{
|
||||
for (var i = 0; i <= byte.MaxValue; i++)
|
||||
{
|
||||
codeTab[i].prefixCode = (ushort)i;
|
||||
codeTab[i].extByte = (byte)i;
|
||||
codeTab[i].len = 1;
|
||||
}
|
||||
codeTab[i].prefixCode = INVALID_CODE;
|
||||
}
|
||||
}
|
||||
|
||||
for (var i = byte.MaxValue + 1; i <= MAX_CODE; i++)
|
||||
private static void UnshrinkPartialClear(CodeTabEntry[] codeTab, ref CodeQueue queue)
|
||||
{
|
||||
var isPrefix = new bool[MAX_CODE + 1];
|
||||
int codeQueueSize;
|
||||
|
||||
// Scan for codes that have been used as a prefix.
|
||||
for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++)
|
||||
{
|
||||
if (codeTab[i].prefixCode != INVALID_CODE)
|
||||
{
|
||||
isPrefix[codeTab[i].prefixCode] = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Clear "non-prefix" codes in the table; populate the code queue.
|
||||
codeQueueSize = 0;
|
||||
for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++)
|
||||
{
|
||||
if (!isPrefix[i])
|
||||
{
|
||||
codeTab[i].prefixCode = INVALID_CODE;
|
||||
queue.codes[codeQueueSize++] = (ushort)i;
|
||||
}
|
||||
}
|
||||
|
||||
private static void UnshrinkPartialClear(CodeTabEntry[] codeTab, ref CodeQueue queue)
|
||||
queue.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker.
|
||||
queue.nextIdx = 0;
|
||||
}
|
||||
|
||||
private static bool ReadCode(
|
||||
BitStream stream,
|
||||
ref int codeSize,
|
||||
CodeTabEntry[] codeTab,
|
||||
ref CodeQueue queue,
|
||||
out int nextCode
|
||||
)
|
||||
{
|
||||
int code,
|
||||
controlCode;
|
||||
|
||||
code = (int)stream.NextBits(codeSize);
|
||||
if (!stream.Advance(codeSize))
|
||||
{
|
||||
var isPrefix = new bool[MAX_CODE + 1];
|
||||
int codeQueueSize;
|
||||
|
||||
// Scan for codes that have been used as a prefix.
|
||||
for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++)
|
||||
{
|
||||
if (codeTab[i].prefixCode != INVALID_CODE)
|
||||
{
|
||||
isPrefix[codeTab[i].prefixCode] = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Clear "non-prefix" codes in the table; populate the code queue.
|
||||
codeQueueSize = 0;
|
||||
for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++)
|
||||
{
|
||||
if (!isPrefix[i])
|
||||
{
|
||||
codeTab[i].prefixCode = INVALID_CODE;
|
||||
queue.codes[codeQueueSize++] = (ushort)i;
|
||||
}
|
||||
}
|
||||
|
||||
queue.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker.
|
||||
queue.nextIdx = 0;
|
||||
nextCode = INVALID_CODE;
|
||||
return false;
|
||||
}
|
||||
|
||||
private static bool ReadCode(
|
||||
BitStream stream,
|
||||
ref int codeSize,
|
||||
CodeTabEntry[] codeTab,
|
||||
ref CodeQueue queue,
|
||||
out int nextCode
|
||||
)
|
||||
// Handle regular codes (the common case).
|
||||
if (code != CONTROL_CODE)
|
||||
{
|
||||
int code,
|
||||
controlCode;
|
||||
|
||||
code = (int)stream.NextBits(codeSize);
|
||||
if (!stream.Advance(codeSize))
|
||||
{
|
||||
nextCode = INVALID_CODE;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Handle regular codes (the common case).
|
||||
if (code != CONTROL_CODE)
|
||||
{
|
||||
nextCode = code;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Handle control codes.
|
||||
controlCode = (ushort)stream.NextBits(codeSize);
|
||||
if (!stream.Advance(codeSize))
|
||||
{
|
||||
nextCode = INVALID_CODE;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (controlCode == INC_CODE_SIZE && codeSize < MAX_CODE_SIZE)
|
||||
{
|
||||
codeSize++;
|
||||
return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode);
|
||||
}
|
||||
|
||||
if (controlCode == PARTIAL_CLEAR)
|
||||
{
|
||||
UnshrinkPartialClear(codeTab, ref queue);
|
||||
return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode);
|
||||
}
|
||||
nextCode = code;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Handle control codes.
|
||||
controlCode = (ushort)stream.NextBits(codeSize);
|
||||
if (!stream.Advance(codeSize))
|
||||
{
|
||||
nextCode = INVALID_CODE;
|
||||
return true;
|
||||
}
|
||||
|
||||
private static void CopyFromPrevPos(byte[] dst, int prevPos, int dstPos, int len)
|
||||
if (controlCode == INC_CODE_SIZE && codeSize < MAX_CODE_SIZE)
|
||||
{
|
||||
if (dstPos + len > dst.Length)
|
||||
{
|
||||
// Not enough room in dst for the sloppy copy below.
|
||||
Array.Copy(dst, prevPos, dst, dstPos, len);
|
||||
return;
|
||||
}
|
||||
|
||||
if (prevPos + len > dstPos)
|
||||
{
|
||||
// Benign one-byte overlap possible in the KwKwK case.
|
||||
//assert(prevPos + len == dstPos + 1);
|
||||
//assert(dst[prevPos] == dst[prevPos + len - 1]);
|
||||
}
|
||||
|
||||
Buffer.BlockCopy(dst, prevPos, dst, dstPos, len);
|
||||
codeSize++;
|
||||
return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode);
|
||||
}
|
||||
|
||||
private static UnshrnkStatus OutputCode(
|
||||
int code,
|
||||
byte[] dst,
|
||||
int dstPos,
|
||||
int dstCap,
|
||||
int prevCode,
|
||||
CodeTabEntry[] codeTab,
|
||||
ref CodeQueue queue,
|
||||
out byte firstByte,
|
||||
out int len
|
||||
)
|
||||
if (controlCode == PARTIAL_CLEAR)
|
||||
{
|
||||
int prefixCode;
|
||||
UnshrinkPartialClear(codeTab, ref queue);
|
||||
return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode);
|
||||
}
|
||||
|
||||
//assert(code <= MAX_CODE && code != CONTROL_CODE);
|
||||
//assert(dstPos < dstCap);
|
||||
nextCode = INVALID_CODE;
|
||||
return true;
|
||||
}
|
||||
|
||||
private static void CopyFromPrevPos(byte[] dst, int prevPos, int dstPos, int len)
|
||||
{
|
||||
if (dstPos + len > dst.Length)
|
||||
{
|
||||
// Not enough room in dst for the sloppy copy below.
|
||||
Array.Copy(dst, prevPos, dst, dstPos, len);
|
||||
return;
|
||||
}
|
||||
|
||||
if (prevPos + len > dstPos)
|
||||
{
|
||||
// Benign one-byte overlap possible in the KwKwK case.
|
||||
//assert(prevPos + len == dstPos + 1);
|
||||
//assert(dst[prevPos] == dst[prevPos + len - 1]);
|
||||
}
|
||||
|
||||
Buffer.BlockCopy(dst, prevPos, dst, dstPos, len);
|
||||
}
|
||||
|
||||
private static UnshrnkStatus OutputCode(
|
||||
int code,
|
||||
byte[] dst,
|
||||
int dstPos,
|
||||
int dstCap,
|
||||
int prevCode,
|
||||
CodeTabEntry[] codeTab,
|
||||
ref CodeQueue queue,
|
||||
out byte firstByte,
|
||||
out int len
|
||||
)
|
||||
{
|
||||
int prefixCode;
|
||||
|
||||
//assert(code <= MAX_CODE && code != CONTROL_CODE);
|
||||
//assert(dstPos < dstCap);
|
||||
firstByte = 0;
|
||||
if (code <= byte.MaxValue)
|
||||
{
|
||||
// Output literal byte.
|
||||
firstByte = (byte)code;
|
||||
len = 1;
|
||||
dst[dstPos] = (byte)code;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
if (codeTab[code].prefixCode == INVALID_CODE || codeTab[code].prefixCode == code)
|
||||
{
|
||||
// Reject invalid codes. Self-referential codes may exist in the table but cannot be used.
|
||||
firstByte = 0;
|
||||
if (code <= byte.MaxValue)
|
||||
{
|
||||
// Output literal byte.
|
||||
firstByte = (byte)code;
|
||||
len = 1;
|
||||
dst[dstPos] = (byte)code;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
len = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
if (codeTab[code].prefixCode == INVALID_CODE || codeTab[code].prefixCode == code)
|
||||
{
|
||||
// Reject invalid codes. Self-referential codes may exist in the table but cannot be used.
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
if (codeTab[code].len != UNKNOWN_LEN)
|
||||
{
|
||||
// Output string with known length (the common case).
|
||||
if (dstCap - dstPos < codeTab[code].len)
|
||||
{
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
CopyFromPrevPos(dst, codeTab[code].lastDstPos, dstPos, codeTab[code].len);
|
||||
firstByte = dst[dstPos];
|
||||
len = codeTab[code].len;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
// Output a string of unknown length.
|
||||
//assert(codeTab[code].len == UNKNOWN_LEN);
|
||||
prefixCode = codeTab[code].prefixCode;
|
||||
// assert(prefixCode > CONTROL_CODE);
|
||||
|
||||
if (prefixCode == queue.codes[queue.nextIdx])
|
||||
{
|
||||
// The prefix code hasn't been added yet, but we were just about to: the KwKwK case.
|
||||
//assert(codeTab[prevCode].prefixCode != INVALID_CODE);
|
||||
codeTab[prefixCode].prefixCode = prevCode;
|
||||
codeTab[prefixCode].extByte = firstByte;
|
||||
codeTab[prefixCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[prefixCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
dst[dstPos] = firstByte;
|
||||
}
|
||||
else if (codeTab[prefixCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// The prefix code is still invalid.
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
// Output the prefix string, then the extension byte.
|
||||
len = codeTab[prefixCode].len + 1;
|
||||
if (dstCap - dstPos < len)
|
||||
if (codeTab[code].len != UNKNOWN_LEN)
|
||||
{
|
||||
// Output string with known length (the common case).
|
||||
if (dstCap - dstPos < codeTab[code].len)
|
||||
{
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
CopyFromPrevPos(dst, codeTab[prefixCode].lastDstPos, dstPos, codeTab[prefixCode].len);
|
||||
dst[dstPos + len - 1] = codeTab[code].extByte;
|
||||
CopyFromPrevPos(dst, codeTab[code].lastDstPos, dstPos, codeTab[code].len);
|
||||
firstByte = dst[dstPos];
|
||||
|
||||
// Update the code table now that the string has a length and pos.
|
||||
//assert(prevCode != code);
|
||||
codeTab[code].len = (ushort)len;
|
||||
codeTab[code].lastDstPos = dstPos;
|
||||
|
||||
len = codeTab[code].len;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
public static UnshrnkStatus Unshrink(
|
||||
byte[] src,
|
||||
int srcLen,
|
||||
out int srcUsed,
|
||||
byte[] dst,
|
||||
int dstCap,
|
||||
out int dstUsed
|
||||
)
|
||||
// Output a string of unknown length.
|
||||
//assert(codeTab[code].len == UNKNOWN_LEN);
|
||||
prefixCode = codeTab[code].prefixCode;
|
||||
// assert(prefixCode > CONTROL_CODE);
|
||||
|
||||
if (prefixCode == queue.codes[queue.nextIdx])
|
||||
{
|
||||
var codeTab = new CodeTabEntry[HASHTAB_SIZE];
|
||||
var queue = new CodeQueue();
|
||||
var stream = new BitStream(src, srcLen);
|
||||
int codeSize,
|
||||
dstPos,
|
||||
len;
|
||||
int currCode,
|
||||
prevCode,
|
||||
newCode;
|
||||
byte firstByte;
|
||||
// The prefix code hasn't been added yet, but we were just about to: the KwKwK case.
|
||||
//assert(codeTab[prevCode].prefixCode != INVALID_CODE);
|
||||
codeTab[prefixCode].prefixCode = prevCode;
|
||||
codeTab[prefixCode].extByte = firstByte;
|
||||
codeTab[prefixCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[prefixCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
dst[dstPos] = firstByte;
|
||||
}
|
||||
else if (codeTab[prefixCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// The prefix code is still invalid.
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
CodeTabInit(codeTab);
|
||||
CodeQueueInit(ref queue);
|
||||
codeSize = MIN_CODE_SIZE;
|
||||
dstPos = 0;
|
||||
// Output the prefix string, then the extension byte.
|
||||
len = codeTab[prefixCode].len + 1;
|
||||
if (dstCap - dstPos < len)
|
||||
{
|
||||
firstByte = 0;
|
||||
len = 0;
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
// Handle the first code separately since there is no previous code.
|
||||
if (!ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode))
|
||||
CopyFromPrevPos(dst, codeTab[prefixCode].lastDstPos, dstPos, codeTab[prefixCode].len);
|
||||
dst[dstPos + len - 1] = codeTab[code].extByte;
|
||||
firstByte = dst[dstPos];
|
||||
|
||||
// Update the code table now that the string has a length and pos.
|
||||
//assert(prevCode != code);
|
||||
codeTab[code].len = (ushort)len;
|
||||
codeTab[code].lastDstPos = dstPos;
|
||||
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
public static UnshrnkStatus Unshrink(
|
||||
byte[] src,
|
||||
int srcLen,
|
||||
out int srcUsed,
|
||||
byte[] dst,
|
||||
int dstCap,
|
||||
out int dstUsed
|
||||
)
|
||||
{
|
||||
var codeTab = new CodeTabEntry[HASHTAB_SIZE];
|
||||
var queue = new CodeQueue();
|
||||
var stream = new BitStream(src, srcLen);
|
||||
int codeSize,
|
||||
dstPos,
|
||||
len;
|
||||
int currCode,
|
||||
prevCode,
|
||||
newCode;
|
||||
byte firstByte;
|
||||
|
||||
CodeTabInit(codeTab);
|
||||
CodeQueueInit(ref queue);
|
||||
codeSize = MIN_CODE_SIZE;
|
||||
dstPos = 0;
|
||||
|
||||
// Handle the first code separately since there is no previous code.
|
||||
if (!ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode))
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
//assert(currCode != CONTROL_CODE);
|
||||
if (currCode > byte.MaxValue)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Error; // The first code must be a literal.
|
||||
}
|
||||
|
||||
if (dstPos == dstCap)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
firstByte = (byte)currCode;
|
||||
dst[dstPos] = (byte)currCode;
|
||||
codeTab[currCode].lastDstPos = dstPos;
|
||||
dstPos++;
|
||||
|
||||
prevCode = currCode;
|
||||
while (ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode))
|
||||
{
|
||||
if (currCode == INVALID_CODE)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
//assert(currCode != CONTROL_CODE);
|
||||
if (currCode > byte.MaxValue)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Error; // The first code must be a literal.
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
if (dstPos == dstCap)
|
||||
@@ -279,153 +301,130 @@ namespace SharpCompress.Compressors.Shrink
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
firstByte = (byte)currCode;
|
||||
dst[dstPos] = (byte)currCode;
|
||||
codeTab[currCode].lastDstPos = dstPos;
|
||||
dstPos++;
|
||||
|
||||
prevCode = currCode;
|
||||
while (ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode))
|
||||
// Handle KwKwK: next code used before being added.
|
||||
if (currCode == queue.codes[queue.nextIdx])
|
||||
{
|
||||
if (currCode == INVALID_CODE)
|
||||
if (codeTab[prevCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// The previous code is no longer valid.
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
if (dstPos == dstCap)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Full;
|
||||
}
|
||||
|
||||
// Handle KwKwK: next code used before being added.
|
||||
if (currCode == queue.codes[queue.nextIdx])
|
||||
{
|
||||
if (codeTab[prevCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// The previous code is no longer valid.
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return UnshrnkStatus.Error;
|
||||
}
|
||||
|
||||
// Extend the previous code with its first byte.
|
||||
//assert(currCode != prevCode);
|
||||
codeTab[currCode].prefixCode = prevCode;
|
||||
codeTab[currCode].extByte = firstByte;
|
||||
codeTab[currCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[currCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
//assert(dstPos < dstCap);
|
||||
dst[dstPos] = firstByte;
|
||||
}
|
||||
|
||||
// Output the string represented by the current code.
|
||||
var status = OutputCode(
|
||||
currCode,
|
||||
dst,
|
||||
dstPos,
|
||||
dstCap,
|
||||
prevCode,
|
||||
codeTab,
|
||||
ref queue,
|
||||
out firstByte,
|
||||
out len
|
||||
);
|
||||
if (status != UnshrnkStatus.Ok)
|
||||
{
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return status;
|
||||
}
|
||||
|
||||
// Verify that the output matches walking the prefixes.
|
||||
var c = currCode;
|
||||
for (var i = 0; i < len; i++)
|
||||
{
|
||||
// assert(codeTab[c].len == len - i);
|
||||
//assert(codeTab[c].extByte == dst[dstPos + len - i - 1]);
|
||||
c = codeTab[c].prefixCode;
|
||||
}
|
||||
|
||||
// Add a new code to the string table if there's room.
|
||||
// The string is the previous code's string extended with the first byte of the current code's string.
|
||||
newCode = CodeQueueRemoveNext(ref queue);
|
||||
if (newCode != INVALID_CODE)
|
||||
{
|
||||
//assert(codeTab[prevCode].lastDstPos < dstPos);
|
||||
codeTab[newCode].prefixCode = prevCode;
|
||||
codeTab[newCode].extByte = firstByte;
|
||||
codeTab[newCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[newCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
|
||||
if (codeTab[prevCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// prevCode was invalidated in a partial clearing. Until that code is re-used, the
|
||||
// string represented by newCode is indeterminate.
|
||||
codeTab[newCode].len = UNKNOWN_LEN;
|
||||
}
|
||||
// If prevCode was invalidated in a partial clearing, it's possible that newCode == prevCode,
|
||||
// in which case it will never be used or cleared.
|
||||
}
|
||||
|
||||
codeTab[currCode].lastDstPos = dstPos;
|
||||
dstPos += len;
|
||||
|
||||
prevCode = currCode;
|
||||
// Extend the previous code with its first byte.
|
||||
//assert(currCode != prevCode);
|
||||
codeTab[currCode].prefixCode = prevCode;
|
||||
codeTab[currCode].extByte = firstByte;
|
||||
codeTab[currCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[currCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
//assert(dstPos < dstCap);
|
||||
dst[dstPos] = firstByte;
|
||||
}
|
||||
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = dstPos;
|
||||
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
public enum UnshrnkStatus
|
||||
{
|
||||
Ok,
|
||||
Full,
|
||||
Error,
|
||||
}
|
||||
|
||||
private struct CodeQueue
|
||||
{
|
||||
public int nextIdx;
|
||||
public ushort[] codes;
|
||||
}
|
||||
|
||||
private static void CodeQueueInit(ref CodeQueue q)
|
||||
{
|
||||
int codeQueueSize;
|
||||
ushort code;
|
||||
|
||||
codeQueueSize = 0;
|
||||
q.codes = new ushort[MAX_CODE - CONTROL_CODE + 2];
|
||||
|
||||
for (code = CONTROL_CODE + 1; code <= MAX_CODE; code++)
|
||||
// Output the string represented by the current code.
|
||||
var status = OutputCode(
|
||||
currCode,
|
||||
dst,
|
||||
dstPos,
|
||||
dstCap,
|
||||
prevCode,
|
||||
codeTab,
|
||||
ref queue,
|
||||
out firstByte,
|
||||
out len
|
||||
);
|
||||
if (status != UnshrnkStatus.Ok)
|
||||
{
|
||||
q.codes[codeQueueSize++] = code;
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = 0;
|
||||
return status;
|
||||
}
|
||||
|
||||
//assert(codeQueueSize < q.codes.Length);
|
||||
q.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker.
|
||||
q.nextIdx = 0;
|
||||
}
|
||||
|
||||
private static ushort CodeQueueNext(ref CodeQueue q) =>
|
||||
//assert(q.nextIdx < q.codes.Length);
|
||||
q.codes[q.nextIdx];
|
||||
|
||||
private static ushort CodeQueueRemoveNext(ref CodeQueue q)
|
||||
{
|
||||
var code = CodeQueueNext(ref q);
|
||||
if (code != INVALID_CODE)
|
||||
// Verify that the output matches walking the prefixes.
|
||||
var c = currCode;
|
||||
for (var i = 0; i < len; i++)
|
||||
{
|
||||
q.nextIdx++;
|
||||
// assert(codeTab[c].len == len - i);
|
||||
//assert(codeTab[c].extByte == dst[dstPos + len - i - 1]);
|
||||
c = codeTab[c].prefixCode;
|
||||
}
|
||||
return code;
|
||||
|
||||
// Add a new code to the string table if there's room.
|
||||
// The string is the previous code's string extended with the first byte of the current code's string.
|
||||
newCode = CodeQueueRemoveNext(ref queue);
|
||||
if (newCode != INVALID_CODE)
|
||||
{
|
||||
//assert(codeTab[prevCode].lastDstPos < dstPos);
|
||||
codeTab[newCode].prefixCode = prevCode;
|
||||
codeTab[newCode].extByte = firstByte;
|
||||
codeTab[newCode].len = (ushort)(codeTab[prevCode].len + 1);
|
||||
codeTab[newCode].lastDstPos = codeTab[prevCode].lastDstPos;
|
||||
|
||||
if (codeTab[prevCode].prefixCode == INVALID_CODE)
|
||||
{
|
||||
// prevCode was invalidated in a partial clearing. Until that code is re-used, the
|
||||
// string represented by newCode is indeterminate.
|
||||
codeTab[newCode].len = UNKNOWN_LEN;
|
||||
}
|
||||
// If prevCode was invalidated in a partial clearing, it's possible that newCode == prevCode,
|
||||
// in which case it will never be used or cleared.
|
||||
}
|
||||
|
||||
codeTab[currCode].lastDstPos = dstPos;
|
||||
dstPos += len;
|
||||
|
||||
prevCode = currCode;
|
||||
}
|
||||
|
||||
srcUsed = stream.BytesRead;
|
||||
dstUsed = dstPos;
|
||||
|
||||
return UnshrnkStatus.Ok;
|
||||
}
|
||||
|
||||
public enum UnshrnkStatus
|
||||
{
|
||||
Ok,
|
||||
Full,
|
||||
Error,
|
||||
}
|
||||
|
||||
private struct CodeQueue
|
||||
{
|
||||
public int nextIdx;
|
||||
public ushort[] codes;
|
||||
}
|
||||
|
||||
private static void CodeQueueInit(ref CodeQueue q)
|
||||
{
|
||||
int codeQueueSize;
|
||||
ushort code;
|
||||
|
||||
codeQueueSize = 0;
|
||||
q.codes = new ushort[MAX_CODE - CONTROL_CODE + 2];
|
||||
|
||||
for (code = CONTROL_CODE + 1; code <= MAX_CODE; code++)
|
||||
{
|
||||
q.codes[codeQueueSize++] = code;
|
||||
}
|
||||
|
||||
//assert(codeQueueSize < q.codes.Length);
|
||||
q.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker.
|
||||
q.nextIdx = 0;
|
||||
}
|
||||
|
||||
private static ushort CodeQueueNext(ref CodeQueue q) =>
|
||||
//assert(q.nextIdx < q.codes.Length);
|
||||
q.codes[q.nextIdx];
|
||||
|
||||
private static ushort CodeQueueRemoveNext(ref CodeQueue q)
|
||||
{
|
||||
var code = CodeQueueNext(ref q);
|
||||
if (code != INVALID_CODE)
|
||||
{
|
||||
q.nextIdx++;
|
||||
}
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
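
The KwKwK branch in HwUnshrink above handles the classic LZW corner case where the decoder sees a code that is about to be defined: the string is the previous output extended with its own first byte. A tiny generic LZW-style illustration of that rule (not the unshrink implementation itself, which also has partial clears and variable code sizes):

using System;
using System.Collections.Generic;
using System.Text;

static class KwKwKDemo
{
    // Decode a list of codes with a plain LZW dictionary, showing the
    // "code == next free entry" special case.
    public static string Decode(IReadOnlyList<int> codes)
    {
        var table = new List<string>();
        for (var i = 0; i < 256; i++) table.Add(((char)i).ToString());

        var output = new StringBuilder();
        var prev = table[codes[0]];
        output.Append(prev);

        for (var i = 1; i < codes.Count; i++)
        {
            string entry;
            if (codes[i] < table.Count)
            {
                entry = table[codes[i]];
            }
            else
            {
                entry = prev + prev[0]; // KwKwK: the code being used is the one we are about to add
            }
            output.Append(entry);
            table.Add(prev + entry[0]);
            prev = entry;
        }
        return output.ToString();
    }

    // 'a', 'b', then code 257, which is exactly the entry the decoder is about to create ("bb").
    static void Main() => Console.WriteLine(Decode(new[] { (int)'a', (int)'b', 257 })); // abbb
}
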
@@ -7,139 +7,138 @@ using System.Threading.Tasks;
|
||||
using SharpCompress.Compressors.RLE90;
|
||||
using SharpCompress.IO;
|
||||
|
||||
namespace SharpCompress.Compressors.Squeezed
|
||||
namespace SharpCompress.Compressors.Squeezed;
|
||||
|
||||
public class SqueezeStream : Stream, IStreamStack
|
||||
{
|
||||
public class SqueezeStream : Stream, IStreamStack
|
||||
#if DEBUG_STREAMS
|
||||
long IStreamStack.InstanceId { get; set; }
|
||||
#endif
|
||||
int IStreamStack.DefaultBufferSize { get; set; }
|
||||
|
||||
Stream IStreamStack.BaseStream() => _stream;
|
||||
|
||||
int IStreamStack.BufferSize
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
int IStreamStack.BufferPosition
|
||||
{
|
||||
get => 0;
|
||||
set { }
|
||||
}
|
||||
|
||||
void IStreamStack.SetPosition(long position) { }
|
||||
|
||||
private readonly Stream _stream;
|
||||
private readonly int _compressedSize;
|
||||
private const int NUMVALS = 257;
|
||||
private const int SPEOF = 256;
|
||||
private bool _processed = false;
|
||||
|
||||
public SqueezeStream(Stream stream, int compressedSize)
|
||||
{
|
||||
_stream = stream;
|
||||
_compressedSize = compressedSize;
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugConstruct(typeof(SqueezeStream));
|
||||
#endif
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
#if DEBUG_STREAMS
|
||||
long IStreamStack.InstanceId { get; set; }
|
||||
this.DebugDispose(typeof(SqueezeStream));
|
||||
#endif
|
||||
int IStreamStack.DefaultBufferSize { get; set; }
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
|
||||
Stream IStreamStack.BaseStream() => _stream;
|
||||
public override bool CanRead => true;
|
||||
|
||||
int IStreamStack.BufferSize
|
||||
public override bool CanSeek => false;
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotImplementedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _stream.Position;
|
||||
        set => throw new NotImplementedException();
    }

    int IStreamStack.BufferPosition
    {
        get => 0;
        set { }
    }

    void IStreamStack.SetPosition(long position) { }

    private readonly Stream _stream;
    private readonly int _compressedSize;
    private const int NUMVALS = 257;
    private const int SPEOF = 256;
    private bool _processed = false;

    public SqueezeStream(Stream stream, int compressedSize)
    {
        _stream = stream;
        _compressedSize = compressedSize;
#if DEBUG_STREAMS
        this.DebugConstruct(typeof(SqueezeStream));
#endif
    }

    protected override void Dispose(bool disposing)
    {
#if DEBUG_STREAMS
        this.DebugDispose(typeof(SqueezeStream));
#endif
        base.Dispose(disposing);
    }

    public override bool CanRead => true;

    public override bool CanSeek => false;

    public override bool CanWrite => false;

    public override long Length => throw new NotImplementedException();

    public override long Position
    {
        get => _stream.Position;
        set => throw new NotImplementedException();
    }

    public override void Flush() => throw new NotImplementedException();

    public override int Read(byte[] buffer, int offset, int count)
    {
        if (_processed)
        {
            return 0;
        }
        _processed = true;
        using var binaryReader = new BinaryReader(_stream);

        // Read numnodes (equivalent to convert_u16!(numnodes, buf))
        var numnodes = binaryReader.ReadUInt16();

        // Validation: numnodes should be within bounds
        if (numnodes >= NUMVALS)
        {
            throw new InvalidDataException(
                $"Invalid number of nodes {numnodes} (max {NUMVALS - 1})"
            );
        }

        // Handle the case where no nodes exist
        if (numnodes == 0)
        {
            return 0;
        }

        // Build dnode (tree of nodes)
        var dnode = new int[numnodes, 2];
        for (int j = 0; j < numnodes; j++)
        {
            dnode[j, 0] = binaryReader.ReadInt16();
            dnode[j, 1] = binaryReader.ReadInt16();
        }

        // Initialize BitReader for reading bits
        var bitReader = new BitReader(_stream);
        var decoded = new List<byte>();

        int i = 0;
        // Decode the buffer using the dnode tree
        while (true)
        {
            i = dnode[i, bitReader.ReadBit() ? 1 : 0];
            if (i < 0)
            {
                i = (short)-(i + 1);
                if (i == SPEOF)
                {
                    break;
                }
                else
                {
                    decoded.Add((byte)i);
                    i = 0;
                }
            }
        }

        // Unpack the decoded buffer using the RLE class
        var unpacked = RLE.UnpackRLE(decoded.ToArray());
        unpacked.CopyTo(buffer, 0);
        return unpacked.Count();
    }

    public override long Seek(long offset, SeekOrigin origin) =>
        throw new NotImplementedException();

    public override void SetLength(long value) => throw new NotImplementedException();

    public override void Write(byte[] buffer, int offset, int count) =>
        throw new NotImplementedException();
}
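Not part of the diff: a minimal usage sketch for the stream above, assuming SqueezeStream is accessible to the caller and the supplied buffer is at least as large as the unpacked entry (the helper name and parameters are hypothetical).

using System;
using System.IO;

static byte[] ReadSqueezedEntry(Stream source, int compressedSize, int maxUnpackedSize)
{
    // The whole entry is decoded on the first Read call; later calls return 0.
    using var squeeze = new SqueezeStream(source, compressedSize);
    var buffer = new byte[maxUnpackedSize];
    var unpacked = squeeze.Read(buffer, 0, buffer.Length);
    return buffer.AsSpan(0, unpacked).ToArray();
}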

@@ -22,7 +22,9 @@ public class XZFooter

public static XZFooter FromStream(Stream stream)
{
var footer = new XZFooter(new BinaryReader(stream, Encoding.UTF8, true));
var footer = new XZFooter(
new BinaryReader(SharpCompressStream.Create(stream, leaveOpen: true), Encoding.UTF8)
);
footer.Process();
return footer;
}

@@ -18,7 +18,9 @@ public class XZHeader

public static XZHeader FromStream(Stream stream)
{
var header = new XZHeader(new BinaryReader(stream, Encoding.UTF8, true));
var header = new XZHeader(
new BinaryReader(SharpCompressStream.Create(stream, leaveOpen: true), Encoding.UTF8)
);
header.Process();
return header;
}

@@ -32,7 +32,7 @@ public class XZIndex
public static XZIndex FromStream(Stream stream, bool indexMarkerAlreadyVerified)
{
var index = new XZIndex(
new BinaryReader(stream, Encoding.UTF8, true),
new BinaryReader(SharpCompressStream.Create(stream, leaveOpen: true), Encoding.UTF8),
indexMarkerAlreadyVerified
);
index.Process();
311
src/SharpCompress/Compressors/ZStandard/BitOperations.cs
Normal file
@@ -0,0 +1,311 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
|
||||
#if !NETCOREAPP3_0_OR_GREATER
|
||||
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
// Some routines inspired by the Stanford Bit Twiddling Hacks by Sean Eron Anderson:
|
||||
// http://graphics.stanford.edu/~seander/bithacks.html
|
||||
|
||||
namespace System.Numerics
|
||||
{
|
||||
/// <summary>
|
||||
/// Utility methods for intrinsic bit-twiddling operations.
|
||||
/// The methods use hardware intrinsics when available on the underlying platform,
|
||||
/// otherwise they use optimized software fallbacks.
|
||||
/// </summary>
|
||||
public static unsafe class BitOperations
|
||||
{
|
||||
// hack: should be public because of inline
|
||||
public static readonly byte* TrailingZeroCountDeBruijn = GetArrayPointer(
|
||||
new byte[]
|
||||
{
|
||||
00,
|
||||
01,
|
||||
28,
|
||||
02,
|
||||
29,
|
||||
14,
|
||||
24,
|
||||
03,
|
||||
30,
|
||||
22,
|
||||
20,
|
||||
15,
|
||||
25,
|
||||
17,
|
||||
04,
|
||||
08,
|
||||
31,
|
||||
27,
|
||||
13,
|
||||
23,
|
||||
21,
|
||||
19,
|
||||
16,
|
||||
07,
|
||||
26,
|
||||
12,
|
||||
18,
|
||||
06,
|
||||
11,
|
||||
05,
|
||||
10,
|
||||
09,
|
||||
}
|
||||
);
|
||||
|
||||
// hack: should be public because of inline
|
||||
public static readonly byte* Log2DeBruijn = GetArrayPointer(
|
||||
new byte[]
|
||||
{
|
||||
00,
|
||||
09,
|
||||
01,
|
||||
10,
|
||||
13,
|
||||
21,
|
||||
02,
|
||||
29,
|
||||
11,
|
||||
14,
|
||||
16,
|
||||
18,
|
||||
22,
|
||||
25,
|
||||
03,
|
||||
30,
|
||||
08,
|
||||
12,
|
||||
20,
|
||||
28,
|
||||
15,
|
||||
17,
|
||||
24,
|
||||
07,
|
||||
19,
|
||||
27,
|
||||
23,
|
||||
06,
|
||||
26,
|
||||
05,
|
||||
04,
|
||||
31,
|
||||
}
|
||||
);
|
||||
|
||||
/// <summary>
|
||||
/// Returns the integer (floor) log of the specified value, base 2.
|
||||
/// Note that by convention, input value 0 returns 0 since log(0) is undefined.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int Log2(uint value)
|
||||
{
|
||||
// The 0->0 contract is fulfilled by setting the LSB to 1.
|
||||
// Log(1) is 0, and setting the LSB for values > 1 does not change the log2 result.
|
||||
value |= 1;
|
||||
|
||||
// value lzcnt actual expected
|
||||
// ..0001 31 31-31 0
|
||||
// ..0010 30 31-30 1
|
||||
// 0010.. 2 31-2 29
|
||||
// 0100.. 1 31-1 30
|
||||
// 1000.. 0 31-0 31
|
||||
|
||||
// Fallback contract is 0->0
|
||||
// No AggressiveInlining due to large method size
|
||||
// Has conventional contract 0->0 (Log(0) is undefined)
|
||||
|
||||
// Fill trailing zeros with ones, eg 00010010 becomes 00011111
|
||||
value |= value >> 01;
|
||||
value |= value >> 02;
|
||||
value |= value >> 04;
|
||||
value |= value >> 08;
|
||||
value |= value >> 16;
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return Log2DeBruijn[
|
||||
// Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_1100_0100_1010_1100_1101_1101u
|
||||
(int)((value * 0x07C4ACDDu) >> 27)
|
||||
];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns the integer (floor) log of the specified value, base 2.
|
||||
/// Note that by convention, input value 0 returns 0 since log(0) is undefined.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int Log2(ulong value)
|
||||
{
|
||||
value |= 1;
|
||||
|
||||
uint hi = (uint)(value >> 32);
|
||||
|
||||
if (hi == 0)
|
||||
{
|
||||
return Log2((uint)value);
|
||||
}
|
||||
|
||||
return 32 + Log2(hi);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in an integer value.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(int value) => TrailingZeroCount((uint)value);
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in an integer value.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(uint value)
|
||||
{
|
||||
// Unguarded fallback contract is 0->0, BSF contract is 0->undefined
|
||||
if (value == 0)
|
||||
{
|
||||
return 32;
|
||||
}
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return TrailingZeroCountDeBruijn[
|
||||
// Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_0111_1100_1011_0101_0011_0001u
|
||||
(int)(((value & (uint)-(int)value) * 0x077CB531u) >> 27)
|
||||
]; // Multi-cast mitigates redundant conv.u8
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(long value) => TrailingZeroCount((ulong)value);
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(ulong value)
|
||||
{
|
||||
uint lo = (uint)value;
|
||||
|
||||
if (lo == 0)
|
||||
{
|
||||
return 32 + TrailingZeroCount((uint)(value >> 32));
|
||||
}
|
||||
|
||||
return TrailingZeroCount(lo);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value left by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROL.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..31] is treated as congruent mod 32.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static uint RotateLeft(uint value, int offset) =>
|
||||
(value << offset) | (value >> (32 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value left by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROL.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..63] is treated as congruent mod 64.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static ulong RotateLeft(ulong value, int offset) =>
|
||||
(value << offset) | (value >> (64 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value right by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROR.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..31] is treated as congruent mod 32.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static uint RotateRight(uint value, int offset) =>
|
||||
(value >> offset) | (value << (32 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value right by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROR.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..63] is treated as congruent mod 64.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static ulong RotateRight(ulong value, int offset) =>
|
||||
(value >> offset) | (value << (64 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of leading zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction LZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int LeadingZeroCount(uint value)
|
||||
{
|
||||
// Unguarded fallback contract is 0->31, BSR contract is 0->undefined
|
||||
if (value == 0)
|
||||
{
|
||||
return 32;
|
||||
}
|
||||
|
||||
// No AggressiveInlining due to large method size
|
||||
// Has conventional contract 0->0 (Log(0) is undefined)
|
||||
|
||||
// Fill trailing zeros with ones, eg 00010010 becomes 00011111
|
||||
value |= value >> 01;
|
||||
value |= value >> 02;
|
||||
value |= value >> 04;
|
||||
value |= value >> 08;
|
||||
value |= value >> 16;
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return 31
|
||||
^ Log2DeBruijn[
|
||||
// uint|long -> IntPtr cast on 32-bit platforms does expensive overflow checks not needed here
|
||||
(int)((value * 0x07C4ACDDu) >> 27)
|
||||
];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of leading zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction LZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int LeadingZeroCount(ulong value)
|
||||
{
|
||||
uint hi = (uint)(value >> 32);
|
||||
|
||||
if (hi == 0)
|
||||
{
|
||||
return 32 + LeadingZeroCount((uint)value);
|
||||
}
|
||||
|
||||
return LeadingZeroCount(hi);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
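Not part of the diff: a quick, hypothetical sanity check of the contracts documented above for this polyfill (it only compiles on targets without a built-in System.Numerics.BitOperations).

using System.Diagnostics;
using System.Numerics;

static void CheckBitOperationContracts()
{
    Debug.Assert(BitOperations.Log2(0u) == 0);                  // 0 -> 0 by convention
    Debug.Assert(BitOperations.Log2(0x80000000u) == 31);
    Debug.Assert(BitOperations.TrailingZeroCount(0u) == 32);    // guarded 0 -> 32
    Debug.Assert(BitOperations.TrailingZeroCount(0b1000u) == 3);
    Debug.Assert(BitOperations.LeadingZeroCount(1u) == 31);
    Debug.Assert(BitOperations.RotateLeft(0x80000000u, 1) == 1u);
}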
301
src/SharpCompress/Compressors/ZStandard/CompressionStream.cs
Normal file
@@ -0,0 +1,301 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
public class CompressionStream : Stream
|
||||
{
|
||||
private readonly Stream innerStream;
|
||||
private readonly byte[] outputBuffer;
|
||||
private readonly bool preserveCompressor;
|
||||
private readonly bool leaveOpen;
|
||||
private Compressor? compressor;
|
||||
private ZSTD_outBuffer_s output;
|
||||
|
||||
public CompressionStream(
|
||||
Stream stream,
|
||||
int level = Compressor.DefaultCompressionLevel,
|
||||
int bufferSize = 0,
|
||||
bool leaveOpen = true
|
||||
)
|
||||
: this(stream, new Compressor(level), bufferSize, false, leaveOpen) { }
|
||||
|
||||
public CompressionStream(
|
||||
Stream stream,
|
||||
Compressor compressor,
|
||||
int bufferSize = 0,
|
||||
bool preserveCompressor = true,
|
||||
bool leaveOpen = true
|
||||
)
|
||||
{
|
||||
if (stream == null)
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
|
||||
if (!stream.CanWrite)
|
||||
throw new ArgumentException("Stream is not writable", nameof(stream));
|
||||
|
||||
if (bufferSize < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(bufferSize));
|
||||
|
||||
innerStream = stream;
|
||||
this.compressor = compressor;
|
||||
this.preserveCompressor = preserveCompressor;
|
||||
this.leaveOpen = leaveOpen;
|
||||
|
||||
var outputBufferSize =
|
||||
bufferSize > 0
|
||||
? bufferSize
|
||||
: (int)Unsafe.Methods.ZSTD_CStreamOutSize().EnsureZstdSuccess();
|
||||
outputBuffer = ArrayPool<byte>.Shared.Rent(outputBufferSize);
|
||||
output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)outputBufferSize };
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_cParameter parameter, int value)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.NotNull().SetParameter(parameter, value);
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_cParameter parameter)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
return compressor.NotNull().GetParameter(parameter);
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.NotNull().LoadDictionary(dict);
|
||||
}
|
||||
|
||||
~CompressionStream() => Dispose(false);
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override async ValueTask DisposeAsync()
|
||||
#else
|
||||
public async Task DisposeAsync()
|
||||
#endif
|
||||
{
|
||||
if (compressor == null)
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_end).ConfigureAwait(false);
|
||||
}
|
||||
finally
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (compressor == null)
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
if (disposing)
|
||||
FlushInternal(ZSTD_EndDirective.ZSTD_e_end);
|
||||
}
|
||||
finally
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
}
|
||||
}
|
||||
|
||||
private void ReleaseUnmanagedResources()
|
||||
{
|
||||
if (!preserveCompressor)
|
||||
{
|
||||
compressor.NotNull().Dispose();
|
||||
}
|
||||
compressor = null;
|
||||
|
||||
if (outputBuffer != null)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(outputBuffer);
|
||||
}
|
||||
|
||||
if (!leaveOpen)
|
||||
{
|
||||
innerStream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
public override void Flush() => FlushInternal(ZSTD_EndDirective.ZSTD_e_flush);
|
||||
|
||||
public override async Task FlushAsync(CancellationToken cancellationToken) =>
|
||||
await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_flush, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
|
||||
private void FlushInternal(ZSTD_EndDirective directive) => WriteInternal(null, directive);
|
||||
|
||||
private async Task FlushInternalAsync(
|
||||
ZSTD_EndDirective directive,
|
||||
CancellationToken cancellationToken = default
|
||||
) => await WriteInternalAsync(null, directive, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
Write(new ReadOnlySpan<byte>(buffer, offset, count));
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override void Write(ReadOnlySpan<byte> buffer) =>
|
||||
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue);
|
||||
#else
|
||||
public void Write(ReadOnlySpan<byte> buffer) =>
|
||||
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue);
|
||||
#endif
|
||||
|
||||
private void WriteInternal(ReadOnlySpan<byte> buffer, ZSTD_EndDirective directive)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
var input = new ZSTD_inBuffer_s
|
||||
{
|
||||
pos = 0,
|
||||
size = buffer != null ? (nuint)buffer.Length : 0,
|
||||
};
|
||||
nuint remaining;
|
||||
do
|
||||
{
|
||||
output.pos = 0;
|
||||
remaining = CompressStream(ref input, buffer, directive);
|
||||
|
||||
var written = (int)output.pos;
|
||||
if (written > 0)
|
||||
innerStream.Write(outputBuffer, 0, written);
|
||||
} while (
|
||||
directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0
|
||||
);
|
||||
}
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
private async ValueTask WriteInternalAsync(
|
||||
ReadOnlyMemory<byte>? buffer,
|
||||
ZSTD_EndDirective directive,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
#else
|
||||
private async Task WriteInternalAsync(
|
||||
ReadOnlyMemory<byte>? buffer,
|
||||
ZSTD_EndDirective directive,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
#endif
|
||||
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
var input = new ZSTD_inBuffer_s
|
||||
{
|
||||
pos = 0,
|
||||
size = buffer.HasValue ? (nuint)buffer.Value.Length : 0,
|
||||
};
|
||||
nuint remaining;
|
||||
do
|
||||
{
|
||||
output.pos = 0;
|
||||
remaining = CompressStream(
|
||||
ref input,
|
||||
buffer.HasValue ? buffer.Value.Span : null,
|
||||
directive
|
||||
);
|
||||
|
||||
var written = (int)output.pos;
|
||||
if (written > 0)
|
||||
await innerStream
|
||||
.WriteAsync(outputBuffer, 0, written, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
} while (
|
||||
directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0
|
||||
);
|
||||
}
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
|
||||
public override Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
) => WriteAsync(new ReadOnlyMemory<byte>(buffer, offset, count), cancellationToken).AsTask();
|
||||
|
||||
public override async ValueTask WriteAsync(
|
||||
ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
) =>
|
||||
await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
#else
|
||||
|
||||
public override Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
) => WriteAsync(new ReadOnlyMemory<byte>(buffer, offset, count), cancellationToken);
|
||||
|
||||
public async Task WriteAsync(
|
||||
ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
) =>
|
||||
await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
#endif
|
||||
|
||||
internal unsafe nuint CompressStream(
|
||||
ref ZSTD_inBuffer_s input,
|
||||
ReadOnlySpan<byte> inputBuffer,
|
||||
ZSTD_EndDirective directive
|
||||
)
|
||||
{
|
||||
fixed (byte* inputBufferPtr = inputBuffer)
|
||||
fixed (byte* outputBufferPtr = outputBuffer)
|
||||
{
|
||||
input.src = inputBufferPtr;
|
||||
output.dst = outputBufferPtr;
|
||||
return compressor
|
||||
.NotNull()
|
||||
.CompressStream(ref input, ref output, directive)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanRead => false;
|
||||
public override bool CanSeek => false;
|
||||
public override bool CanWrite => true;
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => throw new NotSupportedException();
|
||||
set => throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count) =>
|
||||
throw new NotSupportedException();
|
||||
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (compressor == null)
|
||||
throw new ObjectDisposedException(nameof(CompressionStream));
|
||||
}
|
||||
|
||||
public void SetPledgedSrcSize(ulong pledgedSrcSize)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.NotNull().SetPledgedSrcSize(pledgedSrcSize);
|
||||
}
|
||||
}
|
||||
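Not part of the diff: a minimal round-trip sketch for the streaming API above; the file paths and helper name are hypothetical, and level 3 is Compressor.DefaultCompressionLevel.

using System.IO;
using SharpCompress.Compressors.ZStandard;

static void ZstdRoundTrip(string inputPath, string zstPath, string outputPath)
{
    // Compress: CompressionStream writes zstd-framed data into the destination stream.
    using (var source = File.OpenRead(inputPath))
    using (var destination = File.Create(zstPath))
    using (var compression = new CompressionStream(destination, level: 3, leaveOpen: false))
    {
        source.CopyTo(compression);
    } // Dispose ends the frame (ZSTD_e_end) and flushes it to the inner stream

    // Decompress: DecompressionStream reads zstd frames back out of the source stream.
    using (var compressed = File.OpenRead(zstPath))
    using (var decompression = new DecompressionStream(compressed, leaveOpen: false))
    using (var restored = File.Create(outputPath))
    {
        decompression.CopyTo(restored);
    }
}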
204
src/SharpCompress/Compressors/ZStandard/Compressor.cs
Normal file
@@ -0,0 +1,204 @@
|
||||
using System;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
public unsafe class Compressor : IDisposable
|
||||
{
|
||||
/// <summary>
|
||||
/// Minimum negative compression level allowed
|
||||
/// </summary>
|
||||
public static int MinCompressionLevel => Unsafe.Methods.ZSTD_minCLevel();
|
||||
|
||||
/// <summary>
|
||||
/// Maximum compression level available
|
||||
/// </summary>
|
||||
public static int MaxCompressionLevel => Unsafe.Methods.ZSTD_maxCLevel();
|
||||
|
||||
/// <summary>
|
||||
/// Default compression level
|
||||
/// </summary>
|
||||
/// <see cref="Unsafe.Methods.ZSTD_defaultCLevel"/>
|
||||
public const int DefaultCompressionLevel = 3;
|
||||
|
||||
private int level = DefaultCompressionLevel;
|
||||
|
||||
private readonly SafeCctxHandle handle;
|
||||
|
||||
public int Level
|
||||
{
|
||||
get => level;
|
||||
set
|
||||
{
|
||||
if (level != value)
|
||||
{
|
||||
level = value;
|
||||
SetParameter(ZSTD_cParameter.ZSTD_c_compressionLevel, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_cParameter parameter, int value)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
Unsafe.Methods.ZSTD_CCtx_setParameter(cctx, parameter, value).EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_cParameter parameter)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
int value;
|
||||
Unsafe.Methods.ZSTD_CCtx_getParameter(cctx, parameter, &value).EnsureZstdSuccess();
|
||||
return value;
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
var dictReadOnlySpan = new ReadOnlySpan<byte>(dict);
|
||||
LoadDictionary(dictReadOnlySpan);
|
||||
}
|
||||
|
||||
public void LoadDictionary(ReadOnlySpan<byte> dict)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
fixed (byte* dictPtr = dict)
|
||||
Unsafe
|
||||
.Methods.ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public Compressor(int level = DefaultCompressionLevel)
|
||||
{
|
||||
handle = SafeCctxHandle.Create();
|
||||
Level = level;
|
||||
}
|
||||
|
||||
public static int GetCompressBound(int length) =>
|
||||
(int)Unsafe.Methods.ZSTD_compressBound((nuint)length);
|
||||
|
||||
public static ulong GetCompressBoundLong(ulong length) =>
|
||||
Unsafe.Methods.ZSTD_compressBound((nuint)length);
|
||||
|
||||
public Span<byte> Wrap(ReadOnlySpan<byte> src)
|
||||
{
|
||||
var dest = new byte[GetCompressBound(src.Length)];
|
||||
var length = Wrap(src, dest);
|
||||
return new Span<byte>(dest, 0, length);
|
||||
}
|
||||
|
||||
public int Wrap(byte[] src, byte[] dest, int offset) =>
|
||||
Wrap(src, new Span<byte>(dest, offset, dest.Length - offset));
|
||||
|
||||
public int Wrap(ReadOnlySpan<byte> src, Span<byte> dest)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
return (int)
|
||||
Unsafe
|
||||
.Methods.ZSTD_compress2(
|
||||
cctx,
|
||||
destPtr,
|
||||
(nuint)dest.Length,
|
||||
srcPtr,
|
||||
(nuint)src.Length
|
||||
)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public int Wrap(ArraySegment<byte> src, ArraySegment<byte> dest) =>
|
||||
Wrap((ReadOnlySpan<byte>)src, dest);
|
||||
|
||||
public int Wrap(
|
||||
byte[] src,
|
||||
int srcOffset,
|
||||
int srcLength,
|
||||
byte[] dst,
|
||||
int dstOffset,
|
||||
int dstLength
|
||||
) =>
|
||||
Wrap(
|
||||
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
|
||||
new Span<byte>(dst, dstOffset, dstLength)
|
||||
);
|
||||
|
||||
public bool TryWrap(byte[] src, byte[] dest, int offset, out int written) =>
|
||||
TryWrap(src, new Span<byte>(dest, offset, dest.Length - offset), out written);
|
||||
|
||||
public bool TryWrap(ReadOnlySpan<byte> src, Span<byte> dest, out int written)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
{
|
||||
nuint returnValue;
|
||||
using (var cctx = handle.Acquire())
|
||||
{
|
||||
returnValue = Unsafe.Methods.ZSTD_compress2(
|
||||
cctx,
|
||||
destPtr,
|
||||
(nuint)dest.Length,
|
||||
srcPtr,
|
||||
(nuint)src.Length
|
||||
);
|
||||
}
|
||||
|
||||
if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))
|
||||
{
|
||||
written = default;
|
||||
return false;
|
||||
}
|
||||
|
||||
returnValue.EnsureZstdSuccess();
|
||||
written = (int)returnValue;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
public bool TryWrap(ArraySegment<byte> src, ArraySegment<byte> dest, out int written) =>
|
||||
TryWrap((ReadOnlySpan<byte>)src, dest, out written);
|
||||
|
||||
public bool TryWrap(
|
||||
byte[] src,
|
||||
int srcOffset,
|
||||
int srcLength,
|
||||
byte[] dst,
|
||||
int dstOffset,
|
||||
int dstLength,
|
||||
out int written
|
||||
) =>
|
||||
TryWrap(
|
||||
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
|
||||
new Span<byte>(dst, dstOffset, dstLength),
|
||||
out written
|
||||
);
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
handle.Dispose();
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
|
||||
internal nuint CompressStream(
|
||||
ref ZSTD_inBuffer_s input,
|
||||
ref ZSTD_outBuffer_s output,
|
||||
ZSTD_EndDirective directive
|
||||
)
|
||||
{
|
||||
fixed (ZSTD_inBuffer_s* inputPtr = &input)
|
||||
fixed (ZSTD_outBuffer_s* outputPtr = &output)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
return Unsafe
|
||||
.Methods.ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public void SetPledgedSrcSize(ulong pledgedSrcSize)
|
||||
{
|
||||
using var cctx = handle.Acquire();
|
||||
Unsafe.Methods.ZSTD_CCtx_setPledgedSrcSize(cctx, pledgedSrcSize).EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
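Not part of the diff: a sketch of the one-shot buffer API above. Wrap sizes its own destination via GetCompressBound, while TryWrap reports failure instead of throwing when the caller-supplied destination is too small; the helper names are hypothetical.

using System;
using SharpCompress.Compressors.ZStandard;

static byte[] CompressOnce(byte[] payload)
{
    using var compressor = new Compressor(level: 5);
    return compressor.Wrap(payload).ToArray(); // trimmed to the number of bytes written
}

static bool TryCompressInto(byte[] payload, byte[] destination, out int written)
{
    using var compressor = new Compressor();
    return compressor.TryWrap(payload, destination, 0, out written);
}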
8
src/SharpCompress/Compressors/ZStandard/Constants.cs
Normal file
@@ -0,0 +1,8 @@
namespace SharpCompress.Compressors.ZStandard;

internal class Constants
{
    //NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks
    //NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27
    public const ulong MaxByteArrayLength = 0x7FFFFFC7;
}
293
src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs
Normal file
@@ -0,0 +1,293 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
public class DecompressionStream : Stream
|
||||
{
|
||||
private readonly Stream innerStream;
|
||||
private readonly byte[] inputBuffer;
|
||||
private readonly int inputBufferSize;
|
||||
private readonly bool preserveDecompressor;
|
||||
private readonly bool leaveOpen;
|
||||
private readonly bool checkEndOfStream;
|
||||
private Decompressor? decompressor;
|
||||
private ZSTD_inBuffer_s input;
|
||||
private nuint lastDecompressResult = 0;
|
||||
private bool contextDrained = true;
|
||||
|
||||
public DecompressionStream(
|
||||
Stream stream,
|
||||
int bufferSize = 0,
|
||||
bool checkEndOfStream = true,
|
||||
bool leaveOpen = true
|
||||
)
|
||||
: this(stream, new Decompressor(), bufferSize, checkEndOfStream, false, leaveOpen) { }
|
||||
|
||||
public DecompressionStream(
|
||||
Stream stream,
|
||||
Decompressor decompressor,
|
||||
int bufferSize = 0,
|
||||
bool checkEndOfStream = true,
|
||||
bool preserveDecompressor = true,
|
||||
bool leaveOpen = true
|
||||
)
|
||||
{
|
||||
if (stream == null)
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
|
||||
if (!stream.CanRead)
|
||||
throw new ArgumentException("Stream is not readable", nameof(stream));
|
||||
|
||||
if (bufferSize < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(bufferSize));
|
||||
|
||||
innerStream = stream;
|
||||
this.decompressor = decompressor;
|
||||
this.preserveDecompressor = preserveDecompressor;
|
||||
this.leaveOpen = leaveOpen;
|
||||
this.checkEndOfStream = checkEndOfStream;
|
||||
|
||||
inputBufferSize =
|
||||
bufferSize > 0
|
||||
? bufferSize
|
||||
: (int)Unsafe.Methods.ZSTD_DStreamInSize().EnsureZstdSuccess();
|
||||
inputBuffer = ArrayPool<byte>.Shared.Rent(inputBufferSize);
|
||||
input = new ZSTD_inBuffer_s { pos = (nuint)inputBufferSize, size = (nuint)inputBufferSize };
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_dParameter parameter, int value)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
decompressor.NotNull().SetParameter(parameter, value);
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_dParameter parameter)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
return decompressor.NotNull().GetParameter(parameter);
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
decompressor.NotNull().LoadDictionary(dict);
|
||||
}
|
||||
|
||||
~DecompressionStream() => Dispose(false);
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (decompressor == null)
|
||||
return;
|
||||
|
||||
if (!preserveDecompressor)
|
||||
{
|
||||
decompressor.Dispose();
|
||||
}
|
||||
decompressor = null;
|
||||
|
||||
if (inputBuffer != null)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(inputBuffer);
|
||||
}
|
||||
|
||||
if (!leaveOpen)
|
||||
{
|
||||
innerStream.Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count) =>
|
||||
Read(new Span<byte>(buffer, offset, count));
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override int Read(Span<byte> buffer)
|
||||
#else
|
||||
public int Read(Span<byte> buffer)
|
||||
#endif
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
// Guard against infinite loop (output.pos would never become non-zero)
|
||||
if (buffer.Length == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length };
|
||||
while (true)
|
||||
{
|
||||
// If there is still input available, or there might be data buffered in the decompressor context, flush that out
|
||||
while (input.pos < input.size || !contextDrained)
|
||||
{
|
||||
nuint oldInputPos = input.pos;
|
||||
nuint result = DecompressStream(ref output, buffer);
|
||||
if (output.pos > 0 || oldInputPos != input.pos)
|
||||
{
|
||||
// Keep result from last decompress call that made some progress, so we know if we're at end of frame
|
||||
lastDecompressResult = result;
|
||||
}
|
||||
// If decompression filled the output buffer, there might still be data buffered in the decompressor context
|
||||
contextDrained = output.pos < output.size;
|
||||
// If we have data to return, return it immediately, so we won't stall on Read
|
||||
if (output.pos > 0)
|
||||
{
|
||||
return (int)output.pos;
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, read some more input
|
||||
int bytesRead;
|
||||
if ((bytesRead = innerStream.Read(inputBuffer, 0, inputBufferSize)) == 0)
|
||||
{
|
||||
if (checkEndOfStream && lastDecompressResult != 0)
|
||||
{
|
||||
throw new EndOfStreamException("Premature end of stream");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
input.size = (nuint)bytesRead;
|
||||
input.pos = 0;
|
||||
}
|
||||
}
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
) => ReadAsync(new Memory<byte>(buffer, offset, count), cancellationToken).AsTask();
|
||||
|
||||
public override async ValueTask<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
#else
|
||||
|
||||
public override Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
CancellationToken cancellationToken
|
||||
) => ReadAsync(new Memory<byte>(buffer, offset, count), cancellationToken);
|
||||
|
||||
public async Task<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default
|
||||
)
|
||||
#endif
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
// Guard against infinite loop (output.pos would never become non-zero)
|
||||
if (buffer.Length == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length };
|
||||
while (true)
|
||||
{
|
||||
// If there is still input available, or there might be data buffered in the decompressor context, flush that out
|
||||
while (input.pos < input.size || !contextDrained)
|
||||
{
|
||||
nuint oldInputPos = input.pos;
|
||||
nuint result = DecompressStream(ref output, buffer.Span);
|
||||
if (output.pos > 0 || oldInputPos != input.pos)
|
||||
{
|
||||
// Keep result from last decompress call that made some progress, so we know if we're at end of frame
|
||||
lastDecompressResult = result;
|
||||
}
|
||||
// If decompression filled the output buffer, there might still be data buffered in the decompressor context
|
||||
contextDrained = output.pos < output.size;
|
||||
// If we have data to return, return it immediately, so we won't stall on Read
|
||||
if (output.pos > 0)
|
||||
{
|
||||
return (int)output.pos;
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, read some more input
|
||||
int bytesRead;
|
||||
if (
|
||||
(
|
||||
bytesRead = await innerStream
|
||||
.ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken)
|
||||
.ConfigureAwait(false)
|
||||
) == 0
|
||||
)
|
||||
{
|
||||
if (checkEndOfStream && lastDecompressResult != 0)
|
||||
{
|
||||
throw new EndOfStreamException("Premature end of stream");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
input.size = (nuint)bytesRead;
|
||||
input.pos = 0;
|
||||
}
|
||||
}
|
||||
|
||||
private unsafe nuint DecompressStream(ref ZSTD_outBuffer_s output, Span<byte> outputBuffer)
|
||||
{
|
||||
fixed (byte* inputBufferPtr = inputBuffer)
|
||||
fixed (byte* outputBufferPtr = outputBuffer)
|
||||
{
|
||||
input.src = inputBufferPtr;
|
||||
output.dst = outputBufferPtr;
|
||||
return decompressor.NotNull().DecompressStream(ref input, ref output);
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanSeek => false;
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => throw new NotSupportedException();
|
||||
set => throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void Flush() => throw new NotSupportedException();
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count) =>
|
||||
throw new NotSupportedException();
|
||||
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (decompressor == null)
|
||||
throw new ObjectDisposedException(nameof(DecompressionStream));
|
||||
}
|
||||
|
||||
#if NETSTANDARD2_0 || NETFRAMEWORK
|
||||
public virtual Task DisposeAsync()
|
||||
{
|
||||
try
|
||||
{
|
||||
Dispose();
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
catch (Exception exc)
|
||||
{
|
||||
return Task.FromException(exc);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
176
src/SharpCompress/Compressors/ZStandard/Decompressor.cs
Normal file
@@ -0,0 +1,176 @@
|
||||
using System;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
public unsafe class Decompressor : IDisposable
|
||||
{
|
||||
private readonly SafeDctxHandle handle;
|
||||
|
||||
public Decompressor()
|
||||
{
|
||||
handle = SafeDctxHandle.Create();
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_dParameter parameter, int value)
|
||||
{
|
||||
using var dctx = handle.Acquire();
|
||||
Unsafe.Methods.ZSTD_DCtx_setParameter(dctx, parameter, value).EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_dParameter parameter)
|
||||
{
|
||||
using var dctx = handle.Acquire();
|
||||
int value;
|
||||
Unsafe.Methods.ZSTD_DCtx_getParameter(dctx, parameter, &value).EnsureZstdSuccess();
|
||||
return value;
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
var dictReadOnlySpan = new ReadOnlySpan<byte>(dict);
|
||||
this.LoadDictionary(dictReadOnlySpan);
|
||||
}
|
||||
|
||||
public void LoadDictionary(ReadOnlySpan<byte> dict)
|
||||
{
|
||||
using var dctx = handle.Acquire();
|
||||
fixed (byte* dictPtr = dict)
|
||||
Unsafe
|
||||
.Methods.ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public static ulong GetDecompressedSize(ReadOnlySpan<byte> src)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
return Unsafe
|
||||
.Methods.ZSTD_decompressBound(srcPtr, (nuint)src.Length)
|
||||
.EnsureContentSizeOk();
|
||||
}
|
||||
|
||||
public static ulong GetDecompressedSize(ArraySegment<byte> src) =>
|
||||
GetDecompressedSize((ReadOnlySpan<byte>)src);
|
||||
|
||||
public static ulong GetDecompressedSize(byte[] src, int srcOffset, int srcLength) =>
|
||||
GetDecompressedSize(new ReadOnlySpan<byte>(src, srcOffset, srcLength));
|
||||
|
||||
public Span<byte> Unwrap(ReadOnlySpan<byte> src, int maxDecompressedSize = int.MaxValue)
|
||||
{
|
||||
var expectedDstSize = GetDecompressedSize(src);
|
||||
if (expectedDstSize > (ulong)maxDecompressedSize)
|
||||
throw new ZstdException(
|
||||
ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall,
|
||||
$"Decompressed content size {expectedDstSize} is greater than {nameof(maxDecompressedSize)} {maxDecompressedSize}"
|
||||
);
|
||||
if (expectedDstSize > Constants.MaxByteArrayLength)
|
||||
throw new ZstdException(
|
||||
ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall,
|
||||
$"Decompressed content size {expectedDstSize} is greater than max possible byte array size {Constants.MaxByteArrayLength}"
|
||||
);
|
||||
|
||||
var dest = new byte[expectedDstSize];
|
||||
var length = Unwrap(src, dest);
|
||||
return new Span<byte>(dest, 0, length);
|
||||
}
|
||||
|
||||
public int Unwrap(byte[] src, byte[] dest, int offset) =>
|
||||
Unwrap(src, new Span<byte>(dest, offset, dest.Length - offset));
|
||||
|
||||
public int Unwrap(ReadOnlySpan<byte> src, Span<byte> dest)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
{
|
||||
using var dctx = handle.Acquire();
|
||||
return (int)
|
||||
Unsafe
|
||||
.Methods.ZSTD_decompressDCtx(
|
||||
dctx,
|
||||
destPtr,
|
||||
(nuint)dest.Length,
|
||||
srcPtr,
|
||||
(nuint)src.Length
|
||||
)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public int Unwrap(
|
||||
byte[] src,
|
||||
int srcOffset,
|
||||
int srcLength,
|
||||
byte[] dst,
|
||||
int dstOffset,
|
||||
int dstLength
|
||||
) =>
|
||||
Unwrap(
|
||||
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
|
||||
new Span<byte>(dst, dstOffset, dstLength)
|
||||
);
|
||||
|
||||
public bool TryUnwrap(byte[] src, byte[] dest, int offset, out int written) =>
|
||||
TryUnwrap(src, new Span<byte>(dest, offset, dest.Length - offset), out written);
|
||||
|
||||
public bool TryUnwrap(ReadOnlySpan<byte> src, Span<byte> dest, out int written)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
{
|
||||
nuint returnValue;
|
||||
using (var dctx = handle.Acquire())
|
||||
{
|
||||
returnValue = Unsafe.Methods.ZSTD_decompressDCtx(
|
||||
dctx,
|
||||
destPtr,
|
||||
(nuint)dest.Length,
|
||||
srcPtr,
|
||||
(nuint)src.Length
|
||||
);
|
||||
}
|
||||
|
||||
if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))
|
||||
{
|
||||
written = default;
|
||||
return false;
|
||||
}
|
||||
|
||||
returnValue.EnsureZstdSuccess();
|
||||
written = (int)returnValue;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
public bool TryUnwrap(
|
||||
byte[] src,
|
||||
int srcOffset,
|
||||
int srcLength,
|
||||
byte[] dst,
|
||||
int dstOffset,
|
||||
int dstLength,
|
||||
out int written
|
||||
) =>
|
||||
TryUnwrap(
|
||||
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
|
||||
new Span<byte>(dst, dstOffset, dstLength),
|
||||
out written
|
||||
);
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
handle.Dispose();
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
|
||||
internal nuint DecompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s output)
|
||||
{
|
||||
fixed (ZSTD_inBuffer_s* inputPtr = &input)
|
||||
fixed (ZSTD_outBuffer_s* outputPtr = &output)
|
||||
{
|
||||
using var dctx = handle.Acquire();
|
||||
return Unsafe
|
||||
.Methods.ZSTD_decompressStream(dctx, outputPtr, inputPtr)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
}
|
||||
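Not part of the diff: the matching one-shot decompression sketch. Unwrap sizes the destination from the frame header (GetDecompressedSize) and throws a ZstdException when that size is missing or exceeds the allowed maximum; the helper name is hypothetical.

using System;
using SharpCompress.Compressors.ZStandard;

static byte[] DecompressOnce(byte[] compressedFrame)
{
    using var decompressor = new Decompressor();
    return decompressor.Unwrap(compressedFrame).ToArray();
}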
141
src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs
Normal file
@@ -0,0 +1,141 @@
|
||||
using System;
|
||||
using System.Collections.Concurrent;
|
||||
using System.Collections.Generic;
|
||||
using System.Threading;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
internal unsafe class JobThreadPool : IDisposable
|
||||
{
|
||||
private int numThreads;
|
||||
private readonly List<JobThread> threads;
|
||||
private readonly BlockingCollection<Job> queue;
|
||||
|
||||
private struct Job
|
||||
{
|
||||
public void* function;
|
||||
public void* opaque;
|
||||
}
|
||||
|
||||
private class JobThread
|
||||
{
|
||||
private Thread Thread { get; }
|
||||
public CancellationTokenSource CancellationTokenSource { get; }
|
||||
|
||||
public JobThread(Thread thread)
|
||||
{
|
||||
CancellationTokenSource = new CancellationTokenSource();
|
||||
Thread = thread;
|
||||
}
|
||||
|
||||
public void Start()
|
||||
{
|
||||
Thread.Start(this);
|
||||
}
|
||||
|
||||
public void Cancel()
|
||||
{
|
||||
CancellationTokenSource.Cancel();
|
||||
}
|
||||
|
||||
public void Join()
|
||||
{
|
||||
Thread.Join();
|
||||
}
|
||||
}
|
||||
|
||||
private void Worker(object? obj)
|
||||
{
|
||||
if (obj is not JobThread poolThread)
|
||||
return;
|
||||
|
||||
var cancellationToken = poolThread.CancellationTokenSource.Token;
|
||||
while (!queue.IsCompleted && !cancellationToken.IsCancellationRequested)
|
||||
{
|
||||
try
|
||||
{
|
||||
if (queue.TryTake(out var job, -1, cancellationToken))
|
||||
((delegate* managed<void*, void>)job.function)(job.opaque);
|
||||
}
|
||||
catch (InvalidOperationException) { }
|
||||
catch (OperationCanceledException) { }
|
||||
}
|
||||
}
|
||||
|
||||
public JobThreadPool(int num, int queueSize)
|
||||
{
|
||||
numThreads = num;
|
||||
queue = new BlockingCollection<Job>(queueSize + 1);
|
||||
threads = new List<JobThread>(num);
|
||||
for (var i = 0; i < numThreads; i++)
|
||||
CreateThread();
|
||||
}
|
||||
|
||||
private void CreateThread()
|
||||
{
|
||||
var poolThread = new JobThread(new Thread(Worker));
|
||||
threads.Add(poolThread);
|
||||
poolThread.Start();
|
||||
}
|
||||
|
||||
public void Resize(int num)
|
||||
{
|
||||
lock (threads)
|
||||
{
|
||||
if (num < numThreads)
|
||||
{
|
||||
for (var i = numThreads - 1; i >= num; i--)
|
||||
{
|
||||
threads[i].Cancel();
|
||||
threads.RemoveAt(i);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (var i = numThreads; i < num; i++)
|
||||
CreateThread();
|
||||
}
|
||||
}
|
||||
|
||||
numThreads = num;
|
||||
}
|
||||
|
||||
public void Add(void* function, void* opaque)
|
||||
{
|
||||
queue.Add(new Job { function = function, opaque = opaque });
|
||||
}
|
||||
|
||||
public bool TryAdd(void* function, void* opaque)
|
||||
{
|
||||
return queue.TryAdd(new Job { function = function, opaque = opaque });
|
||||
}
|
||||
|
||||
public void Join(bool cancel = true)
|
||||
{
|
||||
queue.CompleteAdding();
|
||||
List<JobThread> jobThreads;
|
||||
lock (threads)
|
||||
jobThreads = new List<JobThread>(threads);
|
||||
|
||||
if (cancel)
|
||||
{
|
||||
foreach (var thread in jobThreads)
|
||||
thread.Cancel();
|
||||
}
|
||||
|
||||
foreach (var thread in jobThreads)
|
||||
thread.Join();
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
queue.Dispose();
|
||||
}
|
||||
|
||||
public int Size()
|
||||
{
|
||||
// todo not implemented
|
||||
// https://github.com/dotnet/runtime/issues/24200
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
163
src/SharpCompress/Compressors/ZStandard/SafeHandles.cs
Normal file
@@ -0,0 +1,163 @@
|
||||
using System;
|
||||
using System.Runtime.InteropServices;
|
||||
using SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard;
|
||||
|
||||
/// <summary>
|
||||
/// Provides the base class for ZstdSharp <see cref="SafeHandle"/> implementations.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// Even though ZstdSharp is a managed library, its internals are using unmanaged
|
||||
/// memory and we are using safe handles in the library's high-level API to ensure
|
||||
/// proper disposal of unmanaged resources and increase safety.
|
||||
/// </remarks>
|
||||
/// <seealso cref="SafeCctxHandle"/>
|
||||
/// <seealso cref="SafeDctxHandle"/>
|
||||
internal abstract unsafe class SafeZstdHandle : SafeHandle
|
||||
{
|
||||
/// <summary>
|
||||
/// Parameterless constructor is hidden. Use the static <c>Create</c> factory
|
||||
/// method to create a new safe handle instance.
|
||||
/// </summary>
|
||||
protected SafeZstdHandle()
|
||||
: base(IntPtr.Zero, true) { }
|
||||
|
||||
public sealed override bool IsInvalid => handle == IntPtr.Zero;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Safely wraps an unmanaged Zstd compression context.
|
||||
/// </summary>
|
||||
internal sealed unsafe class SafeCctxHandle : SafeZstdHandle
|
||||
{
|
||||
/// <inheritdoc/>
|
||||
private SafeCctxHandle() { }
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new instance of <see cref="SafeCctxHandle"/>.
|
||||
/// </summary>
|
||||
/// <returns></returns>
|
||||
/// <exception cref="ZstdException">Creation failed.</exception>
|
||||
public static SafeCctxHandle Create()
|
||||
{
|
||||
var safeHandle = new SafeCctxHandle();
|
||||
bool success = false;
|
||||
try
|
||||
{
|
||||
var cctx = Unsafe.Methods.ZSTD_createCCtx();
|
||||
if (cctx == null)
|
||||
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create cctx");
|
||||
safeHandle.SetHandle((IntPtr)cctx);
|
||||
success = true;
|
||||
}
|
||||
finally
|
||||
{
|
||||
if (!success)
|
||||
{
|
||||
safeHandle.SetHandleAsInvalid();
|
||||
}
|
||||
}
|
||||
return safeHandle;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Acquires a reference to the safe handle.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// A <see cref="SafeHandleHolder{T}"/> instance that can be implicitly converted to a pointer
|
||||
/// to <see cref="ZSTD_CCtx_s"/>.
|
||||
/// </returns>
|
||||
public SafeHandleHolder<ZSTD_CCtx_s> Acquire() => new(this);
|
||||
|
||||
protected override bool ReleaseHandle()
|
||||
{
|
||||
return Unsafe.Methods.ZSTD_freeCCtx((ZSTD_CCtx_s*)handle) == 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Safely wraps an unmanaged Zstd decompression context.
|
||||
/// </summary>
|
||||
internal sealed unsafe class SafeDctxHandle : SafeZstdHandle
|
||||
{
|
||||
/// <inheritdoc/>
|
||||
private SafeDctxHandle() { }
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new instance of <see cref="SafeDctxHandle"/>.
|
||||
/// </summary>
|
||||
/// <returns></returns>
|
||||
/// <exception cref="ZstdException">Creation failed.</exception>
|
||||
public static SafeDctxHandle Create()
|
||||
{
|
||||
var safeHandle = new SafeDctxHandle();
|
||||
bool success = false;
|
||||
try
|
||||
{
|
||||
var dctx = Unsafe.Methods.ZSTD_createDCtx();
|
||||
if (dctx == null)
|
||||
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create dctx");
|
||||
safeHandle.SetHandle((IntPtr)dctx);
|
||||
success = true;
|
||||
}
|
||||
finally
|
||||
{
|
||||
if (!success)
|
||||
{
|
||||
safeHandle.SetHandleAsInvalid();
|
||||
}
|
||||
}
|
||||
return safeHandle;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Acquires a reference to the safe handle.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// A <see cref="SafeHandleHolder{T}"/> instance that can be implicitly converted to a pointer
|
||||
/// to <see cref="ZSTD_DCtx_s"/>.
|
||||
/// </returns>
|
||||
public SafeHandleHolder<ZSTD_DCtx_s> Acquire() => new(this);
|
||||
|
||||
protected override bool ReleaseHandle()
|
||||
{
|
||||
return Unsafe.Methods.ZSTD_freeDCtx((ZSTD_DCtx_s*)handle) == 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Provides a convenient interface to safely acquire pointers of a specific type
|
||||
/// from a <see cref="SafeHandle"/>, by utilizing <see langword="using"/> blocks.
|
||||
/// </summary>
|
||||
/// <typeparam name="T">The type of pointers to return.</typeparam>
|
||||
/// <remarks>
|
||||
/// Safe handle holders can be <see cref="Dispose"/>d to decrement the safe handle's
|
||||
/// reference count, and can be implicitly converted to pointers to <see cref="T"/>.
|
||||
/// </remarks>
|
||||
internal unsafe ref struct SafeHandleHolder<T>
|
||||
where T : unmanaged
|
||||
{
|
||||
private readonly SafeHandle _handle;
|
||||
|
||||
private bool _refAdded;
|
||||
|
||||
public SafeHandleHolder(SafeHandle safeHandle)
|
||||
{
|
||||
_handle = safeHandle;
|
||||
_refAdded = false;
|
||||
safeHandle.DangerousAddRef(ref _refAdded);
|
||||
}
|
||||
|
||||
public static implicit operator T*(SafeHandleHolder<T> holder) =>
|
||||
(T*)holder._handle.DangerousGetHandle();
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (_refAdded)
|
||||
{
|
||||
_handle.DangerousRelease();
|
||||
_refAdded = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
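Not part of the diff: a sketch of how code inside the library borrows the native context through these handles; it mirrors Compressor.GetParameter, and the wrapper class name is hypothetical.

using SharpCompress.Compressors.ZStandard;
using SharpCompress.Compressors.ZStandard.Unsafe;

internal static unsafe class SafeHandleUsageSketch
{
    internal static int GetCompressionLevel(SafeCctxHandle handle)
    {
        // Acquire() adds a reference; disposing the holder releases it. The holder
        // converts implicitly to ZSTD_CCtx_s* for the duration of the using block.
        using var cctx = handle.Acquire();
        int value;
        Methods.ZSTD_CCtx_getParameter(cctx, ZSTD_cParameter.ZSTD_c_compressionLevel, &value)
            .EnsureZstdSuccess();
        return value;
    }
}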
@@ -0,0 +1,22 @@
using System.Threading;

namespace SharpCompress.Compressors.ZStandard;

internal static unsafe class SynchronizationWrapper
{
    private static object UnwrapObject(void** obj) => UnmanagedObject.Unwrap<object>(*obj);

    public static void Init(void** obj) => *obj = UnmanagedObject.Wrap(new object());

    public static void Free(void** obj) => UnmanagedObject.Free(*obj);

    public static void Enter(void** obj) => Monitor.Enter(UnwrapObject(obj));

    public static void Exit(void** obj) => Monitor.Exit(UnwrapObject(obj));

    public static void Pulse(void** obj) => Monitor.Pulse(UnwrapObject(obj));

    public static void PulseAll(void** obj) => Monitor.PulseAll(UnwrapObject(obj));

    public static void Wait(void** mutex) => Monitor.Wait(UnwrapObject(mutex));
}
48
src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs
Normal file
@@ -0,0 +1,48 @@
using SharpCompress.Compressors.ZStandard.Unsafe;

namespace SharpCompress.Compressors.ZStandard;

public static unsafe class ThrowHelper
{
    private const ulong ZSTD_CONTENTSIZE_UNKNOWN = unchecked(0UL - 1);
    private const ulong ZSTD_CONTENTSIZE_ERROR = unchecked(0UL - 2);

    public static nuint EnsureZstdSuccess(this nuint returnValue)
    {
        if (Unsafe.Methods.ZSTD_isError(returnValue))
            ThrowException(returnValue, Unsafe.Methods.ZSTD_getErrorName(returnValue));

        return returnValue;
    }

    public static nuint EnsureZdictSuccess(this nuint returnValue)
    {
        if (Unsafe.Methods.ZDICT_isError(returnValue))
            ThrowException(returnValue, Unsafe.Methods.ZDICT_getErrorName(returnValue));

        return returnValue;
    }

    public static ulong EnsureContentSizeOk(this ulong returnValue)
    {
        if (returnValue == ZSTD_CONTENTSIZE_UNKNOWN)
            throw new ZstdException(
                ZSTD_ErrorCode.ZSTD_error_GENERIC,
                "Decompressed content size is not specified"
            );

        if (returnValue == ZSTD_CONTENTSIZE_ERROR)
            throw new ZstdException(
                ZSTD_ErrorCode.ZSTD_error_GENERIC,
                "Decompressed content size cannot be determined (e.g. invalid magic number, srcSize too small)"
            );

        return returnValue;
    }

    private static void ThrowException(nuint returnValue, string message)
    {
        var code = 0 - returnValue;
        throw new ZstdException((ZSTD_ErrorCode)code, message);
    }
}
18
src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs
Normal file
@@ -0,0 +1,18 @@
using System;
using System.Runtime.InteropServices;

namespace SharpCompress.Compressors.ZStandard;

/*
 * Wrap object to void* to make it unmanaged
 */
internal static unsafe class UnmanagedObject
{
    public static void* Wrap(object obj) => (void*)GCHandle.ToIntPtr(GCHandle.Alloc(obj));

    private static GCHandle UnwrapGcHandle(void* value) => GCHandle.FromIntPtr((IntPtr)value);

    public static T Unwrap<T>(void* value) => (T)UnwrapGcHandle(value).Target!;

    public static void Free(void* value) => UnwrapGcHandle(value).Free();
}
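Not part of the diff: a standalone sketch of the wrap/unwrap contract these two helpers provide to the ported zstd code; the class and method names are hypothetical.

using SharpCompress.Compressors.ZStandard;

internal static unsafe class UnmanagedObjectSketch
{
    internal static void Demo()
    {
        // SynchronizationWrapper keeps a GCHandle-backed object behind a void*.
        void* mutex;
        SynchronizationWrapper.Init(&mutex);
        SynchronizationWrapper.Enter(&mutex); // Monitor.Enter on the wrapped object
        SynchronizationWrapper.Exit(&mutex);
        SynchronizationWrapper.Free(&mutex);

        // UnmanagedObject is the underlying wrap/unwrap primitive.
        var wrapped = UnmanagedObject.Wrap("payload");
        var unwrapped = UnmanagedObject.Unwrap<string>(wrapped);
        UnmanagedObject.Free(wrapped);
        _ = unwrapped;
    }
}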
@@ -0,0 +1,52 @@
using System.Runtime.CompilerServices;
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;

namespace SharpCompress.Compressors.ZStandard.Unsafe;

public static unsafe partial class Methods
{
    /* custom memory allocation functions */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void* ZSTD_customMalloc(nuint size, ZSTD_customMem customMem)
    {
        if (customMem.customAlloc != null)
            return ((delegate* managed<void*, nuint, void*>)customMem.customAlloc)(
                customMem.opaque,
                size
            );
        return malloc(size);
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void* ZSTD_customCalloc(nuint size, ZSTD_customMem customMem)
    {
        if (customMem.customAlloc != null)
        {
            /* calloc implemented as malloc+memset;
             * not as efficient as calloc, but next best guess for custom malloc */
            void* ptr = ((delegate* managed<void*, nuint, void*>)customMem.customAlloc)(
                customMem.opaque,
                size
            );
            memset(ptr, 0, (uint)size);
            return ptr;
        }

        return calloc(1, size);
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
    {
        if (ptr != null)
        {
            if (customMem.customFree != null)
                ((delegate* managed<void*, void*, void>)customMem.customFree)(
                    customMem.opaque,
                    ptr
                );
            else
                free(ptr);
        }
    }
}
@@ -0,0 +1,14 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* bitStream can mix input from multiple sources.
|
||||
* A critical property of these streams is that they encode and decode in **reverse** direction.
|
||||
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
|
||||
*/
|
||||
public unsafe struct BIT_CStream_t
|
||||
{
|
||||
public nuint bitContainer;
|
||||
public uint bitPos;
|
||||
public sbyte* startPtr;
|
||||
public sbyte* ptr;
|
||||
public sbyte* endPtr;
|
||||
}
|
||||
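Editorial note, not part of the diff: a simplified, standalone illustration of the LIFO property described in the comment above. It does not use the private BIT_* helpers; it only shows that fields packed in write order come back out starting from the most recently written one.

using System;

internal static class LifoBitPackingSketch
{
    public static void Main()
    {
        // Encode: 5 bits of A first, then 3 bits of B above them.
        uint a = 0b10101, b = 0b101;
        int pos = 0;
        ulong container = 0;
        container |= (ulong)a << pos; pos += 5;
        container |= (ulong)b << pos; pos += 3;

        // Decode in reverse: B (written last) is read first, A is read last.
        uint readB = (uint)(container >> 5) & 0b111;
        uint readA = (uint)container & 0b11111;
        Console.WriteLine((readB == b, readA == a));   // (True, True)
    }
}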
@@ -0,0 +1,16 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public enum BIT_DStream_status
|
||||
{
|
||||
/* fully refilled */
|
||||
BIT_DStream_unfinished = 0,
|
||||
|
||||
/* still some bits left in bitstream */
|
||||
BIT_DStream_endOfBuffer = 1,
|
||||
|
||||
/* bitstream entirely consumed, bit-exact */
|
||||
BIT_DStream_completed = 2,
|
||||
|
||||
/* user requested more bits than present in bitstream */
|
||||
BIT_DStream_overflow = 3,
|
||||
}
|
||||
@@ -0,0 +1,13 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-********************************************
|
||||
* bitStream decoding API (read backward)
|
||||
**********************************************/
|
||||
public unsafe struct BIT_DStream_t
|
||||
{
|
||||
public nuint bitContainer;
|
||||
public uint bitsConsumed;
|
||||
public sbyte* ptr;
|
||||
public sbyte* start;
|
||||
public sbyte* limitPtr;
|
||||
}
|
||||
60
src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs
Normal file
@@ -0,0 +1,60 @@
|
||||
using System;
|
||||
using System.Numerics;
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_countTrailingZeros32(uint val)
|
||||
{
|
||||
assert(val != 0);
|
||||
return (uint)BitOperations.TrailingZeroCount(val);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_countLeadingZeros32(uint val)
|
||||
{
|
||||
assert(val != 0);
|
||||
return (uint)BitOperations.LeadingZeroCount(val);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_countTrailingZeros64(ulong val)
|
||||
{
|
||||
assert(val != 0);
|
||||
return (uint)BitOperations.TrailingZeroCount(val);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_countLeadingZeros64(ulong val)
|
||||
{
|
||||
assert(val != 0);
|
||||
return (uint)BitOperations.LeadingZeroCount(val);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_NbCommonBytes(nuint val)
|
||||
{
|
||||
assert(val != 0);
|
||||
if (BitConverter.IsLittleEndian)
|
||||
{
|
||||
return MEM_64bits
|
||||
? (uint)BitOperations.TrailingZeroCount(val) >> 3
|
||||
: (uint)BitOperations.TrailingZeroCount((uint)val) >> 3;
|
||||
}
|
||||
|
||||
return MEM_64bits
|
||||
? (uint)BitOperations.LeadingZeroCount(val) >> 3
|
||||
: (uint)BitOperations.LeadingZeroCount((uint)val) >> 3;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ZSTD_highbit32(uint val)
|
||||
{
|
||||
assert(val != 0);
|
||||
return (uint)BitOperations.Log2(val);
|
||||
}
|
||||
}
|
||||
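Editorial note, not part of the diff: the helpers above feed zstd's match-length search. After XOR-ing two little-endian loads, the bit index of the lowest set bit divided by 8 is the number of leading bytes that matched, which is what ZSTD_NbCommonBytes computes on a little-endian 64-bit machine. A small standalone check with arbitrary values:

using System;
using System.Numerics;

internal static class NbCommonBytesSketch
{
    public static void Main()
    {
        // Two 8-byte little-endian words that agree in their first 5 bytes of memory order.
        ulong x = 0x1122334455667788UL;
        ulong y = 0x1122AA4455667788UL;   // differs at byte index 5
        ulong diff = x ^ y;
        uint commonBytes = (uint)BitOperations.TrailingZeroCount(diff) >> 3;
        Console.WriteLine(commonBytes);    // prints 5
    }
}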
739
src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs
Normal file
@@ -0,0 +1,739 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
#if NETCOREAPP3_0_OR_GREATER
|
||||
using System.Runtime.Intrinsics.X86;
|
||||
#endif
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
#if NET7_0_OR_GREATER
|
||||
private static ReadOnlySpan<uint> Span_BIT_mask =>
|
||||
new uint[32]
|
||||
{
|
||||
0,
|
||||
1,
|
||||
3,
|
||||
7,
|
||||
0xF,
|
||||
0x1F,
|
||||
0x3F,
|
||||
0x7F,
|
||||
0xFF,
|
||||
0x1FF,
|
||||
0x3FF,
|
||||
0x7FF,
|
||||
0xFFF,
|
||||
0x1FFF,
|
||||
0x3FFF,
|
||||
0x7FFF,
|
||||
0xFFFF,
|
||||
0x1FFFF,
|
||||
0x3FFFF,
|
||||
0x7FFFF,
|
||||
0xFFFFF,
|
||||
0x1FFFFF,
|
||||
0x3FFFFF,
|
||||
0x7FFFFF,
|
||||
0xFFFFFF,
|
||||
0x1FFFFFF,
|
||||
0x3FFFFFF,
|
||||
0x7FFFFFF,
|
||||
0xFFFFFFF,
|
||||
0x1FFFFFFF,
|
||||
0x3FFFFFFF,
|
||||
0x7FFFFFFF,
|
||||
};
|
||||
private static uint* BIT_mask =>
|
||||
(uint*)
|
||||
System.Runtime.CompilerServices.Unsafe.AsPointer(
|
||||
ref MemoryMarshal.GetReference(Span_BIT_mask)
|
||||
);
|
||||
#else
|
||||
|
||||
private static readonly uint* BIT_mask = GetArrayPointer(
|
||||
new uint[32]
|
||||
{
|
||||
0,
|
||||
1,
|
||||
3,
|
||||
7,
|
||||
0xF,
|
||||
0x1F,
|
||||
0x3F,
|
||||
0x7F,
|
||||
0xFF,
|
||||
0x1FF,
|
||||
0x3FF,
|
||||
0x7FF,
|
||||
0xFFF,
|
||||
0x1FFF,
|
||||
0x3FFF,
|
||||
0x7FFF,
|
||||
0xFFFF,
|
||||
0x1FFFF,
|
||||
0x3FFFF,
|
||||
0x7FFFF,
|
||||
0xFFFFF,
|
||||
0x1FFFFF,
|
||||
0x3FFFFF,
|
||||
0x7FFFFF,
|
||||
0xFFFFFF,
|
||||
0x1FFFFFF,
|
||||
0x3FFFFFF,
|
||||
0x7FFFFFF,
|
||||
0xFFFFFFF,
|
||||
0x1FFFFFFF,
|
||||
0x3FFFFFFF,
|
||||
0x7FFFFFFF,
|
||||
}
|
||||
);
|
||||
#endif
|
||||
/*-**************************************************************
|
||||
* bitStream encoding
|
||||
****************************************************************/
|
||||
/*! BIT_initCStream() :
|
||||
* `dstCapacity` must be > sizeof(size_t)
|
||||
* @return : 0 if success,
|
||||
* otherwise an error code (can be tested using ERR_isError()) */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_initCStream(ref BIT_CStream_t bitC, void* startPtr, nuint dstCapacity)
|
||||
{
|
||||
bitC.bitContainer = 0;
|
||||
bitC.bitPos = 0;
|
||||
bitC.startPtr = (sbyte*)startPtr;
|
||||
bitC.ptr = bitC.startPtr;
|
||||
bitC.endPtr = bitC.startPtr + dstCapacity - sizeof(nuint);
|
||||
if (dstCapacity <= (nuint)sizeof(nuint))
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
|
||||
return 0;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits)
|
||||
{
|
||||
assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
|
||||
#if NETCOREAPP3_1_OR_GREATER
|
||||
if (Bmi2.X64.IsSupported)
|
||||
{
|
||||
return (nuint)Bmi2.X64.ZeroHighBits(bitContainer, nbBits);
|
||||
}
|
||||
|
||||
if (Bmi2.IsSupported)
|
||||
{
|
||||
return Bmi2.ZeroHighBits((uint)bitContainer, nbBits);
|
||||
}
|
||||
#endif
|
||||
|
||||
return bitContainer & BIT_mask[nbBits];
|
||||
}
|
||||
|
||||
/*! BIT_addBits() :
|
||||
* can add up to 31 bits into `bitC`.
|
||||
* Note : does not check for register overflow ! */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_addBits(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
nuint value,
|
||||
uint nbBits
|
||||
)
|
||||
{
|
||||
assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
|
||||
assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8));
|
||||
bitC_bitContainer |= BIT_getLowerBits(value, nbBits) << (int)bitC_bitPos;
|
||||
bitC_bitPos += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_addBitsFast() :
|
||||
* works only if `value` is _clean_,
|
||||
* meaning all high bits above nbBits are 0 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_addBitsFast(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
nuint value,
|
||||
uint nbBits
|
||||
)
|
||||
{
|
||||
assert(value >> (int)nbBits == 0);
|
||||
assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8));
|
||||
bitC_bitContainer |= value << (int)bitC_bitPos;
|
||||
bitC_bitPos += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_flushBitsFast() :
|
||||
* assumption : bitContainer has not overflowed
|
||||
* unsafe version; does not check buffer overflow */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_flushBitsFast(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
ref sbyte* bitC_ptr,
|
||||
sbyte* bitC_endPtr
|
||||
)
|
||||
{
|
||||
nuint nbBytes = bitC_bitPos >> 3;
|
||||
assert(bitC_bitPos < (uint)(sizeof(nuint) * 8));
|
||||
assert(bitC_ptr <= bitC_endPtr);
|
||||
MEM_writeLEST(bitC_ptr, bitC_bitContainer);
|
||||
bitC_ptr += nbBytes;
|
||||
bitC_bitPos &= 7;
|
||||
bitC_bitContainer >>= (int)(nbBytes * 8);
|
||||
}
|
||||
|
||||
/*! BIT_flushBits() :
|
||||
* assumption : bitContainer has not overflowed
|
||||
* safe version; check for buffer overflow, and prevents it.
|
||||
* note : does not signal buffer overflow.
|
||||
* overflow will be revealed later on using BIT_closeCStream() */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_flushBits(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
ref sbyte* bitC_ptr,
|
||||
sbyte* bitC_endPtr
|
||||
)
|
||||
{
|
||||
nuint nbBytes = bitC_bitPos >> 3;
|
||||
assert(bitC_bitPos < (uint)(sizeof(nuint) * 8));
|
||||
assert(bitC_ptr <= bitC_endPtr);
|
||||
MEM_writeLEST(bitC_ptr, bitC_bitContainer);
|
||||
bitC_ptr += nbBytes;
|
||||
if (bitC_ptr > bitC_endPtr)
|
||||
bitC_ptr = bitC_endPtr;
|
||||
bitC_bitPos &= 7;
|
||||
bitC_bitContainer >>= (int)(nbBytes * 8);
|
||||
}
|
||||
|
||||
/*! BIT_closeCStream() :
|
||||
* @return : size of CStream, in bytes,
|
||||
* or 0 if it could not fit into dstBuffer */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_closeCStream(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
sbyte* bitC_ptr,
|
||||
sbyte* bitC_endPtr,
|
||||
sbyte* bitC_startPtr
|
||||
)
|
||||
{
|
||||
BIT_addBitsFast(ref bitC_bitContainer, ref bitC_bitPos, 1, 1);
|
||||
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
|
||||
if (bitC_ptr >= bitC_endPtr)
|
||||
return 0;
|
||||
return (nuint)(bitC_ptr - bitC_startPtr) + (nuint)(bitC_bitPos > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
/*-********************************************************
|
||||
* bitStream decoding
|
||||
**********************************************************/
|
||||
/*! BIT_initDStream() :
|
||||
* Initialize a BIT_DStream_t.
|
||||
* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
|
||||
* `srcSize` must be the *exact* size of the bitStream, in bytes.
|
||||
* @return : size of stream (== srcSize), or an errorCode if a problem is detected
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint srcSize)
|
||||
{
|
||||
if (srcSize < 1)
|
||||
{
|
||||
*bitD = new BIT_DStream_t();
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
}
|
||||
|
||||
bitD->start = (sbyte*)srcBuffer;
|
||||
bitD->limitPtr = bitD->start + sizeof(nuint);
|
||||
if (srcSize >= (nuint)sizeof(nuint))
|
||||
{
|
||||
bitD->ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint);
|
||||
bitD->bitContainer = MEM_readLEST(bitD->ptr);
|
||||
{
|
||||
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
|
||||
bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
|
||||
if (lastByte == 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
bitD->ptr = bitD->start;
|
||||
bitD->bitContainer = *(byte*)bitD->start;
|
||||
switch (srcSize)
|
||||
{
|
||||
case 7:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16;
|
||||
goto case 6;
|
||||
case 6:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24;
|
||||
goto case 5;
|
||||
case 5:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32;
|
||||
goto case 4;
|
||||
case 4:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[3] << 24;
|
||||
goto case 3;
|
||||
case 3:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[2] << 16;
|
||||
goto case 2;
|
||||
case 2:
|
||||
bitD->bitContainer += (nuint)((byte*)srcBuffer)[1] << 8;
|
||||
goto default;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
{
|
||||
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
|
||||
bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
|
||||
if (lastByte == 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
}
|
||||
|
||||
bitD->bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8;
|
||||
}
|
||||
|
||||
return srcSize;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_getUpperBits(nuint bitContainer, uint start)
|
||||
{
|
||||
return bitContainer >> (int)start;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBits)
|
||||
{
|
||||
uint regMask = (uint)(sizeof(nuint) * 8 - 1);
|
||||
assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
|
||||
#if NETCOREAPP3_1_OR_GREATER
|
||||
if (Bmi2.X64.IsSupported)
|
||||
{
|
||||
return (nuint)Bmi2.X64.ZeroHighBits(bitContainer >> (int)(start & regMask), nbBits);
|
||||
}
|
||||
|
||||
if (Bmi2.IsSupported)
|
||||
{
|
||||
return Bmi2.ZeroHighBits((uint)(bitContainer >> (int)(start & regMask)), nbBits);
|
||||
}
|
||||
#endif
|
||||
|
||||
return (nuint)(bitContainer >> (int)(start & regMask) & ((ulong)1 << (int)nbBits) - 1);
|
||||
}
|
||||
|
||||
/*! BIT_lookBits() :
|
||||
* Provides next n bits from local register.
|
||||
* local register is not modified.
|
||||
* On 32-bits, maxNbBits==24.
|
||||
* On 64-bits, maxNbBits==56.
|
||||
* @return : value extracted */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_lookBits(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
return BIT_getMiddleBits(
|
||||
bitD->bitContainer,
|
||||
(uint)(sizeof(nuint) * 8) - bitD->bitsConsumed - nbBits,
|
||||
nbBits
|
||||
);
|
||||
}
|
||||
|
||||
/*! BIT_lookBitsFast() :
|
||||
* unsafe version; only works if nbBits >= 1 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_lookBitsFast(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
uint regMask = (uint)(sizeof(nuint) * 8 - 1);
|
||||
assert(nbBits >= 1);
|
||||
return bitD->bitContainer
|
||||
<< (int)(bitD->bitsConsumed & regMask)
|
||||
>> (int)(regMask + 1 - nbBits & regMask);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_skipBits(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
bitD->bitsConsumed += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_readBits() :
|
||||
* Read (consume) next n bits from local register and update.
|
||||
* Take care not to read more than the nbBits currently contained in the local register.
|
||||
* @return : extracted value. */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_readBits(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
nuint value = BIT_lookBits(bitD, nbBits);
|
||||
BIT_skipBits(bitD, nbBits);
|
||||
return value;
|
||||
}
|
||||
|
||||
/*! BIT_readBitsFast() :
|
||||
* unsafe version; only works if nbBits >= 1 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_readBitsFast(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
nuint value = BIT_lookBitsFast(bitD, nbBits);
|
||||
assert(nbBits >= 1);
|
||||
BIT_skipBits(bitD, nbBits);
|
||||
return value;
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStream_internal() :
|
||||
* Simple variant of BIT_reloadDStream(), with two conditions:
|
||||
* 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
|
||||
* 2. look window is valid after shifted down : bitD->ptr >= bitD->start
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD)
|
||||
{
|
||||
assert(bitD->bitsConsumed <= (uint)(sizeof(nuint) * 8));
|
||||
bitD->ptr -= bitD->bitsConsumed >> 3;
|
||||
assert(bitD->ptr >= bitD->start);
|
||||
bitD->bitsConsumed &= 7;
|
||||
bitD->bitContainer = MEM_readLEST(bitD->ptr);
|
||||
return BIT_DStream_status.BIT_DStream_unfinished;
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStreamFast() :
|
||||
* Similar to BIT_reloadDStream(), but with two differences:
|
||||
* 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
|
||||
* 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
|
||||
* point you must use BIT_reloadDStream() to reload.
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
|
||||
{
|
||||
if (bitD->ptr < bitD->limitPtr)
|
||||
return BIT_DStream_status.BIT_DStream_overflow;
|
||||
return BIT_reloadDStream_internal(bitD);
|
||||
}
|
||||
|
||||
#if NET7_0_OR_GREATER
|
||||
private static ReadOnlySpan<byte> Span_static_zeroFilled =>
|
||||
new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 };
|
||||
private static nuint* static_zeroFilled =>
|
||||
(nuint*)
|
||||
System.Runtime.CompilerServices.Unsafe.AsPointer(
|
||||
ref MemoryMarshal.GetReference(Span_static_zeroFilled)
|
||||
);
|
||||
#else
|
||||
|
||||
private static readonly nuint* static_zeroFilled = (nuint*)GetArrayPointer(
|
||||
new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }
|
||||
);
|
||||
#endif
|
||||
/*! BIT_reloadDStream() :
|
||||
* Refill `bitD` from buffer previously set in BIT_initDStream() .
|
||||
* This function is safe: it guarantees it will never read beyond the src buffer.
|
||||
* @return : status of `BIT_DStream_t` internal register.
|
||||
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
|
||||
{
|
||||
if (bitD->bitsConsumed > (uint)(sizeof(nuint) * 8))
|
||||
{
|
||||
bitD->ptr = (sbyte*)&static_zeroFilled[0];
|
||||
return BIT_DStream_status.BIT_DStream_overflow;
|
||||
}
|
||||
|
||||
assert(bitD->ptr >= bitD->start);
|
||||
if (bitD->ptr >= bitD->limitPtr)
|
||||
{
|
||||
return BIT_reloadDStream_internal(bitD);
|
||||
}
|
||||
|
||||
if (bitD->ptr == bitD->start)
|
||||
{
|
||||
if (bitD->bitsConsumed < (uint)(sizeof(nuint) * 8))
|
||||
return BIT_DStream_status.BIT_DStream_endOfBuffer;
|
||||
return BIT_DStream_status.BIT_DStream_completed;
|
||||
}
|
||||
|
||||
{
|
||||
uint nbBytes = bitD->bitsConsumed >> 3;
|
||||
BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished;
|
||||
if (bitD->ptr - nbBytes < bitD->start)
|
||||
{
|
||||
nbBytes = (uint)(bitD->ptr - bitD->start);
|
||||
result = BIT_DStream_status.BIT_DStream_endOfBuffer;
|
||||
}
|
||||
|
||||
bitD->ptr -= nbBytes;
|
||||
bitD->bitsConsumed -= nbBytes * 8;
|
||||
bitD->bitContainer = MEM_readLEST(bitD->ptr);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
/*! BIT_endOfDStream() :
|
||||
* @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint BIT_endOfDStream(BIT_DStream_t* DStream)
|
||||
{
|
||||
return DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8)
|
||||
? 1U
|
||||
: 0U;
|
||||
}
|
||||
|
||||
/*-********************************************************
|
||||
* bitStream decoding
|
||||
**********************************************************/
|
||||
/*! BIT_initDStream() :
|
||||
* Initialize a BIT_DStream_t.
|
||||
* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
|
||||
* `srcSize` must be the *exact* size of the bitStream, in bytes.
|
||||
* @return : size of stream (== srcSize), or an errorCode if a problem is detected
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nuint srcSize)
|
||||
{
|
||||
if (srcSize < 1)
|
||||
{
|
||||
bitD = new BIT_DStream_t();
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
}
|
||||
|
||||
bitD.start = (sbyte*)srcBuffer;
|
||||
bitD.limitPtr = bitD.start + sizeof(nuint);
|
||||
if (srcSize >= (nuint)sizeof(nuint))
|
||||
{
|
||||
bitD.ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint);
|
||||
bitD.bitContainer = MEM_readLEST(bitD.ptr);
|
||||
{
|
||||
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
|
||||
bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
|
||||
if (lastByte == 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
bitD.ptr = bitD.start;
|
||||
bitD.bitContainer = *(byte*)bitD.start;
|
||||
switch (srcSize)
|
||||
{
|
||||
case 7:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16;
|
||||
goto case 6;
|
||||
case 6:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24;
|
||||
goto case 5;
|
||||
case 5:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32;
|
||||
goto case 4;
|
||||
case 4:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[3] << 24;
|
||||
goto case 3;
|
||||
case 3:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[2] << 16;
|
||||
goto case 2;
|
||||
case 2:
|
||||
bitD.bitContainer += (nuint)((byte*)srcBuffer)[1] << 8;
|
||||
goto default;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
{
|
||||
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
|
||||
bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
|
||||
if (lastByte == 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
}
|
||||
|
||||
bitD.bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8;
|
||||
}
|
||||
|
||||
return srcSize;
|
||||
}
|
||||
|
||||
/*! BIT_lookBits() :
|
||||
* Provides next n bits from local register.
|
||||
* local register is not modified.
|
||||
* On 32-bits, maxNbBits==24.
|
||||
* On 64-bits, maxNbBits==56.
|
||||
* @return : value extracted */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_lookBits(nuint bitD_bitContainer, uint bitD_bitsConsumed, uint nbBits)
|
||||
{
|
||||
return BIT_getMiddleBits(
|
||||
bitD_bitContainer,
|
||||
(uint)(sizeof(nuint) * 8) - bitD_bitsConsumed - nbBits,
|
||||
nbBits
|
||||
);
|
||||
}
|
||||
|
||||
/*! BIT_lookBitsFast() :
|
||||
* unsafe version; only works if nbBits >= 1 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_lookBitsFast(
|
||||
nuint bitD_bitContainer,
|
||||
uint bitD_bitsConsumed,
|
||||
uint nbBits
|
||||
)
|
||||
{
|
||||
uint regMask = (uint)(sizeof(nuint) * 8 - 1);
|
||||
assert(nbBits >= 1);
|
||||
return bitD_bitContainer
|
||||
<< (int)(bitD_bitsConsumed & regMask)
|
||||
>> (int)(regMask + 1 - nbBits & regMask);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_skipBits(ref uint bitD_bitsConsumed, uint nbBits)
|
||||
{
|
||||
bitD_bitsConsumed += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_readBits() :
|
||||
* Read (consume) next n bits from local register and update.
|
||||
* Take care not to read more than the nbBits currently contained in the local register.
|
||||
* @return : extracted value. */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_readBits(
|
||||
nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed,
|
||||
uint nbBits
|
||||
)
|
||||
{
|
||||
nuint value = BIT_lookBits(bitD_bitContainer, bitD_bitsConsumed, nbBits);
|
||||
BIT_skipBits(ref bitD_bitsConsumed, nbBits);
|
||||
return value;
|
||||
}
|
||||
|
||||
/*! BIT_readBitsFast() :
|
||||
* unsafe version; only works if nbBits >= 1 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_readBitsFast(
|
||||
nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed,
|
||||
uint nbBits
|
||||
)
|
||||
{
|
||||
nuint value = BIT_lookBitsFast(bitD_bitContainer, bitD_bitsConsumed, nbBits);
|
||||
assert(nbBits >= 1);
|
||||
BIT_skipBits(ref bitD_bitsConsumed, nbBits);
|
||||
return value;
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStreamFast() :
|
||||
* Similar to BIT_reloadDStream(), but with two differences:
|
||||
* 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
|
||||
* 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
|
||||
* point you must use BIT_reloadDStream() to reload.
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStreamFast(
|
||||
ref nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed,
|
||||
ref sbyte* bitD_ptr,
|
||||
sbyte* bitD_start,
|
||||
sbyte* bitD_limitPtr
|
||||
)
|
||||
{
|
||||
if (bitD_ptr < bitD_limitPtr)
|
||||
return BIT_DStream_status.BIT_DStream_overflow;
|
||||
return BIT_reloadDStream_internal(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start
|
||||
);
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStream() :
|
||||
* Refill `bitD` from buffer previously set in BIT_initDStream() .
|
||||
* This function is safe: it guarantees it will never read beyond the src buffer.
|
||||
* @return : status of `BIT_DStream_t` internal register.
|
||||
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStream(
|
||||
ref nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed,
|
||||
ref sbyte* bitD_ptr,
|
||||
sbyte* bitD_start,
|
||||
sbyte* bitD_limitPtr
|
||||
)
|
||||
{
|
||||
if (bitD_bitsConsumed > (uint)(sizeof(nuint) * 8))
|
||||
{
|
||||
bitD_ptr = (sbyte*)&static_zeroFilled[0];
|
||||
return BIT_DStream_status.BIT_DStream_overflow;
|
||||
}
|
||||
|
||||
assert(bitD_ptr >= bitD_start);
|
||||
if (bitD_ptr >= bitD_limitPtr)
|
||||
{
|
||||
return BIT_reloadDStream_internal(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start
|
||||
);
|
||||
}
|
||||
|
||||
if (bitD_ptr == bitD_start)
|
||||
{
|
||||
if (bitD_bitsConsumed < (uint)(sizeof(nuint) * 8))
|
||||
return BIT_DStream_status.BIT_DStream_endOfBuffer;
|
||||
return BIT_DStream_status.BIT_DStream_completed;
|
||||
}
|
||||
|
||||
{
|
||||
uint nbBytes = bitD_bitsConsumed >> 3;
|
||||
BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished;
|
||||
if (bitD_ptr - nbBytes < bitD_start)
|
||||
{
|
||||
nbBytes = (uint)(bitD_ptr - bitD_start);
|
||||
result = BIT_DStream_status.BIT_DStream_endOfBuffer;
|
||||
}
|
||||
|
||||
bitD_ptr -= nbBytes;
|
||||
bitD_bitsConsumed -= nbBytes * 8;
|
||||
bitD_bitContainer = MEM_readLEST(bitD_ptr);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStream_internal() :
|
||||
* Simple variant of BIT_reloadDStream(), with two conditions:
|
||||
* 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
|
||||
* 2. look window is valid after shifted down : bitD->ptr >= bitD->start
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStream_internal(
|
||||
ref nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed,
|
||||
ref sbyte* bitD_ptr,
|
||||
sbyte* bitD_start
|
||||
)
|
||||
{
|
||||
assert(bitD_bitsConsumed <= (uint)(sizeof(nuint) * 8));
|
||||
bitD_ptr -= bitD_bitsConsumed >> 3;
|
||||
assert(bitD_ptr >= bitD_start);
|
||||
bitD_bitsConsumed &= 7;
|
||||
bitD_bitContainer = MEM_readLEST(bitD_ptr);
|
||||
return BIT_DStream_status.BIT_DStream_unfinished;
|
||||
}
|
||||
|
||||
/*! BIT_endOfDStream() :
|
||||
* @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint BIT_endOfDStream(
|
||||
uint DStream_bitsConsumed,
|
||||
sbyte* DStream_ptr,
|
||||
sbyte* DStream_start
|
||||
)
|
||||
{
|
||||
return DStream_ptr == DStream_start && DStream_bitsConsumed == (uint)(sizeof(nuint) * 8)
|
||||
? 1U
|
||||
: 0U;
|
||||
}
|
||||
}
|
||||
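Editorial note, not part of the diff: a standalone restatement of the non-BMI2 fallback used by BIT_getMiddleBits above, shift the container right by `start`, then mask off everything above `nbBits`. The BIT_mask table at the top of the file holds exactly these (1u << n) - 1 masks.

using System;

internal static class MiddleBitsSketch
{
    // Mirrors the shift-and-mask fallback of BIT_getMiddleBits (no BMI2 path).
    private static ulong GetMiddleBits(ulong bitContainer, int start, int nbBits) =>
        (bitContainer >> start) & ((1UL << nbBits) - 1);

    public static void Main()
    {
        ulong container = 0b1011_0110;
        Console.WriteLine(GetMiddleBits(container, 2, 4));   // bits 2..5 -> 13
    }
}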
@@ -0,0 +1,8 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public struct BlockSummary
|
||||
{
|
||||
public nuint nbSequences;
|
||||
public nuint blockSize;
|
||||
public nuint litSize;
|
||||
}
|
||||
@@ -0,0 +1,20 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* COVER_best_t is used for two purposes:
|
||||
* 1. Synchronizing threads.
|
||||
* 2. Saving the best parameters and dictionary.
|
||||
*
|
||||
* All of the methods except COVER_best_init() are thread safe if zstd is
|
||||
* compiled with multithreaded support.
|
||||
*/
|
||||
public unsafe struct COVER_best_s
|
||||
{
|
||||
public void* mutex;
|
||||
public void* cond;
|
||||
public nuint liveJobs;
|
||||
public void* dict;
|
||||
public nuint dictSize;
|
||||
public ZDICT_cover_params_t parameters;
|
||||
public nuint compressedSize;
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-*************************************
|
||||
* Context
|
||||
***************************************/
|
||||
public unsafe struct COVER_ctx_t
|
||||
{
|
||||
public byte* samples;
|
||||
public nuint* offsets;
|
||||
public nuint* samplesSizes;
|
||||
public nuint nbSamples;
|
||||
public nuint nbTrainSamples;
|
||||
public nuint nbTestSamples;
|
||||
public uint* suffix;
|
||||
public nuint suffixSize;
|
||||
public uint* freqs;
|
||||
public uint* dmerAt;
|
||||
public uint d;
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* Struct used for the dictionary selection function.
|
||||
*/
|
||||
public unsafe struct COVER_dictSelection
|
||||
{
|
||||
public byte* dictContent;
|
||||
public nuint dictSize;
|
||||
public nuint totalCompressedSize;
|
||||
}
|
||||
@@ -0,0 +1,10 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* Number of epochs and size of each epoch.
|
||||
*/
|
||||
public struct COVER_epoch_info_t
|
||||
{
|
||||
public uint num;
|
||||
public uint size;
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public struct COVER_map_pair_t_s
|
||||
{
|
||||
public uint key;
|
||||
public uint value;
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public unsafe struct COVER_map_s
|
||||
{
|
||||
public COVER_map_pair_t_s* data;
|
||||
public uint sizeLog;
|
||||
public uint size;
|
||||
public uint sizeMask;
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* A segment is a range in the source as well as the score of the segment.
|
||||
*/
|
||||
public struct COVER_segment_t
|
||||
{
|
||||
public uint begin;
|
||||
public uint end;
|
||||
public uint score;
|
||||
}
|
||||
@@ -0,0 +1,12 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* Parameters for COVER_tryParameters().
|
||||
*/
|
||||
public unsafe struct COVER_tryParameters_data_s
|
||||
{
|
||||
public COVER_ctx_t* ctx;
|
||||
public COVER_best_s* best;
|
||||
public nuint dictBufferCapacity;
|
||||
public ZDICT_cover_params_t parameters;
|
||||
}
|
||||
849
src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs
Normal file
@@ -0,0 +1,849 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
private static readonly ZSTD_compressionParameters[][] ZSTD_defaultCParameters =
|
||||
new ZSTD_compressionParameters[4][]
|
||||
{
|
||||
new ZSTD_compressionParameters[23]
|
||||
{
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 19,
|
||||
chainLog: 12,
|
||||
hashLog: 13,
|
||||
searchLog: 1,
|
||||
minMatch: 6,
|
||||
targetLength: 1,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 19,
|
||||
chainLog: 13,
|
||||
hashLog: 14,
|
||||
searchLog: 1,
|
||||
minMatch: 7,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 20,
|
||||
chainLog: 15,
|
||||
hashLog: 16,
|
||||
searchLog: 1,
|
||||
minMatch: 6,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 18,
|
||||
hashLog: 18,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 3,
|
||||
minMatch: 5,
|
||||
targetLength: 2,
|
||||
strategy: ZSTD_strategy.ZSTD_greedy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 3,
|
||||
minMatch: 5,
|
||||
targetLength: 4,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 19,
|
||||
hashLog: 20,
|
||||
searchLog: 4,
|
||||
minMatch: 5,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 21,
|
||||
chainLog: 19,
|
||||
hashLog: 20,
|
||||
searchLog: 4,
|
||||
minMatch: 5,
|
||||
targetLength: 16,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 20,
|
||||
hashLog: 21,
|
||||
searchLog: 4,
|
||||
minMatch: 5,
|
||||
targetLength: 16,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 21,
|
||||
hashLog: 22,
|
||||
searchLog: 5,
|
||||
minMatch: 5,
|
||||
targetLength: 16,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 21,
|
||||
hashLog: 22,
|
||||
searchLog: 6,
|
||||
minMatch: 5,
|
||||
targetLength: 16,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 22,
|
||||
hashLog: 23,
|
||||
searchLog: 6,
|
||||
minMatch: 5,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 22,
|
||||
hashLog: 22,
|
||||
searchLog: 4,
|
||||
minMatch: 5,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 22,
|
||||
hashLog: 23,
|
||||
searchLog: 5,
|
||||
minMatch: 5,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 23,
|
||||
hashLog: 23,
|
||||
searchLog: 6,
|
||||
minMatch: 5,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 22,
|
||||
chainLog: 22,
|
||||
hashLog: 22,
|
||||
searchLog: 5,
|
||||
minMatch: 5,
|
||||
targetLength: 48,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 23,
|
||||
chainLog: 23,
|
||||
hashLog: 22,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 64,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 23,
|
||||
chainLog: 23,
|
||||
hashLog: 22,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 64,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 23,
|
||||
chainLog: 24,
|
||||
hashLog: 22,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 25,
|
||||
chainLog: 25,
|
||||
hashLog: 23,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 26,
|
||||
chainLog: 26,
|
||||
hashLog: 24,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 27,
|
||||
chainLog: 27,
|
||||
hashLog: 25,
|
||||
searchLog: 9,
|
||||
minMatch: 3,
|
||||
targetLength: 999,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
},
|
||||
new ZSTD_compressionParameters[23]
|
||||
{
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 12,
|
||||
hashLog: 13,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 1,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 13,
|
||||
hashLog: 14,
|
||||
searchLog: 1,
|
||||
minMatch: 6,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 16,
|
||||
hashLog: 16,
|
||||
searchLog: 1,
|
||||
minMatch: 4,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 3,
|
||||
minMatch: 5,
|
||||
targetLength: 2,
|
||||
strategy: ZSTD_strategy.ZSTD_greedy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 17,
|
||||
hashLog: 18,
|
||||
searchLog: 5,
|
||||
minMatch: 5,
|
||||
targetLength: 2,
|
||||
strategy: ZSTD_strategy.ZSTD_greedy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 3,
|
||||
minMatch: 5,
|
||||
targetLength: 4,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 4,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 6,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 12,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 7,
|
||||
minMatch: 4,
|
||||
targetLength: 12,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 16,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 4,
|
||||
minMatch: 3,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 18,
|
||||
hashLog: 19,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 128,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 128,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 8,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 128,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 8,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 10,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 12,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 18,
|
||||
chainLog: 19,
|
||||
hashLog: 19,
|
||||
searchLog: 13,
|
||||
minMatch: 3,
|
||||
targetLength: 999,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
},
|
||||
new ZSTD_compressionParameters[23]
|
||||
{
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 12,
|
||||
hashLog: 12,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 1,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 12,
|
||||
hashLog: 13,
|
||||
searchLog: 1,
|
||||
minMatch: 6,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 13,
|
||||
hashLog: 15,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 15,
|
||||
hashLog: 16,
|
||||
searchLog: 2,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 17,
|
||||
hashLog: 17,
|
||||
searchLog: 2,
|
||||
minMatch: 4,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 2,
|
||||
strategy: ZSTD_strategy.ZSTD_greedy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 4,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 16,
|
||||
hashLog: 17,
|
||||
searchLog: 6,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 17,
|
||||
hashLog: 17,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 7,
|
||||
minMatch: 4,
|
||||
targetLength: 12,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 12,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 4,
|
||||
minMatch: 3,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 128,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 8,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 10,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 5,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 9,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 17,
|
||||
chainLog: 18,
|
||||
hashLog: 17,
|
||||
searchLog: 11,
|
||||
minMatch: 3,
|
||||
targetLength: 999,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
},
|
||||
new ZSTD_compressionParameters[23]
|
||||
{
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 12,
|
||||
hashLog: 13,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 1,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 15,
|
||||
searchLog: 1,
|
||||
minMatch: 5,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 15,
|
||||
searchLog: 1,
|
||||
minMatch: 4,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_fast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 15,
|
||||
searchLog: 2,
|
||||
minMatch: 4,
|
||||
targetLength: 0,
|
||||
strategy: ZSTD_strategy.ZSTD_dfast
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 2,
|
||||
strategy: ZSTD_strategy.ZSTD_greedy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 4,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 4,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 6,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 14,
|
||||
hashLog: 14,
|
||||
searchLog: 8,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_lazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 14,
|
||||
searchLog: 5,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 14,
|
||||
searchLog: 9,
|
||||
minMatch: 4,
|
||||
targetLength: 8,
|
||||
strategy: ZSTD_strategy.ZSTD_btlazy2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 14,
|
||||
searchLog: 3,
|
||||
minMatch: 4,
|
||||
targetLength: 12,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 14,
|
||||
searchLog: 4,
|
||||
minMatch: 3,
|
||||
targetLength: 24,
|
||||
strategy: ZSTD_strategy.ZSTD_btopt
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 14,
|
||||
searchLog: 5,
|
||||
minMatch: 3,
|
||||
targetLength: 32,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 64,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 5,
|
||||
minMatch: 3,
|
||||
targetLength: 48,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 6,
|
||||
minMatch: 3,
|
||||
targetLength: 128,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 7,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 8,
|
||||
minMatch: 3,
|
||||
targetLength: 256,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 8,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 9,
|
||||
minMatch: 3,
|
||||
targetLength: 512,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
new ZSTD_compressionParameters(
|
||||
windowLog: 14,
|
||||
chainLog: 15,
|
||||
hashLog: 15,
|
||||
searchLog: 10,
|
||||
minMatch: 3,
|
||||
targetLength: 999,
|
||||
strategy: ZSTD_strategy.ZSTD_btultra2
|
||||
),
|
||||
},
|
||||
};
|
||||
}
|
||||
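Editorial note, not part of the diff: in upstream zstd a table like this is consulted by ZSTD_getCParams, where the outer index is a source-size tier and the inner index is the compression level clamped to 0..22; whether this port wires it up the same way is not shown in this excerpt. A hypothetical accessor, written as a partial-class extension purely for illustration:

using System;

namespace SharpCompress.Compressors.ZStandard.Unsafe;

public static unsafe partial class Methods
{
    // Hypothetical helper (not part of the port): tier selects one of the four rows
    // above, level selects the column for compression levels 0..22.
    internal static ZSTD_compressionParameters GetDefaultCParamsSketch(int tier, int level) =>
        ZSTD_defaultCParameters[Math.Clamp(tier, 0, 3)][Math.Clamp(level, 0, 22)];
}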
61
src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs
Normal file
@@ -0,0 +1,61 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/* @return 1 if @u is a 2^n value, 0 otherwise
|
||||
* useful to check a value is valid for alignment restrictions */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static int ZSTD_isPower2(nuint u)
|
||||
{
|
||||
return (u & u - 1) == 0 ? 1 : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to perform a wrapped pointer difference without triggering
|
||||
* UBSAN.
|
||||
*
|
||||
* @returns lhs - rhs with wrapping
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nint ZSTD_wrappedPtrDiff(byte* lhs, byte* rhs)
|
||||
{
|
||||
return (nint)(lhs - rhs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to perform a wrapped pointer add without triggering UBSAN.
|
||||
*
|
||||
* @return ptr + add with wrapping
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte* ZSTD_wrappedPtrAdd(byte* ptr, nint add)
|
||||
{
|
||||
return ptr + add;
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to perform a wrapped pointer subtraction without triggering
|
||||
* UBSAN.
|
||||
*
|
||||
* @return ptr - sub with wrapping
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte* ZSTD_wrappedPtrSub(byte* ptr, nint sub)
|
||||
{
|
||||
return ptr - sub;
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to add to a pointer that works around C's undefined behavior
|
||||
* of adding 0 to NULL.
|
||||
*
|
||||
* @returns `ptr + add` except it defines `NULL + 0 == NULL`.
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte* ZSTD_maybeNullPtrAdd(byte* ptr, nint add)
|
||||
{
|
||||
return add > 0 ? ptr + add : ptr;
|
||||
}
|
||||
}
|
||||
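Editorial note, not part of the diff: a quick standalone check of the (u & (u - 1)) == 0 test that ZSTD_isPower2 relies on. Subtracting 1 clears the lowest set bit and sets every bit below it, so the AND is zero exactly when at most one bit was set.

using System;

internal static class Power2Sketch
{
    public static void Main()
    {
        foreach (uint u in new uint[] { 1, 2, 3, 8, 12, 64 })
        {
            bool isPow2 = (u & (u - 1)) == 0;
            Console.WriteLine($"{u}: {isPow2}");   // 1, 2, 8, 64 -> True; 3, 12 -> False
        }
    }
}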
444
src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs
Normal file
@@ -0,0 +1,444 @@
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
private static int g_displayLevel = 0;
|
||||
|
||||
/**
|
||||
* Returns the sum of the sample sizes.
|
||||
*/
|
||||
private static nuint COVER_sum(nuint* samplesSizes, uint nbSamples)
|
||||
{
|
||||
nuint sum = 0;
|
||||
uint i;
|
||||
for (i = 0; i < nbSamples; ++i)
|
||||
{
|
||||
sum += samplesSizes[i];
|
||||
}
|
||||
|
||||
return sum;
|
||||
}
|
||||
|
||||
/**
|
||||
* Warns the user when their corpus is too small.
|
||||
*/
|
||||
private static void COVER_warnOnSmallCorpus(nuint maxDictSize, nuint nbDmers, int displayLevel)
|
||||
{
|
||||
double ratio = nbDmers / (double)maxDictSize;
|
||||
if (ratio >= 10)
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes the number of epochs and the size of each epoch.
|
||||
* We will make sure that each epoch gets at least 10 * k bytes.
|
||||
*
|
||||
* The COVER algorithms divide the data up into epochs of equal size and
|
||||
* select one segment from each epoch.
|
||||
*
|
||||
* @param maxDictSize The maximum allowed dictionary size.
|
||||
* @param nbDmers The number of dmers we are training on.
|
||||
* @param k The parameter k (segment size).
|
||||
* @param passes The target number of passes over the dmer corpus.
|
||||
* More passes means a better dictionary.
|
||||
*/
|
||||
private static COVER_epoch_info_t COVER_computeEpochs(
|
||||
uint maxDictSize,
|
||||
uint nbDmers,
|
||||
uint k,
|
||||
uint passes
|
||||
)
|
||||
{
|
||||
uint minEpochSize = k * 10;
|
||||
COVER_epoch_info_t epochs;
|
||||
epochs.num = 1 > maxDictSize / k / passes ? 1 : maxDictSize / k / passes;
|
||||
epochs.size = nbDmers / epochs.num;
|
||||
if (epochs.size >= minEpochSize)
|
||||
{
|
||||
assert(epochs.size * epochs.num <= nbDmers);
|
||||
return epochs;
|
||||
}
|
||||
|
||||
epochs.size = minEpochSize < nbDmers ? minEpochSize : nbDmers;
|
||||
epochs.num = nbDmers / epochs.size;
|
||||
assert(epochs.size * epochs.num <= nbDmers);
|
||||
return epochs;
|
||||
}
|
||||
|
||||
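
The epoch sizing above can be read as: aim for maxDictSize / k / passes epochs, but never let an epoch shrink below 10 * k dmers. A safe-code sketch of the same arithmetic (illustrative only; it mirrors the ported logic and, like it, assumes k, passes and nbDmers are positive):

using System;

// Mirrors COVER_computeEpochs: pick the number of epochs so the dictionary gets
// roughly `passes` passes over the dmer corpus, with a floor of 10 * k dmers per epoch.
static class EpochDemo
{
    static (uint num, uint size) ComputeEpochs(uint maxDictSize, uint nbDmers, uint k, uint passes)
    {
        uint minEpochSize = k * 10;
        uint num = Math.Max(1, maxDictSize / k / passes);
        uint size = nbDmers / num;
        if (size >= minEpochSize)
        {
            return (num, size);
        }

        // Too few dmers per epoch: clamp the epoch size and recompute the count.
        size = Math.Min(minEpochSize, nbDmers);
        num = nbDmers / size;
        return (num, size);
    }
}
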
/**
|
||||
* Checks total compressed size of a dictionary
|
||||
*/
|
||||
private static nuint COVER_checkTotalCompressedSize(
|
||||
ZDICT_cover_params_t parameters,
|
||||
nuint* samplesSizes,
|
||||
byte* samples,
|
||||
nuint* offsets,
|
||||
nuint nbTrainSamples,
|
||||
nuint nbSamples,
|
||||
byte* dict,
|
||||
nuint dictBufferCapacity
|
||||
)
|
||||
{
|
||||
nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
/* Pointers */
|
||||
ZSTD_CCtx_s* cctx;
|
||||
ZSTD_CDict_s* cdict;
|
||||
void* dst;
|
||||
/* Local variables */
|
||||
nuint dstCapacity;
|
||||
nuint i;
|
||||
{
|
||||
nuint maxSampleSize = 0;
|
||||
i = parameters.splitPoint < 1 ? nbTrainSamples : 0;
|
||||
for (; i < nbSamples; ++i)
|
||||
{
|
||||
maxSampleSize = samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize;
|
||||
}
|
||||
|
||||
dstCapacity = ZSTD_compressBound(maxSampleSize);
|
||||
dst = malloc(dstCapacity);
|
||||
}
|
||||
|
||||
cctx = ZSTD_createCCtx();
|
||||
cdict = ZSTD_createCDict(dict, dictBufferCapacity, parameters.zParams.compressionLevel);
|
||||
if (dst == null || cctx == null || cdict == null)
|
||||
{
|
||||
goto _compressCleanup;
|
||||
}
|
||||
|
||||
totalCompressedSize = dictBufferCapacity;
|
||||
i = parameters.splitPoint < 1 ? nbTrainSamples : 0;
|
||||
for (; i < nbSamples; ++i)
|
||||
{
|
||||
nuint size = ZSTD_compress_usingCDict(
|
||||
cctx,
|
||||
dst,
|
||||
dstCapacity,
|
||||
samples + offsets[i],
|
||||
samplesSizes[i],
|
||||
cdict
|
||||
);
|
||||
if (ERR_isError(size))
|
||||
{
|
||||
totalCompressedSize = size;
|
||||
goto _compressCleanup;
|
||||
}
|
||||
|
||||
totalCompressedSize += size;
|
||||
}
|
||||
|
||||
_compressCleanup:
|
||||
ZSTD_freeCCtx(cctx);
|
||||
ZSTD_freeCDict(cdict);
|
||||
if (dst != null)
|
||||
{
|
||||
free(dst);
|
||||
}
|
||||
|
||||
return totalCompressedSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the `COVER_best_t`.
|
||||
*/
|
||||
private static void COVER_best_init(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
return;
|
||||
SynchronizationWrapper.Init(&best->mutex);
|
||||
best->liveJobs = 0;
|
||||
best->dict = null;
|
||||
best->dictSize = 0;
|
||||
best->compressedSize = unchecked((nuint)(-1));
|
||||
best->parameters = new ZDICT_cover_params_t();
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait until liveJobs == 0.
|
||||
*/
|
||||
private static void COVER_best_wait(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
SynchronizationWrapper.Enter(&best->mutex);
|
||||
while (best->liveJobs != 0)
|
||||
{
|
||||
SynchronizationWrapper.Wait(&best->mutex);
|
||||
}
|
||||
|
||||
SynchronizationWrapper.Exit(&best->mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* Call COVER_best_wait() and then destroy the COVER_best_t.
|
||||
*/
|
||||
private static void COVER_best_destroy(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
COVER_best_wait(best);
|
||||
if (best->dict != null)
|
||||
{
|
||||
free(best->dict);
|
||||
}
|
||||
|
||||
SynchronizationWrapper.Free(&best->mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when a thread is about to be launched.
|
||||
* Increments liveJobs.
|
||||
*/
|
||||
private static void COVER_best_start(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
SynchronizationWrapper.Enter(&best->mutex);
|
||||
++best->liveJobs;
|
||||
SynchronizationWrapper.Exit(&best->mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when a thread finishes executing, both on error or success.
|
||||
* Decrements liveJobs and signals any waiting threads if liveJobs == 0.
|
||||
* If this dictionary is the best so far save it and its parameters.
|
||||
*/
|
||||
private static void COVER_best_finish(
|
||||
COVER_best_s* best,
|
||||
ZDICT_cover_params_t parameters,
|
||||
COVER_dictSelection selection
|
||||
)
|
||||
{
|
||||
void* dict = selection.dictContent;
|
||||
nuint compressedSize = selection.totalCompressedSize;
|
||||
nuint dictSize = selection.dictSize;
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
{
|
||||
nuint liveJobs;
|
||||
SynchronizationWrapper.Enter(&best->mutex);
|
||||
--best->liveJobs;
|
||||
liveJobs = best->liveJobs;
|
||||
if (compressedSize < best->compressedSize)
|
||||
{
|
||||
if (best->dict == null || best->dictSize < dictSize)
|
||||
{
|
||||
if (best->dict != null)
|
||||
{
|
||||
free(best->dict);
|
||||
}
|
||||
|
||||
best->dict = malloc(dictSize);
|
||||
if (best->dict == null)
|
||||
{
|
||||
best->compressedSize = unchecked(
|
||||
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)
|
||||
);
|
||||
best->dictSize = 0;
|
||||
SynchronizationWrapper.Pulse(&best->mutex);
|
||||
SynchronizationWrapper.Exit(&best->mutex);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (dict != null)
|
||||
{
|
||||
memcpy(best->dict, dict, (uint)dictSize);
|
||||
best->dictSize = dictSize;
|
||||
best->parameters = parameters;
|
||||
best->compressedSize = compressedSize;
|
||||
}
|
||||
}
|
||||
|
||||
if (liveJobs == 0)
|
||||
{
|
||||
SynchronizationWrapper.PulseAll(&best->mutex);
|
||||
}
|
||||
|
||||
SynchronizationWrapper.Exit(&best->mutex);
|
||||
}
|
||||
}
|
||||
|
||||
private static COVER_dictSelection setDictSelection(byte* buf, nuint s, nuint csz)
|
||||
{
|
||||
COVER_dictSelection ds;
|
||||
ds.dictContent = buf;
|
||||
ds.dictSize = s;
|
||||
ds.totalCompressedSize = csz;
|
||||
return ds;
|
||||
}
|
||||
|
||||
/**
|
||||
* Error function for COVER_selectDict function. Returns a struct where
|
||||
* return.totalCompressedSize is a ZSTD error.
|
||||
*/
|
||||
private static COVER_dictSelection COVER_dictSelectionError(nuint error)
|
||||
{
|
||||
return setDictSelection(null, 0, error);
|
||||
}
|
||||
|
||||
/**
|
||||
* Error function for COVER_selectDict function. Checks if the return
|
||||
* value is an error.
|
||||
*/
|
||||
private static uint COVER_dictSelectionIsError(COVER_dictSelection selection)
|
||||
{
|
||||
return ERR_isError(selection.totalCompressedSize) || selection.dictContent == null
|
||||
? 1U
|
||||
: 0U;
|
||||
}
|
||||
|
||||
/**
|
||||
* Always call after selectDict is called to free up used memory from
|
||||
* newly created dictionary.
|
||||
*/
|
||||
private static void COVER_dictSelectionFree(COVER_dictSelection selection)
|
||||
{
|
||||
free(selection.dictContent);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called to finalize the dictionary and select one based on whether or not
|
||||
* the shrink-dict flag was enabled. If enabled the dictionary used is the
|
||||
* smallest dictionary within a specified regression of the compressed size
|
||||
* from the largest dictionary.
|
||||
*/
|
||||
private static COVER_dictSelection COVER_selectDict(
|
||||
byte* customDictContent,
|
||||
nuint dictBufferCapacity,
|
||||
nuint dictContentSize,
|
||||
byte* samplesBuffer,
|
||||
nuint* samplesSizes,
|
||||
uint nbFinalizeSamples,
|
||||
nuint nbCheckSamples,
|
||||
nuint nbSamples,
|
||||
ZDICT_cover_params_t @params,
|
||||
nuint* offsets,
|
||||
nuint totalCompressedSize
|
||||
)
|
||||
{
|
||||
nuint largestDict = 0;
|
||||
nuint largestCompressed = 0;
|
||||
byte* customDictContentEnd = customDictContent + dictContentSize;
|
||||
byte* largestDictbuffer = (byte*)malloc(dictBufferCapacity);
|
||||
byte* candidateDictBuffer = (byte*)malloc(dictBufferCapacity);
|
||||
double regressionTolerance = (double)@params.shrinkDictMaxRegression / 100 + 1;
|
||||
if (largestDictbuffer == null || candidateDictBuffer == null)
|
||||
{
|
||||
free(largestDictbuffer);
|
||||
free(candidateDictBuffer);
|
||||
return COVER_dictSelectionError(dictContentSize);
|
||||
}
|
||||
|
||||
memcpy(largestDictbuffer, customDictContent, (uint)dictContentSize);
|
||||
dictContentSize = ZDICT_finalizeDictionary(
|
||||
largestDictbuffer,
|
||||
dictBufferCapacity,
|
||||
customDictContent,
|
||||
dictContentSize,
|
||||
samplesBuffer,
|
||||
samplesSizes,
|
||||
nbFinalizeSamples,
|
||||
@params.zParams
|
||||
);
|
||||
if (ZDICT_isError(dictContentSize))
|
||||
{
|
||||
free(largestDictbuffer);
|
||||
free(candidateDictBuffer);
|
||||
return COVER_dictSelectionError(dictContentSize);
|
||||
}
|
||||
|
||||
totalCompressedSize = COVER_checkTotalCompressedSize(
|
||||
@params,
|
||||
samplesSizes,
|
||||
samplesBuffer,
|
||||
offsets,
|
||||
nbCheckSamples,
|
||||
nbSamples,
|
||||
largestDictbuffer,
|
||||
dictContentSize
|
||||
);
|
||||
if (ERR_isError(totalCompressedSize))
|
||||
{
|
||||
free(largestDictbuffer);
|
||||
free(candidateDictBuffer);
|
||||
return COVER_dictSelectionError(totalCompressedSize);
|
||||
}
|
||||
|
||||
if (@params.shrinkDict == 0)
|
||||
{
|
||||
free(candidateDictBuffer);
|
||||
return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
|
||||
}
|
||||
|
||||
largestDict = dictContentSize;
|
||||
largestCompressed = totalCompressedSize;
|
||||
dictContentSize = 256;
|
||||
while (dictContentSize < largestDict)
|
||||
{
|
||||
memcpy(candidateDictBuffer, largestDictbuffer, (uint)largestDict);
|
||||
dictContentSize = ZDICT_finalizeDictionary(
|
||||
candidateDictBuffer,
|
||||
dictBufferCapacity,
|
||||
customDictContentEnd - dictContentSize,
|
||||
dictContentSize,
|
||||
samplesBuffer,
|
||||
samplesSizes,
|
||||
nbFinalizeSamples,
|
||||
@params.zParams
|
||||
);
|
||||
if (ZDICT_isError(dictContentSize))
|
||||
{
|
||||
free(largestDictbuffer);
|
||||
free(candidateDictBuffer);
|
||||
return COVER_dictSelectionError(dictContentSize);
|
||||
}
|
||||
|
||||
totalCompressedSize = COVER_checkTotalCompressedSize(
|
||||
@params,
|
||||
samplesSizes,
|
||||
samplesBuffer,
|
||||
offsets,
|
||||
nbCheckSamples,
|
||||
nbSamples,
|
||||
candidateDictBuffer,
|
||||
dictContentSize
|
||||
);
|
||||
if (ERR_isError(totalCompressedSize))
|
||||
{
|
||||
free(largestDictbuffer);
|
||||
free(candidateDictBuffer);
|
||||
return COVER_dictSelectionError(totalCompressedSize);
|
||||
}
|
||||
|
||||
if (totalCompressedSize <= largestCompressed * regressionTolerance)
|
||||
{
|
||||
free(largestDictbuffer);
|
||||
return setDictSelection(candidateDictBuffer, dictContentSize, totalCompressedSize);
|
||||
}
|
||||
|
||||
dictContentSize *= 2;
|
||||
}
|
||||
|
||||
dictContentSize = largestDict;
|
||||
totalCompressedSize = largestCompressed;
|
||||
free(candidateDictBuffer);
|
||||
return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
|
||||
}
|
||||
}
|
||||
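
COVER_selectDict's shrink-dict search above doubles a candidate size from 256 bytes upward and accepts the first candidate whose total compressed size stays within the allowed regression of the full dictionary. A schematic safe-code sketch of just that acceptance loop; `compressWith` is an illustrative stand-in for "finalize a dictionary of this size and measure the compressed corpus" (COVER_checkTotalCompressedSize in the port), not a ported API:

using System;

static class ShrinkDictDemo
{
    // Returns the smallest candidate dictionary size whose compressed corpus size is
    // within `shrinkDictMaxRegression` percent of the full dictionary's result.
    static ulong SelectDictSize(
        ulong largestDictSize,
        ulong largestCompressed,
        uint shrinkDictMaxRegression,
        Func<ulong, ulong> compressWith)
    {
        double regressionTolerance = shrinkDictMaxRegression / 100.0 + 1;
        for (ulong candidate = 256; candidate < largestDictSize; candidate *= 2)
        {
            ulong compressed = compressWith(candidate);
            if (compressed <= largestCompressed * regressionTolerance)
            {
                return candidate; // smallest dictionary that is "close enough"
            }
        }
        return largestDictSize; // nothing smaller was acceptable
    }
}
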
12
src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs
Normal file
@@ -0,0 +1,12 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

/*-***************************/
/* generic DTableDesc */
/*-***************************/
public struct DTableDesc
{
    public byte maxTableLog;
    public byte tableType;
    public byte tableLog;
    public byte reserved;
}
@@ -0,0 +1,13 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

public unsafe struct EStats_ress_t
{
    /* dictionary */
    public ZSTD_CDict_s* dict;

    /* working context */
    public ZSTD_CCtx_s* zc;

    /* must be ZSTD_BLOCKSIZE_MAX allocated */
    public void* workPlace;
}
447
src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs
Normal file
@@ -0,0 +1,447 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/*=== Version ===*/
|
||||
private static uint FSE_versionNumber()
|
||||
{
|
||||
return 0 * 100 * 100 + 9 * 100 + 0;
|
||||
}
|
||||
|
||||
/*=== Error Management ===*/
|
||||
private static bool FSE_isError(nuint code)
|
||||
{
|
||||
return ERR_isError(code);
|
||||
}
|
||||
|
||||
private static string FSE_getErrorName(nuint code)
|
||||
{
|
||||
return ERR_getErrorName(code);
|
||||
}
|
||||
|
||||
/* Error Management */
|
||||
private static bool HUF_isError(nuint code)
|
||||
{
|
||||
return ERR_isError(code);
|
||||
}
|
||||
|
||||
private static string HUF_getErrorName(nuint code)
|
||||
{
|
||||
return ERR_getErrorName(code);
|
||||
}
|
||||
|
||||
/*-**************************************************************
|
||||
* FSE NCount encoding-decoding
|
||||
****************************************************************/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint FSE_readNCount_body(
|
||||
short* normalizedCounter,
|
||||
uint* maxSVPtr,
|
||||
uint* tableLogPtr,
|
||||
void* headerBuffer,
|
||||
nuint hbSize
|
||||
)
|
||||
{
|
||||
byte* istart = (byte*)headerBuffer;
|
||||
byte* iend = istart + hbSize;
|
||||
byte* ip = istart;
|
||||
int nbBits;
|
||||
int remaining;
|
||||
int threshold;
|
||||
uint bitStream;
|
||||
int bitCount;
|
||||
uint charnum = 0;
|
||||
uint maxSV1 = *maxSVPtr + 1;
|
||||
int previous0 = 0;
|
||||
if (hbSize < 8)
|
||||
{
|
||||
sbyte* buffer = stackalloc sbyte[8];
|
||||
/* This function only works when hbSize >= 8 */
|
||||
memset(buffer, 0, sizeof(sbyte) * 8);
|
||||
memcpy(buffer, headerBuffer, (uint)hbSize);
|
||||
{
|
||||
nuint countSize = FSE_readNCount(
|
||||
normalizedCounter,
|
||||
maxSVPtr,
|
||||
tableLogPtr,
|
||||
buffer,
|
||||
sizeof(sbyte) * 8
|
||||
);
|
||||
if (FSE_isError(countSize))
|
||||
return countSize;
|
||||
if (countSize > hbSize)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
return countSize;
|
||||
}
|
||||
}
|
||||
|
||||
assert(hbSize >= 8);
|
||||
memset(normalizedCounter, 0, (*maxSVPtr + 1) * sizeof(short));
|
||||
bitStream = MEM_readLE32(ip);
|
||||
nbBits = (int)((bitStream & 0xF) + 5);
|
||||
if (nbBits > 15)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
|
||||
bitStream >>= 4;
|
||||
bitCount = 4;
|
||||
*tableLogPtr = (uint)nbBits;
|
||||
remaining = (1 << nbBits) + 1;
|
||||
threshold = 1 << nbBits;
|
||||
nbBits++;
|
||||
for (; ; )
|
||||
{
|
||||
if (previous0 != 0)
|
||||
{
|
||||
/* Count the number of repeats. Each time the
|
||||
* 2-bit repeat code is 0b11 there is another
|
||||
* repeat.
|
||||
* Avoid UB by setting the high bit to 1.
|
||||
*/
|
||||
int repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1);
|
||||
while (repeats >= 12)
|
||||
{
|
||||
charnum += 3 * 12;
|
||||
if (ip <= iend - 7)
|
||||
{
|
||||
ip += 3;
|
||||
}
|
||||
else
|
||||
{
|
||||
bitCount -= (int)(8 * (iend - 7 - ip));
|
||||
bitCount &= 31;
|
||||
ip = iend - 4;
|
||||
}
|
||||
|
||||
bitStream = MEM_readLE32(ip) >> bitCount;
|
||||
repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1);
|
||||
}
|
||||
|
||||
charnum += (uint)(3 * repeats);
|
||||
bitStream >>= 2 * repeats;
|
||||
bitCount += 2 * repeats;
|
||||
assert((bitStream & 3) < 3);
|
||||
charnum += bitStream & 3;
|
||||
bitCount += 2;
|
||||
if (charnum >= maxSV1)
|
||||
break;
|
||||
if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4)
|
||||
{
|
||||
assert(bitCount >> 3 <= 3);
|
||||
ip += bitCount >> 3;
|
||||
bitCount &= 7;
|
||||
}
|
||||
else
|
||||
{
|
||||
bitCount -= (int)(8 * (iend - 4 - ip));
|
||||
bitCount &= 31;
|
||||
ip = iend - 4;
|
||||
}
|
||||
|
||||
bitStream = MEM_readLE32(ip) >> bitCount;
|
||||
}
|
||||
|
||||
{
|
||||
int max = 2 * threshold - 1 - remaining;
|
||||
int count;
|
||||
if ((bitStream & (uint)(threshold - 1)) < (uint)max)
|
||||
{
|
||||
count = (int)(bitStream & (uint)(threshold - 1));
|
||||
bitCount += nbBits - 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
count = (int)(bitStream & (uint)(2 * threshold - 1));
|
||||
if (count >= threshold)
|
||||
count -= max;
|
||||
bitCount += nbBits;
|
||||
}
|
||||
|
||||
count--;
|
||||
if (count >= 0)
|
||||
{
|
||||
remaining -= count;
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(count == -1);
|
||||
remaining += count;
|
||||
}
|
||||
|
||||
normalizedCounter[charnum++] = (short)count;
|
||||
previous0 = count == 0 ? 1 : 0;
|
||||
assert(threshold > 1);
|
||||
if (remaining < threshold)
|
||||
{
|
||||
if (remaining <= 1)
|
||||
break;
|
||||
nbBits = (int)(ZSTD_highbit32((uint)remaining) + 1);
|
||||
threshold = 1 << nbBits - 1;
|
||||
}
|
||||
|
||||
if (charnum >= maxSV1)
|
||||
break;
|
||||
if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4)
|
||||
{
|
||||
ip += bitCount >> 3;
|
||||
bitCount &= 7;
|
||||
}
|
||||
else
|
||||
{
|
||||
bitCount -= (int)(8 * (iend - 4 - ip));
|
||||
bitCount &= 31;
|
||||
ip = iend - 4;
|
||||
}
|
||||
|
||||
bitStream = MEM_readLE32(ip) >> bitCount;
|
||||
}
|
||||
}
|
||||
|
||||
if (remaining != 1)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
if (charnum > maxSV1)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall));
|
||||
if (bitCount > 32)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
*maxSVPtr = charnum - 1;
|
||||
ip += bitCount + 7 >> 3;
|
||||
return (nuint)(ip - istart);
|
||||
}
|
||||
|
||||
/* Avoids the FORCE_INLINE of the _body() function. */
|
||||
private static nuint FSE_readNCount_body_default(
|
||||
short* normalizedCounter,
|
||||
uint* maxSVPtr,
|
||||
uint* tableLogPtr,
|
||||
void* headerBuffer,
|
||||
nuint hbSize
|
||||
)
|
||||
{
|
||||
return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
|
||||
}
|
||||
|
||||
/*! FSE_readNCount_bmi2():
|
||||
* Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
|
||||
*/
|
||||
private static nuint FSE_readNCount_bmi2(
|
||||
short* normalizedCounter,
|
||||
uint* maxSVPtr,
|
||||
uint* tableLogPtr,
|
||||
void* headerBuffer,
|
||||
nuint hbSize,
|
||||
int bmi2
|
||||
)
|
||||
{
|
||||
return FSE_readNCount_body_default(
|
||||
normalizedCounter,
|
||||
maxSVPtr,
|
||||
tableLogPtr,
|
||||
headerBuffer,
|
||||
hbSize
|
||||
);
|
||||
}
|
||||
|
||||
/*! FSE_readNCount():
|
||||
Read compactly saved 'normalizedCounter' from 'rBuffer'.
|
||||
@return : size read from 'rBuffer',
|
||||
or an errorCode, which can be tested using FSE_isError().
|
||||
maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
|
||||
private static nuint FSE_readNCount(
|
||||
short* normalizedCounter,
|
||||
uint* maxSVPtr,
|
||||
uint* tableLogPtr,
|
||||
void* headerBuffer,
|
||||
nuint hbSize
|
||||
)
|
||||
{
|
||||
return FSE_readNCount_bmi2(
|
||||
normalizedCounter,
|
||||
maxSVPtr,
|
||||
tableLogPtr,
|
||||
headerBuffer,
|
||||
hbSize,
|
||||
0
|
||||
);
|
||||
}
|
||||
|
||||
/*! HUF_readStats() :
|
||||
Read compact Huffman tree, saved by HUF_writeCTable().
|
||||
`huffWeight` is destination buffer.
|
||||
`rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
|
||||
@return : size read from `src` , or an error Code .
|
||||
Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
|
||||
*/
|
||||
private static nuint HUF_readStats(
|
||||
byte* huffWeight,
|
||||
nuint hwSize,
|
||||
uint* rankStats,
|
||||
uint* nbSymbolsPtr,
|
||||
uint* tableLogPtr,
|
||||
void* src,
|
||||
nuint srcSize
|
||||
)
|
||||
{
|
||||
uint* wksp = stackalloc uint[219];
|
||||
return HUF_readStats_wksp(
|
||||
huffWeight,
|
||||
hwSize,
|
||||
rankStats,
|
||||
nbSymbolsPtr,
|
||||
tableLogPtr,
|
||||
src,
|
||||
srcSize,
|
||||
wksp,
|
||||
sizeof(uint) * 219,
|
||||
0
|
||||
);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint HUF_readStats_body(
|
||||
byte* huffWeight,
|
||||
nuint hwSize,
|
||||
uint* rankStats,
|
||||
uint* nbSymbolsPtr,
|
||||
uint* tableLogPtr,
|
||||
void* src,
|
||||
nuint srcSize,
|
||||
void* workSpace,
|
||||
nuint wkspSize,
|
||||
int bmi2
|
||||
)
|
||||
{
|
||||
uint weightTotal;
|
||||
byte* ip = (byte*)src;
|
||||
nuint iSize;
|
||||
nuint oSize;
|
||||
if (srcSize == 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
iSize = ip[0];
|
||||
if (iSize >= 128)
|
||||
{
|
||||
oSize = iSize - 127;
|
||||
iSize = (oSize + 1) / 2;
|
||||
if (iSize + 1 > srcSize)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
if (oSize >= hwSize)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
ip += 1;
|
||||
{
|
||||
uint n;
|
||||
for (n = 0; n < oSize; n += 2)
|
||||
{
|
||||
huffWeight[n] = (byte)(ip[n / 2] >> 4);
|
||||
huffWeight[n + 1] = (byte)(ip[n / 2] & 15);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (iSize + 1 > srcSize)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
oSize = FSE_decompress_wksp_bmi2(
|
||||
huffWeight,
|
||||
hwSize - 1,
|
||||
ip + 1,
|
||||
iSize,
|
||||
6,
|
||||
workSpace,
|
||||
wkspSize,
|
||||
bmi2
|
||||
);
|
||||
if (FSE_isError(oSize))
|
||||
return oSize;
|
||||
}
|
||||
|
||||
memset(rankStats, 0, (12 + 1) * sizeof(uint));
|
||||
weightTotal = 0;
|
||||
{
|
||||
uint n;
|
||||
for (n = 0; n < oSize; n++)
|
||||
{
|
||||
if (huffWeight[n] > 12)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
rankStats[huffWeight[n]]++;
|
||||
weightTotal += (uint)(1 << huffWeight[n] >> 1);
|
||||
}
|
||||
}
|
||||
|
||||
if (weightTotal == 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
{
|
||||
uint tableLog = ZSTD_highbit32(weightTotal) + 1;
|
||||
if (tableLog > 12)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
*tableLogPtr = tableLog;
|
||||
{
|
||||
uint total = (uint)(1 << (int)tableLog);
|
||||
uint rest = total - weightTotal;
|
||||
uint verif = (uint)(1 << (int)ZSTD_highbit32(rest));
|
||||
uint lastWeight = ZSTD_highbit32(rest) + 1;
|
||||
if (verif != rest)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
huffWeight[oSize] = (byte)lastWeight;
|
||||
rankStats[lastWeight]++;
|
||||
}
|
||||
}
|
||||
|
||||
if (rankStats[1] < 2 || (rankStats[1] & 1) != 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
*nbSymbolsPtr = (uint)(oSize + 1);
|
||||
return iSize + 1;
|
||||
}
|
||||
|
||||
/* Avoids the FORCE_INLINE of the _body() function. */
|
||||
private static nuint HUF_readStats_body_default(
|
||||
byte* huffWeight,
|
||||
nuint hwSize,
|
||||
uint* rankStats,
|
||||
uint* nbSymbolsPtr,
|
||||
uint* tableLogPtr,
|
||||
void* src,
|
||||
nuint srcSize,
|
||||
void* workSpace,
|
||||
nuint wkspSize
|
||||
)
|
||||
{
|
||||
return HUF_readStats_body(
|
||||
huffWeight,
|
||||
hwSize,
|
||||
rankStats,
|
||||
nbSymbolsPtr,
|
||||
tableLogPtr,
|
||||
src,
|
||||
srcSize,
|
||||
workSpace,
|
||||
wkspSize,
|
||||
0
|
||||
);
|
||||
}
|
||||
|
||||
private static nuint HUF_readStats_wksp(
|
||||
byte* huffWeight,
|
||||
nuint hwSize,
|
||||
uint* rankStats,
|
||||
uint* nbSymbolsPtr,
|
||||
uint* tableLogPtr,
|
||||
void* src,
|
||||
nuint srcSize,
|
||||
void* workSpace,
|
||||
nuint wkspSize,
|
||||
int flags
|
||||
)
|
||||
{
|
||||
return HUF_readStats_body_default(
|
||||
huffWeight,
|
||||
hwSize,
|
||||
rankStats,
|
||||
nbSymbolsPtr,
|
||||
tableLogPtr,
|
||||
src,
|
||||
srcSize,
|
||||
workSpace,
|
||||
wkspSize
|
||||
);
|
||||
}
|
||||
}
|
||||
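
HUF_readStats above has two branches: when the header byte is below 128 the weights are FSE-compressed, otherwise (headerByte - 127) weights follow directly, packed two 4-bit values per byte, high nibble first. A small standalone sketch of that direct branch only (the buffer and names are made up for the example; the FSE branch is not shown):

using System;

static class HufWeightDemo
{
    // Unpacks the "direct" encoding: header byte >= 128, then (numWeights + 1) / 2 payload
    // bytes, each carrying two 4-bit Huffman weights with the high nibble first.
    static byte[] UnpackDirectWeights(ReadOnlySpan<byte> src)
    {
        int headerByte = src[0];
        if (headerByte < 128)
            throw new InvalidOperationException("weights are FSE-compressed, not direct");

        int numWeights = headerByte - 127;
        var weights = new byte[numWeights];
        for (int n = 0; n < numWeights; n += 2)
        {
            byte packed = src[1 + n / 2];
            weights[n] = (byte)(packed >> 4);
            if (n + 1 < numWeights)
                weights[n + 1] = (byte)(packed & 15);
        }
        return weights;
    }

    static void Main()
    {
        // header 0x82 => 3 direct weights; payload nibbles: (4,2) and (1, unused)
        byte[] src = { 0x82, 0x42, 0x10 };
        Console.WriteLine(string.Join(",", UnpackDirectWeights(src))); // 4,2,1
    }
}
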
110
src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs
Normal file
@@ -0,0 +1,110 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static bool ERR_isError(nuint code)
|
||||
{
|
||||
return code > unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode));
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static ZSTD_ErrorCode ERR_getErrorCode(nuint code)
|
||||
{
|
||||
if (!ERR_isError(code))
|
||||
return 0;
|
||||
return (ZSTD_ErrorCode)(0 - code);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static string ERR_getErrorName(nuint code)
|
||||
{
|
||||
return ERR_getErrorString(ERR_getErrorCode(code));
|
||||
}
|
||||
|
||||
/*-****************************************
|
||||
* Error Strings
|
||||
******************************************/
|
||||
private static string ERR_getErrorString(ZSTD_ErrorCode code)
|
||||
{
|
||||
const string notErrorCode = "Unspecified error code";
|
||||
switch (code)
|
||||
{
|
||||
case ZSTD_ErrorCode.ZSTD_error_no_error:
|
||||
return "No error detected";
|
||||
case ZSTD_ErrorCode.ZSTD_error_GENERIC:
|
||||
return "Error (generic)";
|
||||
case ZSTD_ErrorCode.ZSTD_error_prefix_unknown:
|
||||
return "Unknown frame descriptor";
|
||||
case ZSTD_ErrorCode.ZSTD_error_version_unsupported:
|
||||
return "Version not supported";
|
||||
case ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported:
|
||||
return "Unsupported frame parameter";
|
||||
case ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge:
|
||||
return "Frame requires too much memory for decoding";
|
||||
case ZSTD_ErrorCode.ZSTD_error_corruption_detected:
|
||||
return "Data corruption detected";
|
||||
case ZSTD_ErrorCode.ZSTD_error_checksum_wrong:
|
||||
return "Restored data doesn't match checksum";
|
||||
case ZSTD_ErrorCode.ZSTD_error_literals_headerWrong:
|
||||
return "Header of Literals' block doesn't respect format specification";
|
||||
case ZSTD_ErrorCode.ZSTD_error_parameter_unsupported:
|
||||
return "Unsupported parameter";
|
||||
case ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported:
|
||||
return "Unsupported combination of parameters";
|
||||
case ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound:
|
||||
return "Parameter is out of bound";
|
||||
case ZSTD_ErrorCode.ZSTD_error_init_missing:
|
||||
return "Context should be init first";
|
||||
case ZSTD_ErrorCode.ZSTD_error_memory_allocation:
|
||||
return "Allocation error : not enough memory";
|
||||
case ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall:
|
||||
return "workSpace buffer is not large enough";
|
||||
case ZSTD_ErrorCode.ZSTD_error_stage_wrong:
|
||||
return "Operation not authorized at current processing stage";
|
||||
case ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge:
|
||||
return "tableLog requires too much memory : unsupported";
|
||||
case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge:
|
||||
return "Unsupported max Symbol Value : too large";
|
||||
case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall:
|
||||
return "Specified maxSymbolValue is too small";
|
||||
case ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock:
|
||||
return "This mode cannot generate an uncompressed block";
|
||||
case ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected:
|
||||
return "pledged buffer stability condition is not respected";
|
||||
case ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted:
|
||||
return "Dictionary is corrupted";
|
||||
case ZSTD_ErrorCode.ZSTD_error_dictionary_wrong:
|
||||
return "Dictionary mismatch";
|
||||
case ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed:
|
||||
return "Cannot create Dictionary from provided samples";
|
||||
case ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall:
|
||||
return "Destination buffer is too small";
|
||||
case ZSTD_ErrorCode.ZSTD_error_srcSize_wrong:
|
||||
return "Src size is incorrect";
|
||||
case ZSTD_ErrorCode.ZSTD_error_dstBuffer_null:
|
||||
return "Operation on NULL destination buffer";
|
||||
case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull:
|
||||
return "Operation made no progress over multiple calls, due to output buffer being full";
|
||||
case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty:
|
||||
return "Operation made no progress over multiple calls, due to input being empty";
|
||||
case ZSTD_ErrorCode.ZSTD_error_frameIndex_tooLarge:
|
||||
return "Frame index is too large";
|
||||
case ZSTD_ErrorCode.ZSTD_error_seekableIO:
|
||||
return "An I/O error occurred when reading/seeking";
|
||||
case ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong:
|
||||
return "Destination buffer is wrong";
|
||||
case ZSTD_ErrorCode.ZSTD_error_srcBuffer_wrong:
|
||||
return "Source buffer is wrong";
|
||||
case ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed:
|
||||
return "Block-level external sequence producer returned an error code";
|
||||
case ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid:
|
||||
return "External sequences are not valid";
|
||||
case ZSTD_ErrorCode.ZSTD_error_maxCode:
|
||||
default:
|
||||
return notErrorCode;
|
||||
}
|
||||
}
|
||||
}
|
||||
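
The error helpers above encode an error as the two's-complement of the error enum in a size-typed return value, so every error lands in a small band just below nuint.MaxValue and ERR_isError is a single comparison. A standalone sketch of that convention with an illustrative enum (not the port's ZSTD_ErrorCode):

using System;

static class ErrorCodeDemo
{
    // Illustrative stand-in for ZSTD_ErrorCode; only the encoding pattern matters here.
    enum DemoError { NoError = 0, Generic = 1, MaxCode = 120 }

    static nuint Encode(DemoError e) => unchecked((nuint)(-(int)e));

    // Any value above (nuint)(-MaxCode) is an error; normal sizes never get that large.
    static bool IsError(nuint code) => code > unchecked((nuint)(-(int)DemoError.MaxCode));

    static DemoError Decode(nuint code) =>
        IsError(code) ? unchecked((DemoError)(0 - code)) : DemoError.NoError;

    static void Main()
    {
        nuint ok = 42;                           // a normal "bytes written" result
        nuint err = Encode(DemoError.Generic);
        Console.WriteLine($"{ok}: {IsError(ok)}");                     // 42: False
        Console.WriteLine($"{err}: {IsError(err)} -> {Decode(err)}");  // ...: True -> Generic
    }
}
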
@@ -0,0 +1,7 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

public struct EstimatedBlockSize
{
    public nuint estLitSize;
    public nuint estBlockSize;
}
@@ -0,0 +1,19 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

/*-*************************************
 * Acceleration
 ***************************************/
public struct FASTCOVER_accel_t
{
    /* Percentage of training samples used for ZDICT_finalizeDictionary */
    public uint finalize;

    /* Number of dmer skipped between each dmer counted in computeFrequency */
    public uint skip;

    public FASTCOVER_accel_t(uint finalize, uint skip)
    {
        this.finalize = finalize;
        this.skip = skip;
    }
}
@@ -0,0 +1,19 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

/*-*************************************
 * Context
 ***************************************/
public unsafe struct FASTCOVER_ctx_t
{
    public byte* samples;
    public nuint* offsets;
    public nuint* samplesSizes;
    public nuint nbSamples;
    public nuint nbTrainSamples;
    public nuint nbTestSamples;
    public nuint nbDmers;
    public uint* freqs;
    public uint d;
    public uint f;
    public FASTCOVER_accel_t accelParams;
}
@@ -0,0 +1,12 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

/**
 * Parameters for FASTCOVER_tryParameters().
 */
public unsafe struct FASTCOVER_tryParameters_data_s
{
    public FASTCOVER_ctx_t* ctx;
    public COVER_best_s* best;
    public nuint dictBufferCapacity;
    public ZDICT_cover_params_t parameters;
}
@@ -0,0 +1,7 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

public struct FPStats
{
    public Fingerprint pastEvents;
    public Fingerprint newEvents;
}
@@ -0,0 +1,16 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

/* *****************************************
 * FSE symbol compression API
 *******************************************/
/*!
   This API consists of small unitary functions, which highly benefit from being inlined.
   Hence their body are included in next section.
*/
public unsafe struct FSE_CState_t
{
    public nint value;
    public void* stateTable;
    public void* symbolTT;
    public uint stateLog;
}
@@ -0,0 +1,12 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

/* *****************************************
 * FSE symbol decompression API
 *******************************************/
public unsafe struct FSE_DState_t
{
    public nuint state;

    /* precise table may vary, depending on U16 */
    public void* table;
}
@@ -0,0 +1,8 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

/* ====== Decompression ====== */
public struct FSE_DTableHeader
{
    public ushort tableLog;
    public ushort fastMode;
}
@@ -0,0 +1,6 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

public unsafe struct FSE_DecompressWksp
{
    public fixed short ncount[256];
}
@@ -0,0 +1,8 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

public struct FSE_decode_t
{
    public ushort newState;
    public byte symbol;
    public byte nbBits;
}
13
src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs
Normal file
@@ -0,0 +1,13 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

public enum FSE_repeat
{
    /**< Cannot use the previous table */
    FSE_repeat_none,

    /**< Can use the previous table but it must be checked */
    FSE_repeat_check,

    /**< Can use the previous table and it is assumed to be valid */
    FSE_repeat_valid,
}
@@ -0,0 +1,10 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;

/* *****************************************
 * Implementation of inlined functions
 *******************************************/
public struct FSE_symbolCompressionTransform
{
    public int deltaFindState;
    public uint deltaNbBits;
}
761
src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs
Normal file
@@ -0,0 +1,761 @@
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/*-*************************************
|
||||
* Hash Functions
|
||||
***************************************/
|
||||
/**
|
||||
* Hash the d-byte value pointed to by p and mod 2^f into the frequency vector
|
||||
*/
|
||||
private static nuint FASTCOVER_hashPtrToIndex(void* p, uint f, uint d)
|
||||
{
|
||||
if (d == 6)
|
||||
{
|
||||
return ZSTD_hash6Ptr(p, f);
|
||||
}
|
||||
|
||||
return ZSTD_hash8Ptr(p, f);
|
||||
}
|
||||
|
||||
private static readonly FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = GetArrayPointer(
|
||||
new FASTCOVER_accel_t[11]
|
||||
{
|
||||
new FASTCOVER_accel_t(finalize: 100, skip: 0),
|
||||
new FASTCOVER_accel_t(finalize: 100, skip: 0),
|
||||
new FASTCOVER_accel_t(finalize: 50, skip: 1),
|
||||
new FASTCOVER_accel_t(finalize: 34, skip: 2),
|
||||
new FASTCOVER_accel_t(finalize: 25, skip: 3),
|
||||
new FASTCOVER_accel_t(finalize: 20, skip: 4),
|
||||
new FASTCOVER_accel_t(finalize: 17, skip: 5),
|
||||
new FASTCOVER_accel_t(finalize: 14, skip: 6),
|
||||
new FASTCOVER_accel_t(finalize: 13, skip: 7),
|
||||
new FASTCOVER_accel_t(finalize: 11, skip: 8),
|
||||
new FASTCOVER_accel_t(finalize: 10, skip: 9),
|
||||
}
|
||||
);
|
||||
|
||||
/*-*************************************
|
||||
* Helper functions
|
||||
***************************************/
|
||||
/**
|
||||
* Selects the best segment in an epoch.
|
||||
* Segments of are scored according to the function:
|
||||
*
|
||||
* Let F(d) be the frequency of all dmers with hash value d.
|
||||
* Let S_i be hash value of the dmer at position i of segment S which has length k.
|
||||
*
|
||||
* Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
|
||||
*
|
||||
* Once the dmer with hash value d is in the dictionary we set F(d) = 0.
|
||||
*/
|
||||
private static COVER_segment_t FASTCOVER_selectSegment(
|
||||
FASTCOVER_ctx_t* ctx,
|
||||
uint* freqs,
|
||||
uint begin,
|
||||
uint end,
|
||||
ZDICT_cover_params_t parameters,
|
||||
ushort* segmentFreqs
|
||||
)
|
||||
{
|
||||
/* Constants */
|
||||
uint k = parameters.k;
|
||||
uint d = parameters.d;
|
||||
uint f = ctx->f;
|
||||
uint dmersInK = k - d + 1;
|
||||
/* Try each segment (activeSegment) and save the best (bestSegment) */
|
||||
COVER_segment_t bestSegment = new COVER_segment_t
|
||||
{
|
||||
begin = 0,
|
||||
end = 0,
|
||||
score = 0,
|
||||
};
|
||||
COVER_segment_t activeSegment;
|
||||
activeSegment.begin = begin;
|
||||
activeSegment.end = begin;
|
||||
activeSegment.score = 0;
|
||||
while (activeSegment.end < end)
|
||||
{
|
||||
/* Get hash value of current dmer */
|
||||
nuint idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);
|
||||
if (segmentFreqs[idx] == 0)
|
||||
{
|
||||
activeSegment.score += freqs[idx];
|
||||
}
|
||||
|
||||
activeSegment.end += 1;
|
||||
segmentFreqs[idx] += 1;
|
||||
if (activeSegment.end - activeSegment.begin == dmersInK + 1)
|
||||
{
|
||||
/* Get hash value of the dmer to be eliminated from active segment */
|
||||
nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
|
||||
segmentFreqs[delIndex] -= 1;
|
||||
if (segmentFreqs[delIndex] == 0)
|
||||
{
|
||||
activeSegment.score -= freqs[delIndex];
|
||||
}
|
||||
|
||||
activeSegment.begin += 1;
|
||||
}
|
||||
|
||||
if (activeSegment.score > bestSegment.score)
|
||||
{
|
||||
bestSegment = activeSegment;
|
||||
}
|
||||
}
|
||||
|
||||
while (activeSegment.begin < end)
|
||||
{
|
||||
nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
|
||||
segmentFreqs[delIndex] -= 1;
|
||||
activeSegment.begin += 1;
|
||||
}
|
||||
|
||||
{
|
||||
/* Zero the frequency of hash value of each dmer covered by the chosen segment. */
|
||||
uint pos;
|
||||
for (pos = bestSegment.begin; pos != bestSegment.end; ++pos)
|
||||
{
|
||||
nuint i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d);
|
||||
freqs[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return bestSegment;
|
||||
}
|
||||
|
||||
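
The selection loop above is a sliding window over dmer hashes: a hash contributes its corpus frequency to the score only while it appears in the window at least once. A simplified safe-code sketch over precomputed hash indices; the hashing, epoch bounds and tail handling are omitted and all names are illustrative:

static class SegmentScoreDemo
{
    // `hashAtPos[i]` is the hash index of the dmer starting at position i; `freqs[h]` is the
    // corpus-wide frequency of hash h. A hash is counted once per window, tracked by `inWindow`.
    static (int begin, long score) BestSegment(int[] hashAtPos, long[] freqs, int windowLen)
    {
        var inWindow = new int[freqs.Length];
        long score = 0, bestScore = 0;
        int bestBegin = 0, begin = 0;
        for (int end = 0; end < hashAtPos.Length; end++)
        {
            int h = hashAtPos[end];
            if (inWindow[h]++ == 0)
                score += freqs[h];            // first occurrence in the window counts
            if (end - begin + 1 > windowLen)
            {
                int old = hashAtPos[begin++];
                if (--inWindow[old] == 0)
                    score -= freqs[old];      // last occurrence just left the window
            }
            if (score > bestScore)
            {
                bestScore = score;
                bestBegin = begin;
            }
        }
        return (bestBegin, bestScore);
    }
}
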
private static int FASTCOVER_checkParameters(
|
||||
ZDICT_cover_params_t parameters,
|
||||
nuint maxDictSize,
|
||||
uint f,
|
||||
uint accel
|
||||
)
|
||||
{
|
||||
if (parameters.d == 0 || parameters.k == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (parameters.d != 6 && parameters.d != 8)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (parameters.k > maxDictSize)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (parameters.d > parameters.k)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (f > 31 || f == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (parameters.splitPoint <= 0 || parameters.splitPoint > 1)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (accel > 10 || accel == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up a context initialized with `FASTCOVER_ctx_init()`.
|
||||
*/
|
||||
private static void FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
|
||||
{
|
||||
if (ctx == null)
|
||||
return;
|
||||
free(ctx->freqs);
|
||||
ctx->freqs = null;
|
||||
free(ctx->offsets);
|
||||
ctx->offsets = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate for frequency of hash value of each dmer in ctx->samples
|
||||
*/
|
||||
private static void FASTCOVER_computeFrequency(uint* freqs, FASTCOVER_ctx_t* ctx)
|
||||
{
|
||||
uint f = ctx->f;
|
||||
uint d = ctx->d;
|
||||
uint skip = ctx->accelParams.skip;
|
||||
uint readLength = d > 8 ? d : 8;
|
||||
nuint i;
|
||||
assert(ctx->nbTrainSamples >= 5);
|
||||
assert(ctx->nbTrainSamples <= ctx->nbSamples);
|
||||
for (i = 0; i < ctx->nbTrainSamples; i++)
|
||||
{
|
||||
/* start of current dmer */
|
||||
nuint start = ctx->offsets[i];
|
||||
nuint currSampleEnd = ctx->offsets[i + 1];
|
||||
while (start + readLength <= currSampleEnd)
|
||||
{
|
||||
nuint dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d);
|
||||
freqs[dmerIndex]++;
|
||||
start = start + skip + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepare a context for dictionary building.
|
||||
* The context is only dependent on the parameter `d` and can be used multiple
|
||||
* times.
|
||||
* Returns 0 on success or error code on error.
|
||||
* The context must be destroyed with `FASTCOVER_ctx_destroy()`.
|
||||
*/
|
||||
private static nuint FASTCOVER_ctx_init(
|
||||
FASTCOVER_ctx_t* ctx,
|
||||
void* samplesBuffer,
|
||||
nuint* samplesSizes,
|
||||
uint nbSamples,
|
||||
uint d,
|
||||
double splitPoint,
|
||||
uint f,
|
||||
FASTCOVER_accel_t accelParams
|
||||
)
|
||||
{
|
||||
byte* samples = (byte*)samplesBuffer;
|
||||
nuint totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
|
||||
/* Split samples into testing and training sets */
|
||||
uint nbTrainSamples = splitPoint < 1 ? (uint)(nbSamples * splitPoint) : nbSamples;
|
||||
uint nbTestSamples = splitPoint < 1 ? nbSamples - nbTrainSamples : nbSamples;
|
||||
nuint trainingSamplesSize =
|
||||
splitPoint < 1 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
|
||||
nuint testSamplesSize =
|
||||
splitPoint < 1
|
||||
? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples)
|
||||
: totalSamplesSize;
|
||||
if (
|
||||
totalSamplesSize < (d > sizeof(ulong) ? d : sizeof(ulong))
|
||||
|| totalSamplesSize >= (sizeof(nuint) == 8 ? unchecked((uint)-1) : 1 * (1U << 30))
|
||||
)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
}
|
||||
|
||||
if (nbTrainSamples < 5)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
}
|
||||
|
||||
if (nbTestSamples < 1)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
}
|
||||
|
||||
*ctx = new FASTCOVER_ctx_t
|
||||
{
|
||||
samples = samples,
|
||||
samplesSizes = samplesSizes,
|
||||
nbSamples = nbSamples,
|
||||
nbTrainSamples = nbTrainSamples,
|
||||
nbTestSamples = nbTestSamples,
|
||||
nbDmers = trainingSamplesSize - (d > sizeof(ulong) ? d : sizeof(ulong)) + 1,
|
||||
d = d,
|
||||
f = f,
|
||||
accelParams = accelParams,
|
||||
offsets = (nuint*)calloc(nbSamples + 1, (ulong)sizeof(nuint)),
|
||||
};
|
||||
if (ctx->offsets == null)
|
||||
{
|
||||
FASTCOVER_ctx_destroy(ctx);
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
|
||||
}
|
||||
|
||||
{
|
||||
uint i;
|
||||
ctx->offsets[0] = 0;
|
||||
assert(nbSamples >= 5);
|
||||
for (i = 1; i <= nbSamples; ++i)
|
||||
{
|
||||
ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
|
||||
}
|
||||
}
|
||||
|
||||
ctx->freqs = (uint*)calloc((ulong)1 << (int)f, sizeof(uint));
|
||||
if (ctx->freqs == null)
|
||||
{
|
||||
FASTCOVER_ctx_destroy(ctx);
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
|
||||
}
|
||||
|
||||
FASTCOVER_computeFrequency(ctx->freqs, ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Given the prepared context build the dictionary.
|
||||
*/
|
||||
private static nuint FASTCOVER_buildDictionary(
|
||||
FASTCOVER_ctx_t* ctx,
|
||||
uint* freqs,
|
||||
void* dictBuffer,
|
||||
nuint dictBufferCapacity,
|
||||
ZDICT_cover_params_t parameters,
|
||||
ushort* segmentFreqs
|
||||
)
|
||||
{
|
||||
byte* dict = (byte*)dictBuffer;
|
||||
nuint tail = dictBufferCapacity;
|
||||
/* Divide the data into epochs. We will select one segment from each epoch. */
|
||||
COVER_epoch_info_t epochs = COVER_computeEpochs(
|
||||
(uint)dictBufferCapacity,
|
||||
(uint)ctx->nbDmers,
|
||||
parameters.k,
|
||||
1
|
||||
);
|
||||
const nuint maxZeroScoreRun = 10;
|
||||
nuint zeroScoreRun = 0;
|
||||
nuint epoch;
|
||||
for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num)
|
||||
{
|
||||
uint epochBegin = (uint)(epoch * epochs.size);
|
||||
uint epochEnd = epochBegin + epochs.size;
|
||||
nuint segmentSize;
|
||||
/* Select a segment */
|
||||
COVER_segment_t segment = FASTCOVER_selectSegment(
|
||||
ctx,
|
||||
freqs,
|
||||
epochBegin,
|
||||
epochEnd,
|
||||
parameters,
|
||||
segmentFreqs
|
||||
);
|
||||
if (segment.score == 0)
|
||||
{
|
||||
if (++zeroScoreRun >= maxZeroScoreRun)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
zeroScoreRun = 0;
|
||||
segmentSize =
|
||||
segment.end - segment.begin + parameters.d - 1 < tail
|
||||
? segment.end - segment.begin + parameters.d - 1
|
||||
: tail;
|
||||
if (segmentSize < parameters.d)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
tail -= segmentSize;
|
||||
memcpy(dict + tail, ctx->samples + segment.begin, (uint)segmentSize);
|
||||
}
|
||||
|
||||
return tail;
|
||||
}
|
||||
|
||||
/**
|
||||
* Tries a set of parameters and updates the COVER_best_t with the results.
|
||||
* This function is thread safe if zstd is compiled with multithreaded support.
|
||||
* It takes its parameters as an *OWNING* opaque pointer to support threading.
|
||||
*/
|
||||
private static void FASTCOVER_tryParameters(void* opaque)
|
||||
{
|
||||
/* Save parameters as local variables */
|
||||
FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)opaque;
|
||||
FASTCOVER_ctx_t* ctx = data->ctx;
|
||||
ZDICT_cover_params_t parameters = data->parameters;
|
||||
nuint dictBufferCapacity = data->dictBufferCapacity;
|
||||
nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
/* Initialize array to keep track of frequency of dmer within activeSegment */
|
||||
ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)ctx->f, sizeof(ushort));
|
||||
/* Allocate space for hash table, dict, and freqs */
|
||||
byte* dict = (byte*)malloc(dictBufferCapacity);
|
||||
COVER_dictSelection selection = COVER_dictSelectionError(
|
||||
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC))
|
||||
);
|
||||
uint* freqs = (uint*)malloc(((ulong)1 << (int)ctx->f) * sizeof(uint));
|
||||
if (segmentFreqs == null || dict == null || freqs == null)
|
||||
{
|
||||
goto _cleanup;
|
||||
}
|
||||
|
||||
memcpy(freqs, ctx->freqs, (uint)(((ulong)1 << (int)ctx->f) * sizeof(uint)));
|
||||
{
|
||||
nuint tail = FASTCOVER_buildDictionary(
|
||||
ctx,
|
||||
freqs,
|
||||
dict,
|
||||
dictBufferCapacity,
|
||||
parameters,
|
||||
segmentFreqs
|
||||
);
|
||||
uint nbFinalizeSamples = (uint)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
|
||||
selection = COVER_selectDict(
|
||||
dict + tail,
|
||||
dictBufferCapacity,
|
||||
dictBufferCapacity - tail,
|
||||
ctx->samples,
|
||||
ctx->samplesSizes,
|
||||
nbFinalizeSamples,
|
||||
ctx->nbTrainSamples,
|
||||
ctx->nbSamples,
|
||||
parameters,
|
||||
ctx->offsets,
|
||||
totalCompressedSize
|
||||
);
|
||||
if (COVER_dictSelectionIsError(selection) != 0)
|
||||
{
|
||||
goto _cleanup;
|
||||
}
|
||||
}
|
||||
|
||||
_cleanup:
|
||||
free(dict);
|
||||
COVER_best_finish(data->best, parameters, selection);
|
||||
free(data);
|
||||
free(segmentFreqs);
|
||||
COVER_dictSelectionFree(selection);
|
||||
free(freqs);
|
||||
}
|
||||
|
||||
private static void FASTCOVER_convertToCoverParams(
|
||||
ZDICT_fastCover_params_t fastCoverParams,
|
||||
ZDICT_cover_params_t* coverParams
|
||||
)
|
||||
{
|
||||
coverParams->k = fastCoverParams.k;
|
||||
coverParams->d = fastCoverParams.d;
|
||||
coverParams->steps = fastCoverParams.steps;
|
||||
coverParams->nbThreads = fastCoverParams.nbThreads;
|
||||
coverParams->splitPoint = fastCoverParams.splitPoint;
|
||||
coverParams->zParams = fastCoverParams.zParams;
|
||||
coverParams->shrinkDict = fastCoverParams.shrinkDict;
|
||||
}
|
||||
|
||||
private static void FASTCOVER_convertToFastCoverParams(
|
||||
ZDICT_cover_params_t coverParams,
|
||||
ZDICT_fastCover_params_t* fastCoverParams,
|
||||
uint f,
|
||||
uint accel
|
||||
)
|
||||
{
|
||||
fastCoverParams->k = coverParams.k;
|
||||
fastCoverParams->d = coverParams.d;
|
||||
fastCoverParams->steps = coverParams.steps;
|
||||
fastCoverParams->nbThreads = coverParams.nbThreads;
|
||||
fastCoverParams->splitPoint = coverParams.splitPoint;
|
||||
fastCoverParams->f = f;
|
||||
fastCoverParams->accel = accel;
|
||||
fastCoverParams->zParams = coverParams.zParams;
|
||||
fastCoverParams->shrinkDict = coverParams.shrinkDict;
|
||||
}
|
||||
|
||||
/*! ZDICT_trainFromBuffer_fastCover():
|
||||
* Train a dictionary from an array of samples using a modified version of COVER algorithm.
|
||||
* Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
|
||||
* supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
|
||||
* d and k are required.
|
||||
* All other parameters are optional, will use default values if not provided
|
||||
* The resulting dictionary will be saved into `dictBuffer`.
|
||||
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
|
||||
* or an error code, which can be tested with ZDICT_isError().
|
||||
* See ZDICT_trainFromBuffer() for details on failure modes.
|
||||
* Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.
|
||||
* Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
|
||||
* It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
|
||||
* In general, it's recommended to provide a few thousands samples, though this can vary a lot.
|
||||
* It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
|
||||
*/
|
||||
public static nuint ZDICT_trainFromBuffer_fastCover(
|
||||
void* dictBuffer,
|
||||
nuint dictBufferCapacity,
|
||||
void* samplesBuffer,
|
||||
nuint* samplesSizes,
|
||||
uint nbSamples,
|
||||
ZDICT_fastCover_params_t parameters
|
||||
)
|
||||
{
|
||||
byte* dict = (byte*)dictBuffer;
|
||||
FASTCOVER_ctx_t ctx;
|
||||
ZDICT_cover_params_t coverParams;
|
||||
FASTCOVER_accel_t accelParams;
|
||||
g_displayLevel = (int)parameters.zParams.notificationLevel;
|
||||
parameters.splitPoint = 1;
|
||||
parameters.f = parameters.f == 0 ? 20 : parameters.f;
|
||||
parameters.accel = parameters.accel == 0 ? 1 : parameters.accel;
|
||||
coverParams = new ZDICT_cover_params_t();
|
||||
FASTCOVER_convertToCoverParams(parameters, &coverParams);
|
||||
if (
|
||||
FASTCOVER_checkParameters(
|
||||
coverParams,
|
||||
dictBufferCapacity,
|
||||
parameters.f,
|
||||
parameters.accel
|
||||
) == 0
|
||||
)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
|
||||
}
|
||||
|
||||
if (nbSamples == 0)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
}
|
||||
|
||||
if (dictBufferCapacity < 256)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
|
||||
}
|
||||
|
||||
accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
|
||||
{
|
||||
nuint initVal = FASTCOVER_ctx_init(
|
||||
&ctx,
|
||||
samplesBuffer,
|
||||
samplesSizes,
|
||||
nbSamples,
|
||||
coverParams.d,
|
||||
parameters.splitPoint,
|
||||
parameters.f,
|
||||
accelParams
|
||||
);
|
||||
if (ERR_isError(initVal))
|
||||
{
|
||||
return initVal;
|
||||
}
|
||||
}
|
||||
|
||||
COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel);
|
||||
{
|
||||
/* Initialize array to keep track of frequency of dmer within activeSegment */
|
||||
ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)parameters.f, sizeof(ushort));
|
||||
nuint tail = FASTCOVER_buildDictionary(
|
||||
&ctx,
|
||||
ctx.freqs,
|
||||
dictBuffer,
|
||||
dictBufferCapacity,
|
||||
coverParams,
|
||||
segmentFreqs
|
||||
);
|
||||
uint nbFinalizeSamples = (uint)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
|
||||
nuint dictionarySize = ZDICT_finalizeDictionary(
|
||||
dict,
|
||||
dictBufferCapacity,
|
||||
dict + tail,
|
||||
dictBufferCapacity - tail,
|
||||
samplesBuffer,
|
||||
samplesSizes,
|
||||
nbFinalizeSamples,
|
||||
coverParams.zParams
|
||||
);
|
||||
if (!ERR_isError(dictionarySize)) { }
|
||||
|
||||
FASTCOVER_ctx_destroy(&ctx);
|
||||
free(segmentFreqs);
|
||||
return dictionarySize;
|
||||
}
|
||||
}
|
||||
|
||||
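
For orientation, a hedged usage sketch of the public entry point above. It assumes ZDICT_fastCover_params_t lives in the same namespace with public d/k fields (like the other ported structs) and that leaving the remaining fields zeroed picks the documented defaults; the d/k values and buffer handling are purely illustrative, not a recommended configuration:

using System;
using SharpCompress.Compressors.ZStandard.Unsafe;

public static unsafe class FastCoverTrainingSketch
{
    // Trains a dictionary from `samples` via the raw fastCover entry point shown above.
    // Assumes a non-empty sample set (the port rejects corpora with fewer than 5 training samples).
    public static byte[] Train(byte[][] samples, int dictCapacity)
    {
        // Samples must be concatenated into one flat buffer, with one size entry per sample.
        int total = 0;
        foreach (var s in samples) total += s.Length;
        var flat = new byte[total];
        var sizes = new nuint[samples.Length];
        for (int i = 0, pos = 0; i < samples.Length; pos += samples[i].Length, i++)
        {
            samples[i].CopyTo(flat, pos);
            sizes[i] = (nuint)samples[i].Length;
        }

        var dict = new byte[dictCapacity];
        // d and k are required; everything else left at 0 falls back to defaults (f=20, accel=1).
        var parameters = new ZDICT_fastCover_params_t { d = 8, k = 200 };

        fixed (byte* dictPtr = dict)
        fixed (byte* samplePtr = flat)
        fixed (nuint* sizesPtr = sizes)
        {
            nuint written = Methods.ZDICT_trainFromBuffer_fastCover(
                dictPtr,
                (nuint)dict.Length,
                samplePtr,
                sizesPtr,
                (uint)samples.Length,
                parameters
            );
            // Error codes are encoded as values near nuint.MaxValue, so any result larger than
            // the capacity signals failure (ZDICT_isError is the library's own check for this).
            if (written > (nuint)dict.Length)
                throw new InvalidOperationException("dictionary training failed");
            Array.Resize(ref dict, (int)written);
        }
        return dict;
    }
}
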
/*! ZDICT_optimizeTrainFromBuffer_fastCover():
|
||||
* The same requirements as above hold for all the parameters except `parameters`.
|
||||
* This function tries many parameter combinations (specifically, k and d combinations)
|
||||
* and picks the best parameters. `*parameters` is filled with the best parameters found,
|
||||
* dictionary constructed with those parameters is stored in `dictBuffer`.
|
||||
* All of the parameters d, k, steps, f, and accel are optional.
|
||||
* If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
|
||||
* if steps is zero it defaults to its default value.
|
||||
* If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].
|
||||
* If f is zero, default value of 20 is used.
|
||||
* If accel is zero, default value of 1 is used.
|
||||
*
|
||||
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
|
||||
* or an error code, which can be tested with ZDICT_isError().
|
||||
* On success `*parameters` contains the parameters selected.
|
||||
* See ZDICT_trainFromBuffer() for details on failure modes.
|
||||
* Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
|
||||
*/
|
||||
public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(
|
||||
void* dictBuffer,
|
||||
nuint dictBufferCapacity,
|
||||
void* samplesBuffer,
|
||||
nuint* samplesSizes,
|
||||
uint nbSamples,
|
||||
ZDICT_fastCover_params_t* parameters
|
||||
)
|
||||
{
|
||||
ZDICT_cover_params_t coverParams;
|
||||
FASTCOVER_accel_t accelParams;
|
||||
/* constants */
|
||||
uint nbThreads = parameters->nbThreads;
|
||||
double splitPoint = parameters->splitPoint <= 0 ? 0.75 : parameters->splitPoint;
|
||||
uint kMinD = parameters->d == 0 ? 6 : parameters->d;
|
||||
uint kMaxD = parameters->d == 0 ? 8 : parameters->d;
|
||||
uint kMinK = parameters->k == 0 ? 50 : parameters->k;
|
||||
uint kMaxK = parameters->k == 0 ? 2000 : parameters->k;
|
||||
uint kSteps = parameters->steps == 0 ? 40 : parameters->steps;
|
||||
uint kStepSize = (kMaxK - kMinK) / kSteps > 1 ? (kMaxK - kMinK) / kSteps : 1;
|
||||
uint kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
|
||||
uint f = parameters->f == 0 ? 20 : parameters->f;
|
||||
uint accel = parameters->accel == 0 ? 1 : parameters->accel;
|
||||
const uint shrinkDict = 0;
|
||||
/* Local variables */
|
||||
int displayLevel = (int)parameters->zParams.notificationLevel;
|
||||
uint iteration = 1;
|
||||
uint d;
|
||||
uint k;
|
||||
COVER_best_s best;
|
||||
void* pool = null;
|
||||
int warned = 0;
|
||||
if (splitPoint <= 0 || splitPoint > 1)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
|
||||
}
|
||||
|
||||
if (accel == 0 || accel > 10)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
|
||||
}
|
||||
|
||||
if (kMinK < kMaxD || kMaxK < kMinK)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
|
||||
}
|
||||
|
||||
if (nbSamples == 0)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
|
||||
}
|
||||
|
||||
if (dictBufferCapacity < 256)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
|
||||
}
|
||||
|
||||
if (nbThreads > 1)
|
||||
{
|
||||
pool = POOL_create(nbThreads, 1);
|
||||
if (pool == null)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
|
||||
}
|
||||
}
|
||||
|
||||
COVER_best_init(&best);
|
||||
coverParams = new ZDICT_cover_params_t();
|
||||
FASTCOVER_convertToCoverParams(*parameters, &coverParams);
|
||||
accelParams = FASTCOVER_defaultAccelParameters[accel];
|
||||
g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
|
||||
for (d = kMinD; d <= kMaxD; d += 2)
|
||||
{
|
||||
/* Initialize the context for this value of d */
|
||||
FASTCOVER_ctx_t ctx;
|
||||
{
|
||||
nuint initVal = FASTCOVER_ctx_init(
|
||||
&ctx,
|
||||
samplesBuffer,
|
||||
samplesSizes,
|
||||
nbSamples,
|
||||
d,
|
||||
splitPoint,
|
||||
f,
|
||||
accelParams
|
||||
);
|
||||
if (ERR_isError(initVal))
|
||||
{
|
||||
COVER_best_destroy(&best);
|
||||
POOL_free(pool);
|
||||
return initVal;
|
||||
}
|
||||
}
|
||||
|
||||
if (warned == 0)
|
||||
{
|
||||
COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);
|
||||
warned = 1;
|
||||
}
|
||||
|
||||
for (k = kMinK; k <= kMaxK; k += kStepSize)
|
||||
{
|
||||
/* Prepare the arguments */
|
||||
FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)malloc(
|
||||
(ulong)sizeof(FASTCOVER_tryParameters_data_s)
|
||||
);
|
||||
if (data == null)
|
||||
{
|
||||
COVER_best_destroy(&best);
|
||||
FASTCOVER_ctx_destroy(&ctx);
|
||||
POOL_free(pool);
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
|
||||
}
|
||||
|
||||
data->ctx = &ctx;
|
||||
data->best = &best;
|
||||
data->dictBufferCapacity = dictBufferCapacity;
|
||||
data->parameters = coverParams;
|
||||
data->parameters.k = k;
|
||||
data->parameters.d = d;
|
||||
data->parameters.splitPoint = splitPoint;
|
||||
data->parameters.steps = kSteps;
|
||||
data->parameters.shrinkDict = shrinkDict;
|
||||
data->parameters.zParams.notificationLevel = (uint)g_displayLevel;
|
||||
if (
|
||||
FASTCOVER_checkParameters(
|
||||
data->parameters,
|
||||
dictBufferCapacity,
|
||||
data->ctx->f,
|
||||
accel
|
||||
) == 0
|
||||
)
|
||||
{
|
||||
free(data);
|
||||
continue;
|
||||
}
|
||||
|
||||
COVER_best_start(&best);
|
||||
if (pool != null)
|
||||
{
|
||||
POOL_add(
|
||||
pool,
|
||||
(delegate* managed<void*, void>)(&FASTCOVER_tryParameters),
|
||||
data
|
||||
);
|
||||
}
|
||||
else
|
||||
{
|
||||
FASTCOVER_tryParameters(data);
|
||||
}
|
||||
|
||||
++iteration;
|
||||
}
|
||||
|
||||
COVER_best_wait(&best);
|
||||
FASTCOVER_ctx_destroy(&ctx);
|
||||
}
|
||||
|
||||
{
|
||||
nuint dictSize = best.dictSize;
|
||||
if (ERR_isError(best.compressedSize))
|
||||
{
|
||||
nuint compressedSize = best.compressedSize;
|
||||
COVER_best_destroy(&best);
|
||||
POOL_free(pool);
|
||||
return compressedSize;
|
||||
}
|
||||
|
||||
FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
|
||||
memcpy(dictBuffer, best.dict, (uint)dictSize);
|
||||
COVER_best_destroy(&best);
|
||||
POOL_free(pool);
|
||||
return dictSize;
|
||||
}
|
||||
}
|
||||
}
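// --- Illustrative usage sketch (not part of the ported sources) -----------------------------
// A minimal sketch of how a caller might drive ZDICT_optimizeTrainFromBuffer_fastCover,
// written as if it sat inside this same unsafe Methods class so the port-internal ERR_isError
// helper and ZDICT_fastCover_params_t struct are in scope (a `using System;` directive is
// assumed). The method name, the sample layout and the 110 KiB capacity are assumptions
// made for illustration only.
public static unsafe byte[] TrainDictionarySketch(byte[][] samples, int dictCapacity = 112640)
{
    // Flatten the samples into one contiguous buffer and record each sample's length.
    var sizes = new nuint[samples.Length];
    long total = 0;
    for (var i = 0; i < samples.Length; i++)
    {
        sizes[i] = (nuint)samples[i].Length;
        total += samples[i].Length;
    }
    var flat = new byte[total];
    long offset = 0;
    foreach (var s in samples)
    {
        Array.Copy(s, 0, flat, offset, s.Length);
        offset += s.Length;
    }

    var dict = new byte[dictCapacity];
    // An all-zero parameter struct selects the defaults documented above:
    // d in {6, 8}, k in [50, 2000], steps = 40, f = 20, accel = 1.
    var parameters = default(ZDICT_fastCover_params_t);

    fixed (byte* dictPtr = dict)
    fixed (byte* samplePtr = flat)
    fixed (nuint* sizePtr = sizes)
    {
        nuint written = ZDICT_optimizeTrainFromBuffer_fastCover(
            dictPtr, (nuint)dictCapacity, samplePtr, sizePtr, (uint)samples.Length, &parameters);
        if (ERR_isError(written)) // assumed error-check helper; the C API would use ZDICT_isError()
            return null;          // on success, the best k/d are left in `parameters`
        Array.Resize(ref dict, (int)written);
        return dict;
    }
}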
|
||||
@@ -0,0 +1,7 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public unsafe struct Fingerprint
|
||||
{
|
||||
public fixed uint events[1024];
|
||||
public nuint nbEvents;
|
||||
}
|
||||
198
src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs
Normal file
@@ -0,0 +1,198 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_initCState(FSE_CState_t* statePtr, uint* ct)
|
||||
{
|
||||
void* ptr = ct;
|
||||
ushort* u16ptr = (ushort*)ptr;
|
||||
uint tableLog = MEM_read16(ptr);
|
||||
statePtr->value = (nint)1 << (int)tableLog;
|
||||
statePtr->stateTable = u16ptr + 2;
|
||||
statePtr->symbolTT = ct + 1 + (tableLog != 0 ? 1 << (int)(tableLog - 1) : 1);
|
||||
statePtr->stateLog = tableLog;
|
||||
}
|
||||
|
||||
/*! FSE_initCState2() :
|
||||
* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
|
||||
* uses the smallest state value possible, saving the cost of this symbol */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_initCState2(ref FSE_CState_t statePtr, uint* ct, uint symbol)
|
||||
{
|
||||
FSE_initCState(ref statePtr, ct);
|
||||
{
|
||||
FSE_symbolCompressionTransform symbolTT = (
|
||||
(FSE_symbolCompressionTransform*)statePtr.symbolTT
|
||||
)[symbol];
|
||||
ushort* stateTable = (ushort*)statePtr.stateTable;
|
||||
uint nbBitsOut = symbolTT.deltaNbBits + (1 << 15) >> 16;
|
||||
statePtr.value = (nint)((nbBitsOut << 16) - symbolTT.deltaNbBits);
|
||||
statePtr.value = stateTable[
|
||||
(statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_encodeSymbol(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
ref FSE_CState_t statePtr,
|
||||
uint symbol
|
||||
)
|
||||
{
|
||||
FSE_symbolCompressionTransform symbolTT = (
|
||||
(FSE_symbolCompressionTransform*)statePtr.symbolTT
|
||||
)[symbol];
|
||||
ushort* stateTable = (ushort*)statePtr.stateTable;
|
||||
uint nbBitsOut = (uint)statePtr.value + symbolTT.deltaNbBits >> 16;
|
||||
BIT_addBits(ref bitC_bitContainer, ref bitC_bitPos, (nuint)statePtr.value, nbBitsOut);
|
||||
statePtr.value = stateTable[(statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState];
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_flushCState(
|
||||
ref nuint bitC_bitContainer,
|
||||
ref uint bitC_bitPos,
|
||||
ref sbyte* bitC_ptr,
|
||||
sbyte* bitC_endPtr,
|
||||
ref FSE_CState_t statePtr
|
||||
)
|
||||
{
|
||||
BIT_addBits(
|
||||
ref bitC_bitContainer,
|
||||
ref bitC_bitPos,
|
||||
(nuint)statePtr.value,
|
||||
statePtr.stateLog
|
||||
);
|
||||
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
|
||||
}
|
||||
|
||||
/* FSE_getMaxNbBits() :
|
||||
* Approximate maximum cost of a symbol, in bits.
|
||||
 * Fractional costs get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
|
||||
* note 1 : assume symbolValue is valid (<= maxSymbolValue)
|
||||
* note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint FSE_getMaxNbBits(void* symbolTTPtr, uint symbolValue)
|
||||
{
|
||||
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr;
|
||||
return symbolTT[symbolValue].deltaNbBits + ((1 << 16) - 1) >> 16;
|
||||
}
|
||||
|
||||
/* FSE_bitCost() :
|
||||
* Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
|
||||
* note 1 : assume symbolValue is valid (<= maxSymbolValue)
|
||||
* note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint FSE_bitCost(
|
||||
void* symbolTTPtr,
|
||||
uint tableLog,
|
||||
uint symbolValue,
|
||||
uint accuracyLog
|
||||
)
|
||||
{
|
||||
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr;
|
||||
uint minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
|
||||
uint threshold = minNbBits + 1 << 16;
|
||||
assert(tableLog < 16);
|
||||
assert(accuracyLog < 31 - tableLog);
|
||||
{
|
||||
uint tableSize = (uint)(1 << (int)tableLog);
|
||||
uint deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
|
||||
/* linear interpolation (very approximate) */
|
||||
uint normalizedDeltaFromThreshold =
|
||||
deltaFromThreshold << (int)accuracyLog >> (int)tableLog;
|
||||
uint bitMultiplier = (uint)(1 << (int)accuracyLog);
|
||||
assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
|
||||
assert(normalizedDeltaFromThreshold <= bitMultiplier);
|
||||
return (minNbBits + 1) * bitMultiplier - normalizedDeltaFromThreshold;
|
||||
}
|
||||
}
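    // Worked example for the two cost helpers above (hand-traced, illustrative numbers):
    // take tableLog = 6 and a symbol with normalized frequency 12, so FSE_buildCTable_wksp
    // (see FseCompress.cs in this diff) produces
    //   maxBitsOut  = 6 - ZSTD_highbit32(12 - 1)   = 3
    //   deltaNbBits = (3 << 16) - (12 << 3)        = 196512
    // FSE_getMaxNbBits then returns (196512 + 0xFFFF) >> 16 = 3 bits (the worst case), while
    // FSE_bitCost with accuracyLog = 8 computes
    //   minNbBits = 196512 >> 16                       = 2
    //   delta     = (3 << 16) - (196512 + 64)          = 32
    //   cost      = (2 + 1) * 256 - ((32 << 8) >> 6)   = 640
    // i.e. 640 / 2^8 = 2.5 bits, a linear approximation of the exact -log2(12/64) ≈ 2.42 bits.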
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_initDState(ref FSE_DState_t DStatePtr, ref BIT_DStream_t bitD, uint* dt)
|
||||
{
|
||||
void* ptr = dt;
|
||||
FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr;
|
||||
DStatePtr.state = BIT_readBits(bitD.bitContainer, ref bitD.bitsConsumed, DTableH->tableLog);
|
||||
BIT_reloadDStream(
|
||||
ref bitD.bitContainer,
|
||||
ref bitD.bitsConsumed,
|
||||
ref bitD.ptr,
|
||||
bitD.start,
|
||||
bitD.limitPtr
|
||||
);
|
||||
DStatePtr.table = dt + 1;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte FSE_peekSymbol(FSE_DState_t* DStatePtr)
|
||||
{
|
||||
FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr->table)[DStatePtr->state];
|
||||
return DInfo.symbol;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
|
||||
{
|
||||
FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr->table)[DStatePtr->state];
|
||||
uint nbBits = DInfo.nbBits;
|
||||
nuint lowBits = BIT_readBits(bitD, nbBits);
|
||||
DStatePtr->state = DInfo.newState + lowBits;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte FSE_decodeSymbol(
|
||||
ref FSE_DState_t DStatePtr,
|
||||
nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed
|
||||
)
|
||||
{
|
||||
FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state];
|
||||
uint nbBits = DInfo.nbBits;
|
||||
byte symbol = DInfo.symbol;
|
||||
nuint lowBits = BIT_readBits(bitD_bitContainer, ref bitD_bitsConsumed, nbBits);
|
||||
DStatePtr.state = DInfo.newState + lowBits;
|
||||
return symbol;
|
||||
}
|
||||
|
||||
/*! FSE_decodeSymbolFast() :
|
||||
unsafe, only works if no symbol has a probability > 50% */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte FSE_decodeSymbolFast(
|
||||
ref FSE_DState_t DStatePtr,
|
||||
nuint bitD_bitContainer,
|
||||
ref uint bitD_bitsConsumed
|
||||
)
|
||||
{
|
||||
FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state];
|
||||
uint nbBits = DInfo.nbBits;
|
||||
byte symbol = DInfo.symbol;
|
||||
nuint lowBits = BIT_readBitsFast(bitD_bitContainer, ref bitD_bitsConsumed, nbBits);
|
||||
DStatePtr.state = DInfo.newState + lowBits;
|
||||
return symbol;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint FSE_endOfDState(FSE_DState_t* DStatePtr)
|
||||
{
|
||||
return DStatePtr->state == 0 ? 1U : 0U;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_initCState(ref FSE_CState_t statePtr, uint* ct)
|
||||
{
|
||||
void* ptr = ct;
|
||||
ushort* u16ptr = (ushort*)ptr;
|
||||
uint tableLog = MEM_read16(ptr);
|
||||
statePtr.value = (nint)1 << (int)tableLog;
|
||||
statePtr.stateTable = u16ptr + 2;
|
||||
statePtr.symbolTT = ct + 1 + (tableLog != 0 ? 1 << (int)(tableLog - 1) : 1);
|
||||
statePtr.stateLog = tableLog;
|
||||
}
|
||||
}
|
||||
782
src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs
Normal file
@@ -0,0 +1,782 @@
|
||||
using System;
|
||||
using System.Runtime.InteropServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/* FSE_buildCTable_wksp() :
|
||||
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
|
||||
* wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
|
||||
* workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
|
||||
*/
|
||||
private static nuint FSE_buildCTable_wksp(
|
||||
uint* ct,
|
||||
short* normalizedCounter,
|
||||
uint maxSymbolValue,
|
||||
uint tableLog,
|
||||
void* workSpace,
|
||||
nuint wkspSize
|
||||
)
|
||||
{
|
||||
uint tableSize = (uint)(1 << (int)tableLog);
|
||||
uint tableMask = tableSize - 1;
|
||||
void* ptr = ct;
|
||||
ushort* tableU16 = (ushort*)ptr + 2;
|
||||
/* header */
|
||||
void* FSCT = (uint*)ptr + 1 + (tableLog != 0 ? tableSize >> 1 : 1);
|
||||
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)FSCT;
|
||||
uint step = (tableSize >> 1) + (tableSize >> 3) + 3;
|
||||
uint maxSV1 = maxSymbolValue + 1;
|
||||
/* size = maxSV1 */
|
||||
ushort* cumul = (ushort*)workSpace;
|
||||
/* size = tableSize */
|
||||
byte* tableSymbol = (byte*)(cumul + (maxSV1 + 1));
|
||||
uint highThreshold = tableSize - 1;
|
||||
assert(((nuint)workSpace & 1) == 0);
|
||||
if (
|
||||
sizeof(uint)
|
||||
* ((maxSymbolValue + 2 + (1UL << (int)tableLog)) / 2 + sizeof(ulong) / sizeof(uint))
|
||||
> wkspSize
|
||||
)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
|
||||
tableU16[-2] = (ushort)tableLog;
|
||||
tableU16[-1] = (ushort)maxSymbolValue;
|
||||
assert(tableLog < 16);
|
||||
{
|
||||
uint u;
|
||||
cumul[0] = 0;
|
||||
for (u = 1; u <= maxSV1; u++)
|
||||
{
|
||||
if (normalizedCounter[u - 1] == -1)
|
||||
{
|
||||
cumul[u] = (ushort)(cumul[u - 1] + 1);
|
||||
tableSymbol[highThreshold--] = (byte)(u - 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(normalizedCounter[u - 1] >= 0);
|
||||
cumul[u] = (ushort)(cumul[u - 1] + (ushort)normalizedCounter[u - 1]);
|
||||
assert(cumul[u] >= cumul[u - 1]);
|
||||
}
|
||||
}
|
||||
|
||||
cumul[maxSV1] = (ushort)(tableSize + 1);
|
||||
}
|
||||
|
||||
if (highThreshold == tableSize - 1)
|
||||
{
|
||||
/* size = tableSize + 8 (may write beyond tableSize) */
|
||||
byte* spread = tableSymbol + tableSize;
|
||||
{
|
||||
const ulong add = 0x0101010101010101UL;
|
||||
nuint pos = 0;
|
||||
ulong sv = 0;
|
||||
uint s;
|
||||
for (s = 0; s < maxSV1; ++s, sv += add)
|
||||
{
|
||||
int i;
|
||||
int n = normalizedCounter[s];
|
||||
MEM_write64(spread + pos, sv);
|
||||
for (i = 8; i < n; i += 8)
|
||||
{
|
||||
MEM_write64(spread + pos + i, sv);
|
||||
}
|
||||
|
||||
assert(n >= 0);
|
||||
pos += (nuint)n;
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
nuint position = 0;
|
||||
nuint s;
|
||||
/* Experimentally determined optimal unroll */
|
||||
const nuint unroll = 2;
|
||||
assert(tableSize % unroll == 0);
|
||||
for (s = 0; s < tableSize; s += unroll)
|
||||
{
|
||||
nuint u;
|
||||
for (u = 0; u < unroll; ++u)
|
||||
{
|
||||
nuint uPosition = position + u * step & tableMask;
|
||||
tableSymbol[uPosition] = spread[s + u];
|
||||
}
|
||||
|
||||
position = position + unroll * step & tableMask;
|
||||
}
|
||||
|
||||
assert(position == 0);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
uint position = 0;
|
||||
uint symbol;
|
||||
for (symbol = 0; symbol < maxSV1; symbol++)
|
||||
{
|
||||
int nbOccurrences;
|
||||
int freq = normalizedCounter[symbol];
|
||||
for (nbOccurrences = 0; nbOccurrences < freq; nbOccurrences++)
|
||||
{
|
||||
tableSymbol[position] = (byte)symbol;
|
||||
position = position + step & tableMask;
|
||||
while (position > highThreshold)
|
||||
position = position + step & tableMask;
|
||||
}
|
||||
}
|
||||
|
||||
assert(position == 0);
|
||||
}
|
||||
|
||||
{
|
||||
uint u;
|
||||
for (u = 0; u < tableSize; u++)
|
||||
{
|
||||
/* note : static analyzer may not understand tableSymbol is properly initialized */
|
||||
byte s = tableSymbol[u];
|
||||
tableU16[cumul[s]++] = (ushort)(tableSize + u);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
uint total = 0;
|
||||
uint s;
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
switch (normalizedCounter[s])
|
||||
{
|
||||
case 0:
|
||||
symbolTT[s].deltaNbBits = (tableLog + 1 << 16) - (uint)(1 << (int)tableLog);
|
||||
break;
|
||||
case -1:
|
||||
case 1:
|
||||
symbolTT[s].deltaNbBits = (tableLog << 16) - (uint)(1 << (int)tableLog);
|
||||
assert(total <= 2147483647);
|
||||
symbolTT[s].deltaFindState = (int)(total - 1);
|
||||
total++;
|
||||
break;
|
||||
default:
|
||||
assert(normalizedCounter[s] > 1);
|
||||
|
||||
{
|
||||
uint maxBitsOut =
|
||||
tableLog - ZSTD_highbit32((uint)normalizedCounter[s] - 1);
|
||||
uint minStatePlus = (uint)normalizedCounter[s] << (int)maxBitsOut;
|
||||
symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
|
||||
symbolTT[s].deltaFindState = (int)(total - (uint)normalizedCounter[s]);
|
||||
total += (uint)normalizedCounter[s];
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*-**************************************************************
|
||||
* FSE NCount encoding
|
||||
****************************************************************/
|
||||
private static nuint FSE_NCountWriteBound(uint maxSymbolValue, uint tableLog)
|
||||
{
|
||||
nuint maxHeaderSize = ((maxSymbolValue + 1) * tableLog + 4 + 2) / 8 + 1 + 2;
|
||||
return maxSymbolValue != 0 ? maxHeaderSize : 512;
|
||||
}
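    // Worked example (illustrative numbers): for maxSymbolValue = 255 and tableLog = 11 the
    // bound is ((255 + 1) * 11 + 4 + 2) / 8 + 1 + 2 = 2822 / 8 + 3 = 355 bytes, so the NCount
    // header stays in the low hundreds of bytes even for a full byte alphabet.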
|
||||
|
||||
private static nuint FSE_writeNCount_generic(
|
||||
void* header,
|
||||
nuint headerBufferSize,
|
||||
short* normalizedCounter,
|
||||
uint maxSymbolValue,
|
||||
uint tableLog,
|
||||
uint writeIsSafe
|
||||
)
|
||||
{
|
||||
byte* ostart = (byte*)header;
|
||||
byte* @out = ostart;
|
||||
byte* oend = ostart + headerBufferSize;
|
||||
int nbBits;
|
||||
int tableSize = 1 << (int)tableLog;
|
||||
int remaining;
|
||||
int threshold;
|
||||
uint bitStream = 0;
|
||||
int bitCount = 0;
|
||||
uint symbol = 0;
|
||||
uint alphabetSize = maxSymbolValue + 1;
|
||||
int previousIs0 = 0;
|
||||
bitStream += tableLog - 5 << bitCount;
|
||||
bitCount += 4;
|
||||
remaining = tableSize + 1;
|
||||
threshold = tableSize;
|
||||
nbBits = (int)tableLog + 1;
|
||||
while (symbol < alphabetSize && remaining > 1)
|
||||
{
|
||||
if (previousIs0 != 0)
|
||||
{
|
||||
uint start = symbol;
|
||||
while (symbol < alphabetSize && normalizedCounter[symbol] == 0)
|
||||
symbol++;
|
||||
if (symbol == alphabetSize)
|
||||
break;
|
||||
while (symbol >= start + 24)
|
||||
{
|
||||
start += 24;
|
||||
bitStream += 0xFFFFU << bitCount;
|
||||
if (writeIsSafe == 0 && @out > oend - 2)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
|
||||
@out[0] = (byte)bitStream;
|
||||
@out[1] = (byte)(bitStream >> 8);
|
||||
@out += 2;
|
||||
bitStream >>= 16;
|
||||
}
|
||||
|
||||
while (symbol >= start + 3)
|
||||
{
|
||||
start += 3;
|
||||
bitStream += 3U << bitCount;
|
||||
bitCount += 2;
|
||||
}
|
||||
|
||||
bitStream += symbol - start << bitCount;
|
||||
bitCount += 2;
|
||||
if (bitCount > 16)
|
||||
{
|
||||
if (writeIsSafe == 0 && @out > oend - 2)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
|
||||
@out[0] = (byte)bitStream;
|
||||
@out[1] = (byte)(bitStream >> 8);
|
||||
@out += 2;
|
||||
bitStream >>= 16;
|
||||
bitCount -= 16;
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
int count = normalizedCounter[symbol++];
|
||||
int max = 2 * threshold - 1 - remaining;
|
||||
remaining -= count < 0 ? -count : count;
|
||||
count++;
|
||||
if (count >= threshold)
|
||||
count += max;
|
||||
bitStream += (uint)count << bitCount;
|
||||
bitCount += nbBits;
|
||||
bitCount -= count < max ? 1 : 0;
|
||||
previousIs0 = count == 1 ? 1 : 0;
|
||||
if (remaining < 1)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
while (remaining < threshold)
|
||||
{
|
||||
nbBits--;
|
||||
threshold >>= 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (bitCount > 16)
|
||||
{
|
||||
if (writeIsSafe == 0 && @out > oend - 2)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
|
||||
@out[0] = (byte)bitStream;
|
||||
@out[1] = (byte)(bitStream >> 8);
|
||||
@out += 2;
|
||||
bitStream >>= 16;
|
||||
bitCount -= 16;
|
||||
}
|
||||
}
|
||||
|
||||
if (remaining != 1)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
assert(symbol <= alphabetSize);
|
||||
if (writeIsSafe == 0 && @out > oend - 2)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
|
||||
@out[0] = (byte)bitStream;
|
||||
@out[1] = (byte)(bitStream >> 8);
|
||||
@out += (bitCount + 7) / 8;
|
||||
assert(@out >= ostart);
|
||||
return (nuint)(@out - ostart);
|
||||
}
|
||||
|
||||
/*! FSE_writeNCount():
|
||||
Compactly save 'normalizedCounter' into 'buffer'.
|
||||
@return : size of the compressed table,
|
||||
or an errorCode, which can be tested using FSE_isError(). */
|
||||
private static nuint FSE_writeNCount(
|
||||
void* buffer,
|
||||
nuint bufferSize,
|
||||
short* normalizedCounter,
|
||||
uint maxSymbolValue,
|
||||
uint tableLog
|
||||
)
|
||||
{
|
||||
if (tableLog > 14 - 2)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
|
||||
if (tableLog < 5)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
|
||||
return FSE_writeNCount_generic(
|
||||
buffer,
|
||||
bufferSize,
|
||||
normalizedCounter,
|
||||
maxSymbolValue,
|
||||
tableLog,
|
||||
0
|
||||
);
|
||||
return FSE_writeNCount_generic(
|
||||
buffer,
|
||||
bufferSize,
|
||||
normalizedCounter,
|
||||
maxSymbolValue,
|
||||
tableLog,
|
||||
1
|
||||
);
|
||||
}
|
||||
|
||||
/* provides the minimum logSize to safely represent a distribution */
|
||||
private static uint FSE_minTableLog(nuint srcSize, uint maxSymbolValue)
|
||||
{
|
||||
uint minBitsSrc = ZSTD_highbit32((uint)srcSize) + 1;
|
||||
uint minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2;
|
||||
uint minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
|
||||
assert(srcSize > 1);
|
||||
return minBits;
|
||||
}
|
||||
|
||||
/* *****************************************
|
||||
* FSE advanced API
|
||||
***************************************** */
|
||||
private static uint FSE_optimalTableLog_internal(
|
||||
uint maxTableLog,
|
||||
nuint srcSize,
|
||||
uint maxSymbolValue,
|
||||
uint minus
|
||||
)
|
||||
{
|
||||
uint maxBitsSrc = ZSTD_highbit32((uint)(srcSize - 1)) - minus;
|
||||
uint tableLog = maxTableLog;
|
||||
uint minBits = FSE_minTableLog(srcSize, maxSymbolValue);
|
||||
assert(srcSize > 1);
|
||||
if (tableLog == 0)
|
||||
tableLog = 13 - 2;
|
||||
if (maxBitsSrc < tableLog)
|
||||
tableLog = maxBitsSrc;
|
||||
if (minBits > tableLog)
|
||||
tableLog = minBits;
|
||||
if (tableLog < 5)
|
||||
tableLog = 5;
|
||||
if (tableLog > 14 - 2)
|
||||
tableLog = 14 - 2;
|
||||
return tableLog;
|
||||
}
|
||||
|
||||
/*! FSE_optimalTableLog():
|
||||
dynamically downsize 'tableLog' when conditions are met.
|
||||
It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
|
||||
@return : recommended tableLog (necessarily <= 'maxTableLog') */
|
||||
private static uint FSE_optimalTableLog(uint maxTableLog, nuint srcSize, uint maxSymbolValue)
|
||||
{
|
||||
return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
|
||||
}
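    // Standalone sketch of the table-log selection above, using System.Numerics.BitOperations.Log2
    // in place of the port's ZSTD_highbit32 (identical for non-zero inputs). The method name and
    // the example numbers are invented for illustration; drop it into any static class.
    static uint OptimalTableLogSketch(uint maxTableLog, nuint srcSize, uint maxSymbolValue)
    {
        const uint minus = 2;                                   // FSE_optimalTableLog passes minus = 2
        uint maxBitsSrc = (uint)System.Numerics.BitOperations.Log2((uint)(srcSize - 1)) - minus;
        uint minBitsSrc = (uint)System.Numerics.BitOperations.Log2((uint)srcSize) + 1;
        uint minBitsSymbols = (uint)System.Numerics.BitOperations.Log2(maxSymbolValue) + 2;
        uint minBits = Math.Min(minBitsSrc, minBitsSymbols);    // FSE_minTableLog

        uint tableLog = maxTableLog == 0 ? 11u : maxTableLog;   // default: 13 - 2
        if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;       // accuracy beyond the input size is wasted
        if (minBits > tableLog) tableLog = minBits;             // but never drop below the safe minimum
        return Math.Clamp(tableLog, 5u, 12u);                   // 5 .. 14 - 2
    }
    // e.g. OptimalTableLogSketch(12, 1000, 255) == 9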
|
||||
|
||||
/* Secondary normalization method.
|
||||
To be used when primary method fails. */
|
||||
private static nuint FSE_normalizeM2(
|
||||
short* norm,
|
||||
uint tableLog,
|
||||
uint* count,
|
||||
nuint total,
|
||||
uint maxSymbolValue,
|
||||
short lowProbCount
|
||||
)
|
||||
{
|
||||
const short NOT_YET_ASSIGNED = -2;
|
||||
uint s;
|
||||
uint distributed = 0;
|
||||
uint ToDistribute;
|
||||
/* Init */
|
||||
uint lowThreshold = (uint)(total >> (int)tableLog);
|
||||
uint lowOne = (uint)(total * 3 >> (int)(tableLog + 1));
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
if (count[s] == 0)
|
||||
{
|
||||
norm[s] = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (count[s] <= lowThreshold)
|
||||
{
|
||||
norm[s] = lowProbCount;
|
||||
distributed++;
|
||||
total -= count[s];
|
||||
continue;
|
||||
}
|
||||
|
||||
if (count[s] <= lowOne)
|
||||
{
|
||||
norm[s] = 1;
|
||||
distributed++;
|
||||
total -= count[s];
|
||||
continue;
|
||||
}
|
||||
|
||||
norm[s] = NOT_YET_ASSIGNED;
|
||||
}
|
||||
|
||||
ToDistribute = (uint)(1 << (int)tableLog) - distributed;
|
||||
if (ToDistribute == 0)
|
||||
return 0;
|
||||
if (total / ToDistribute > lowOne)
|
||||
{
|
||||
lowOne = (uint)(total * 3 / (ToDistribute * 2));
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
if (norm[s] == NOT_YET_ASSIGNED && count[s] <= lowOne)
|
||||
{
|
||||
norm[s] = 1;
|
||||
distributed++;
|
||||
total -= count[s];
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
ToDistribute = (uint)(1 << (int)tableLog) - distributed;
|
||||
}
|
||||
|
||||
if (distributed == maxSymbolValue + 1)
|
||||
{
|
||||
/* all values are pretty poor;
|
||||
probably incompressible data (should have already been detected);
|
||||
find max, then give all remaining points to max */
|
||||
uint maxV = 0,
|
||||
maxC = 0;
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
if (count[s] > maxC)
|
||||
{
|
||||
maxV = s;
|
||||
maxC = count[s];
|
||||
}
|
||||
|
||||
norm[maxV] += (short)ToDistribute;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (total == 0)
|
||||
{
|
||||
for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
|
||||
if (norm[s] > 0)
|
||||
{
|
||||
ToDistribute--;
|
||||
norm[s]++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
{
|
||||
ulong vStepLog = 62 - tableLog;
|
||||
ulong mid = (1UL << (int)(vStepLog - 1)) - 1;
|
||||
/* scale on remaining */
|
||||
ulong rStep = (((ulong)1 << (int)vStepLog) * ToDistribute + mid) / (uint)total;
|
||||
ulong tmpTotal = mid;
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
if (norm[s] == NOT_YET_ASSIGNED)
|
||||
{
|
||||
ulong end = tmpTotal + count[s] * rStep;
|
||||
uint sStart = (uint)(tmpTotal >> (int)vStepLog);
|
||||
uint sEnd = (uint)(end >> (int)vStepLog);
|
||||
uint weight = sEnd - sStart;
|
||||
if (weight < 1)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
norm[s] = (short)weight;
|
||||
tmpTotal = end;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if NET7_0_OR_GREATER
|
||||
private static ReadOnlySpan<uint> Span_rtbTable =>
|
||||
new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
|
||||
private static uint* rtbTable =>
|
||||
(uint*)
|
||||
System.Runtime.CompilerServices.Unsafe.AsPointer(
|
||||
ref MemoryMarshal.GetReference(Span_rtbTable)
|
||||
);
|
||||
#else
|
||||
|
||||
private static readonly uint* rtbTable = GetArrayPointer(
|
||||
new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }
|
||||
);
|
||||
#endif
|
||||
/*! FSE_normalizeCount():
|
||||
normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
|
||||
'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
|
||||
useLowProbCount is a boolean parameter which trades off compressed size for
|
||||
faster header decoding. When it is set to 1, the compressed data will be slightly
|
||||
smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
|
||||
faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
|
||||
is a good default, since header deserialization makes a big speed difference.
|
||||
Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
|
||||
@return : tableLog,
|
||||
or an errorCode, which can be tested using FSE_isError() */
|
||||
private static nuint FSE_normalizeCount(
|
||||
short* normalizedCounter,
|
||||
uint tableLog,
|
||||
uint* count,
|
||||
nuint total,
|
||||
uint maxSymbolValue,
|
||||
uint useLowProbCount
|
||||
)
|
||||
{
|
||||
if (tableLog == 0)
|
||||
tableLog = 13 - 2;
|
||||
if (tableLog < 5)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
if (tableLog > 14 - 2)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
|
||||
if (tableLog < FSE_minTableLog(total, maxSymbolValue))
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
{
|
||||
short lowProbCount = (short)(useLowProbCount != 0 ? -1 : 1);
|
||||
ulong scale = 62 - tableLog;
|
||||
/* <== here, one division ! */
|
||||
ulong step = ((ulong)1 << 62) / (uint)total;
|
||||
ulong vStep = 1UL << (int)(scale - 20);
|
||||
int stillToDistribute = 1 << (int)tableLog;
|
||||
uint s;
|
||||
uint largest = 0;
|
||||
short largestP = 0;
|
||||
uint lowThreshold = (uint)(total >> (int)tableLog);
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
if (count[s] == total)
|
||||
return 0;
|
||||
if (count[s] == 0)
|
||||
{
|
||||
normalizedCounter[s] = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (count[s] <= lowThreshold)
|
||||
{
|
||||
normalizedCounter[s] = lowProbCount;
|
||||
stillToDistribute--;
|
||||
}
|
||||
else
|
||||
{
|
||||
short proba = (short)(count[s] * step >> (int)scale);
|
||||
if (proba < 8)
|
||||
{
|
||||
ulong restToBeat = vStep * rtbTable[proba];
|
||||
proba += (short)(
|
||||
count[s] * step - ((ulong)proba << (int)scale) > restToBeat ? 1 : 0
|
||||
);
|
||||
}
|
||||
|
||||
if (proba > largestP)
|
||||
{
|
||||
largestP = proba;
|
||||
largest = s;
|
||||
}
|
||||
|
||||
normalizedCounter[s] = proba;
|
||||
stillToDistribute -= proba;
|
||||
}
|
||||
}
|
||||
|
||||
if (-stillToDistribute >= normalizedCounter[largest] >> 1)
|
||||
{
|
||||
/* corner case, need another normalization method */
|
||||
nuint errorCode = FSE_normalizeM2(
|
||||
normalizedCounter,
|
||||
tableLog,
|
||||
count,
|
||||
total,
|
||||
maxSymbolValue,
|
||||
lowProbCount
|
||||
);
|
||||
if (ERR_isError(errorCode))
|
||||
return errorCode;
|
||||
}
|
||||
else
|
||||
normalizedCounter[largest] += (short)stillToDistribute;
|
||||
}
|
||||
|
||||
return tableLog;
|
||||
}
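    // Sketch of the contract documented above, written as if it sat inside this same Methods
    // class so the private FSE_normalizeCount and ERR_isError helpers are in scope; the
    // histogram, the symbol count and the helper name are invented for illustration.
    public static unsafe void NormalizeCountSketch()
    {
        uint[] count = { 500, 300, 120, 60, 15, 4, 1 };   // raw frequencies, total == 1000
        uint maxSymbolValue = 6;
        short[] norm = new short[maxSymbolValue + 1];

        fixed (uint* countPtr = count)
        fixed (short* normPtr = norm)
        {
            // tableLog == 0 lets the function pick its default (11); useLowProbCount == 1
            // enables the -1 "low probability" marker described above.
            nuint tableLog = FSE_normalizeCount(normPtr, 0, countPtr, (nuint)1000, maxSymbolValue, 1);
            if (ERR_isError(tableLog))
                return;

            // Invariant: counting every -1 entry as one table cell, the normalized
            // frequencies sum to exactly 1 << tableLog.
            long sum = 0;
            foreach (short n in norm)
                sum += n == -1 ? 1 : n;
            System.Diagnostics.Debug.Assert(sum == 1L << (int)tableLog);
        }
    }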
|
||||
|
||||
/* fake FSE_CTable, for rle input (always same symbol) */
|
||||
private static nuint FSE_buildCTable_rle(uint* ct, byte symbolValue)
|
||||
{
|
||||
void* ptr = ct;
|
||||
ushort* tableU16 = (ushort*)ptr + 2;
|
||||
void* FSCTptr = (uint*)ptr + 2;
|
||||
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)FSCTptr;
|
||||
tableU16[-2] = 0;
|
||||
tableU16[-1] = symbolValue;
|
||||
tableU16[0] = 0;
|
||||
tableU16[1] = 0;
|
||||
symbolTT[symbolValue].deltaNbBits = 0;
|
||||
symbolTT[symbolValue].deltaFindState = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
private static nuint FSE_compress_usingCTable_generic(
|
||||
void* dst,
|
||||
nuint dstSize,
|
||||
void* src,
|
||||
nuint srcSize,
|
||||
uint* ct,
|
||||
uint fast
|
||||
)
|
||||
{
|
||||
byte* istart = (byte*)src;
|
||||
byte* iend = istart + srcSize;
|
||||
byte* ip = iend;
|
||||
BIT_CStream_t bitC;
|
||||
System.Runtime.CompilerServices.Unsafe.SkipInit(out bitC);
|
||||
FSE_CState_t CState1,
|
||||
CState2;
|
||||
System.Runtime.CompilerServices.Unsafe.SkipInit(out CState1);
|
||||
System.Runtime.CompilerServices.Unsafe.SkipInit(out CState2);
|
||||
if (srcSize <= 2)
|
||||
return 0;
|
||||
{
|
||||
nuint initError = BIT_initCStream(ref bitC, dst, dstSize);
|
||||
if (ERR_isError(initError))
|
||||
return 0;
|
||||
}
|
||||
|
||||
nuint bitC_bitContainer = bitC.bitContainer;
|
||||
uint bitC_bitPos = bitC.bitPos;
|
||||
sbyte* bitC_ptr = bitC.ptr;
|
||||
sbyte* bitC_endPtr = bitC.endPtr;
|
||||
if ((srcSize & 1) != 0)
|
||||
{
|
||||
FSE_initCState2(ref CState1, ct, *--ip);
|
||||
FSE_initCState2(ref CState2, ct, *--ip);
|
||||
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
|
||||
if (fast != 0)
|
||||
BIT_flushBitsFast(
|
||||
ref bitC_bitContainer,
|
||||
ref bitC_bitPos,
|
||||
ref bitC_ptr,
|
||||
bitC_endPtr
|
||||
);
|
||||
else
|
||||
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
|
||||
}
|
||||
else
|
||||
{
|
||||
FSE_initCState2(ref CState2, ct, *--ip);
|
||||
FSE_initCState2(ref CState1, ct, *--ip);
|
||||
}
|
||||
|
||||
srcSize -= 2;
|
||||
if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7 && (srcSize & 2) != 0)
|
||||
{
|
||||
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip);
|
||||
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
|
||||
if (fast != 0)
|
||||
BIT_flushBitsFast(
|
||||
ref bitC_bitContainer,
|
||||
ref bitC_bitPos,
|
||||
ref bitC_ptr,
|
||||
bitC_endPtr
|
||||
);
|
||||
else
|
||||
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
|
||||
}
|
||||
|
||||
while (ip > istart)
|
||||
{
|
||||
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip);
|
||||
if (sizeof(nuint) * 8 < (14 - 2) * 2 + 7)
|
||||
if (fast != 0)
|
||||
BIT_flushBitsFast(
|
||||
ref bitC_bitContainer,
|
||||
ref bitC_bitPos,
|
||||
ref bitC_ptr,
|
||||
bitC_endPtr
|
||||
);
|
||||
else
|
||||
BIT_flushBits(
|
||||
ref bitC_bitContainer,
|
||||
ref bitC_bitPos,
|
||||
ref bitC_ptr,
|
||||
bitC_endPtr
|
||||
);
|
||||
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
|
||||
if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7)
|
||||
{
|
||||
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip);
|
||||
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
|
||||
}
|
||||
|
||||
if (fast != 0)
|
||||
BIT_flushBitsFast(
|
||||
ref bitC_bitContainer,
|
||||
ref bitC_bitPos,
|
||||
ref bitC_ptr,
|
||||
bitC_endPtr
|
||||
);
|
||||
else
|
||||
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
|
||||
}
|
||||
|
||||
FSE_flushCState(
|
||||
ref bitC_bitContainer,
|
||||
ref bitC_bitPos,
|
||||
ref bitC_ptr,
|
||||
bitC_endPtr,
|
||||
ref CState2
|
||||
);
|
||||
FSE_flushCState(
|
||||
ref bitC_bitContainer,
|
||||
ref bitC_bitPos,
|
||||
ref bitC_ptr,
|
||||
bitC_endPtr,
|
||||
ref CState1
|
||||
);
|
||||
return BIT_closeCStream(
|
||||
ref bitC_bitContainer,
|
||||
ref bitC_bitPos,
|
||||
bitC_ptr,
|
||||
bitC_endPtr,
|
||||
bitC.startPtr
|
||||
);
|
||||
}
|
||||
|
||||
/*! FSE_compress_usingCTable():
|
||||
Compress `src` using `ct` into `dst` which must be already allocated.
|
||||
@return : size of compressed data (<= `dstCapacity`),
|
||||
or 0 if compressed data could not fit into `dst`,
|
||||
or an errorCode, which can be tested using FSE_isError() */
|
||||
private static nuint FSE_compress_usingCTable(
|
||||
void* dst,
|
||||
nuint dstSize,
|
||||
void* src,
|
||||
nuint srcSize,
|
||||
uint* ct
|
||||
)
|
||||
{
|
||||
uint fast = dstSize >= srcSize + (srcSize >> 7) + 4 + (nuint)sizeof(nuint) ? 1U : 0U;
|
||||
if (fast != 0)
|
||||
return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
|
||||
else
|
||||
return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
|
||||
}
|
||||
|
||||
/*-*****************************************
|
||||
* Tool functions
|
||||
******************************************/
|
||||
private static nuint FSE_compressBound(nuint size)
|
||||
{
|
||||
return 512 + (size + (size >> 7) + 4 + (nuint)sizeof(nuint));
|
||||
}
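    // Worked example (illustrative): on a 64-bit build (sizeof(nuint) == 8) a 10_000-byte
    // input gives 512 + (10_000 + 78 + 4 + 8) = 10_602 bytes as the worst-case bound,
    // i.e. the input size plus a little under 1% plus a fixed 524-byte margin.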
|
||||
}
|
||||
462
src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs
Normal file
@@ -0,0 +1,462 @@
|
||||
using System.Runtime.CompilerServices;
|
||||
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
|
||||
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
private static nuint FSE_buildDTable_internal(
|
||||
uint* dt,
|
||||
short* normalizedCounter,
|
||||
uint maxSymbolValue,
|
||||
uint tableLog,
|
||||
void* workSpace,
|
||||
nuint wkspSize
|
||||
)
|
||||
{
|
||||
/* because *dt is unsigned, 32-bits aligned on 32-bits */
|
||||
void* tdPtr = dt + 1;
|
||||
FSE_decode_t* tableDecode = (FSE_decode_t*)tdPtr;
|
||||
ushort* symbolNext = (ushort*)workSpace;
|
||||
byte* spread = (byte*)(symbolNext + maxSymbolValue + 1);
|
||||
uint maxSV1 = maxSymbolValue + 1;
|
||||
uint tableSize = (uint)(1 << (int)tableLog);
|
||||
uint highThreshold = tableSize - 1;
|
||||
if (sizeof(short) * (maxSymbolValue + 1) + (1UL << (int)tableLog) + 8 > wkspSize)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge));
|
||||
if (maxSymbolValue > 255)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge));
|
||||
if (tableLog > 14 - 2)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
|
||||
{
|
||||
FSE_DTableHeader DTableH;
|
||||
DTableH.tableLog = (ushort)tableLog;
|
||||
DTableH.fastMode = 1;
|
||||
{
|
||||
short largeLimit = (short)(1 << (int)(tableLog - 1));
|
||||
uint s;
|
||||
for (s = 0; s < maxSV1; s++)
|
||||
{
|
||||
if (normalizedCounter[s] == -1)
|
||||
{
|
||||
tableDecode[highThreshold--].symbol = (byte)s;
|
||||
symbolNext[s] = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (normalizedCounter[s] >= largeLimit)
|
||||
DTableH.fastMode = 0;
|
||||
symbolNext[s] = (ushort)normalizedCounter[s];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(dt, &DTableH, (uint)sizeof(FSE_DTableHeader));
|
||||
}
|
||||
|
||||
if (highThreshold == tableSize - 1)
|
||||
{
|
||||
nuint tableMask = tableSize - 1;
|
||||
nuint step = (tableSize >> 1) + (tableSize >> 3) + 3;
|
||||
{
|
||||
const ulong add = 0x0101010101010101UL;
|
||||
nuint pos = 0;
|
||||
ulong sv = 0;
|
||||
uint s;
|
||||
for (s = 0; s < maxSV1; ++s, sv += add)
|
||||
{
|
||||
int i;
|
||||
int n = normalizedCounter[s];
|
||||
MEM_write64(spread + pos, sv);
|
||||
for (i = 8; i < n; i += 8)
|
||||
{
|
||||
MEM_write64(spread + pos + i, sv);
|
||||
}
|
||||
|
||||
pos += (nuint)n;
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
nuint position = 0;
|
||||
nuint s;
|
||||
const nuint unroll = 2;
|
||||
assert(tableSize % unroll == 0);
|
||||
for (s = 0; s < tableSize; s += unroll)
|
||||
{
|
||||
nuint u;
|
||||
for (u = 0; u < unroll; ++u)
|
||||
{
|
||||
nuint uPosition = position + u * step & tableMask;
|
||||
tableDecode[uPosition].symbol = spread[s + u];
|
||||
}
|
||||
|
||||
position = position + unroll * step & tableMask;
|
||||
}
|
||||
|
||||
assert(position == 0);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
uint tableMask = tableSize - 1;
|
||||
uint step = (tableSize >> 1) + (tableSize >> 3) + 3;
|
||||
uint s,
|
||||
position = 0;
|
||||
for (s = 0; s < maxSV1; s++)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < normalizedCounter[s]; i++)
|
||||
{
|
||||
tableDecode[position].symbol = (byte)s;
|
||||
position = position + step & tableMask;
|
||||
while (position > highThreshold)
|
||||
position = position + step & tableMask;
|
||||
}
|
||||
}
|
||||
|
||||
if (position != 0)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
}
|
||||
|
||||
{
|
||||
uint u;
|
||||
for (u = 0; u < tableSize; u++)
|
||||
{
|
||||
byte symbol = tableDecode[u].symbol;
|
||||
uint nextState = symbolNext[symbol]++;
|
||||
tableDecode[u].nbBits = (byte)(tableLog - ZSTD_highbit32(nextState));
|
||||
tableDecode[u].newState = (ushort)(
|
||||
(nextState << tableDecode[u].nbBits) - tableSize
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
private static nuint FSE_buildDTable_wksp(
|
||||
uint* dt,
|
||||
short* normalizedCounter,
|
||||
uint maxSymbolValue,
|
||||
uint tableLog,
|
||||
void* workSpace,
|
||||
nuint wkspSize
|
||||
)
|
||||
{
|
||||
return FSE_buildDTable_internal(
|
||||
dt,
|
||||
normalizedCounter,
|
||||
maxSymbolValue,
|
||||
tableLog,
|
||||
workSpace,
|
||||
wkspSize
|
||||
);
|
||||
}
|
||||
|
||||
/*-*******************************************************
|
||||
* Decompression (Byte symbols)
|
||||
*********************************************************/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint FSE_decompress_usingDTable_generic(
|
||||
void* dst,
|
||||
nuint maxDstSize,
|
||||
void* cSrc,
|
||||
nuint cSrcSize,
|
||||
uint* dt,
|
||||
uint fast
|
||||
)
|
||||
{
|
||||
byte* ostart = (byte*)dst;
|
||||
byte* op = ostart;
|
||||
byte* omax = op + maxDstSize;
|
||||
byte* olimit = omax - 3;
|
||||
BIT_DStream_t bitD;
|
||||
System.Runtime.CompilerServices.Unsafe.SkipInit(out bitD);
|
||||
FSE_DState_t state1;
|
||||
System.Runtime.CompilerServices.Unsafe.SkipInit(out state1);
|
||||
FSE_DState_t state2;
|
||||
System.Runtime.CompilerServices.Unsafe.SkipInit(out state2);
|
||||
{
|
||||
/* Init */
|
||||
nuint _var_err__ = BIT_initDStream(ref bitD, cSrc, cSrcSize);
|
||||
if (ERR_isError(_var_err__))
|
||||
return _var_err__;
|
||||
}
|
||||
|
||||
FSE_initDState(ref state1, ref bitD, dt);
|
||||
FSE_initDState(ref state2, ref bitD, dt);
|
||||
nuint bitD_bitContainer = bitD.bitContainer;
|
||||
uint bitD_bitsConsumed = bitD.bitsConsumed;
|
||||
sbyte* bitD_ptr = bitD.ptr;
|
||||
sbyte* bitD_start = bitD.start;
|
||||
sbyte* bitD_limitPtr = bitD.limitPtr;
|
||||
if (
|
||||
BIT_reloadDStream(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start,
|
||||
bitD_limitPtr
|
||||
) == BIT_DStream_status.BIT_DStream_overflow
|
||||
)
|
||||
{
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
|
||||
}
|
||||
|
||||
for (
|
||||
;
|
||||
BIT_reloadDStream(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start,
|
||||
bitD_limitPtr
|
||||
) == BIT_DStream_status.BIT_DStream_unfinished
|
||||
&& op < olimit;
|
||||
op += 4
|
||||
)
|
||||
{
|
||||
op[0] =
|
||||
fast != 0
|
||||
? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed)
|
||||
: FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed);
|
||||
if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8)
|
||||
BIT_reloadDStream(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start,
|
||||
bitD_limitPtr
|
||||
);
|
||||
op[1] =
|
||||
fast != 0
|
||||
? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed)
|
||||
: FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed);
|
||||
if ((14 - 2) * 4 + 7 > sizeof(nuint) * 8)
|
||||
{
|
||||
if (
|
||||
BIT_reloadDStream(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start,
|
||||
bitD_limitPtr
|
||||
) > BIT_DStream_status.BIT_DStream_unfinished
|
||||
)
|
||||
{
|
||||
op += 2;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
op[2] =
|
||||
fast != 0
|
||||
? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed)
|
||||
: FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed);
|
||||
if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8)
|
||||
BIT_reloadDStream(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start,
|
||||
bitD_limitPtr
|
||||
);
|
||||
op[3] =
|
||||
fast != 0
|
||||
? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed)
|
||||
: FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed);
|
||||
}
|
||||
|
||||
while (true)
|
||||
{
|
||||
if (op > omax - 2)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
|
||||
*op++ =
|
||||
fast != 0
|
||||
? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed)
|
||||
: FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed);
|
||||
if (
|
||||
BIT_reloadDStream(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start,
|
||||
bitD_limitPtr
|
||||
) == BIT_DStream_status.BIT_DStream_overflow
|
||||
)
|
||||
{
|
||||
*op++ =
|
||||
fast != 0
|
||||
? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed)
|
||||
: FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed);
|
||||
break;
|
||||
}
|
||||
|
||||
if (op > omax - 2)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
|
||||
*op++ =
|
||||
fast != 0
|
||||
? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed)
|
||||
: FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed);
|
||||
if (
|
||||
BIT_reloadDStream(
|
||||
ref bitD_bitContainer,
|
||||
ref bitD_bitsConsumed,
|
||||
ref bitD_ptr,
|
||||
bitD_start,
|
||||
bitD_limitPtr
|
||||
) == BIT_DStream_status.BIT_DStream_overflow
|
||||
)
|
||||
{
|
||||
*op++ =
|
||||
fast != 0
|
||||
? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed)
|
||||
: FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert(op >= ostart);
|
||||
return (nuint)(op - ostart);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint FSE_decompress_wksp_body(
|
||||
void* dst,
|
||||
nuint dstCapacity,
|
||||
void* cSrc,
|
||||
nuint cSrcSize,
|
||||
uint maxLog,
|
||||
void* workSpace,
|
||||
nuint wkspSize,
|
||||
int bmi2
|
||||
)
|
||||
{
|
||||
byte* istart = (byte*)cSrc;
|
||||
byte* ip = istart;
|
||||
uint tableLog;
|
||||
uint maxSymbolValue = 255;
|
||||
FSE_DecompressWksp* wksp = (FSE_DecompressWksp*)workSpace;
|
||||
nuint dtablePos = (nuint)(sizeof(FSE_DecompressWksp) / sizeof(uint));
|
||||
uint* dtable = (uint*)workSpace + dtablePos;
|
||||
if (wkspSize < (nuint)sizeof(FSE_DecompressWksp))
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
|
||||
{
|
||||
nuint NCountLength = FSE_readNCount_bmi2(
|
||||
wksp->ncount,
|
||||
&maxSymbolValue,
|
||||
&tableLog,
|
||||
istart,
|
||||
cSrcSize,
|
||||
bmi2
|
||||
);
|
||||
if (ERR_isError(NCountLength))
|
||||
return NCountLength;
|
||||
if (tableLog > maxLog)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
|
||||
assert(NCountLength <= cSrcSize);
|
||||
ip += NCountLength;
|
||||
cSrcSize -= NCountLength;
|
||||
}
|
||||
|
||||
if (
|
||||
(
|
||||
(ulong)(1 + (1 << (int)tableLog) + 1)
|
||||
+ (
|
||||
sizeof(short) * (maxSymbolValue + 1)
|
||||
+ (1UL << (int)tableLog)
|
||||
+ 8
|
||||
+ sizeof(uint)
|
||||
- 1
|
||||
) / sizeof(uint)
|
||||
+ (255 + 1) / 2
|
||||
+ 1
|
||||
) * sizeof(uint)
|
||||
> wkspSize
|
||||
)
|
||||
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
|
||||
assert(
|
||||
(nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint))
|
||||
<= wkspSize
|
||||
);
|
||||
workSpace =
|
||||
(byte*)workSpace
|
||||
+ sizeof(FSE_DecompressWksp)
|
||||
+ (1 + (1 << (int)tableLog)) * sizeof(uint);
|
||||
wkspSize -= (nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint));
|
||||
{
|
||||
nuint _var_err__ = FSE_buildDTable_internal(
|
||||
dtable,
|
||||
wksp->ncount,
|
||||
maxSymbolValue,
|
||||
tableLog,
|
||||
workSpace,
|
||||
wkspSize
|
||||
);
|
||||
if (ERR_isError(_var_err__))
|
||||
return _var_err__;
|
||||
}
|
||||
|
||||
{
|
||||
void* ptr = dtable;
|
||||
FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr;
|
||||
uint fastMode = DTableH->fastMode;
|
||||
if (fastMode != 0)
|
||||
return FSE_decompress_usingDTable_generic(
|
||||
dst,
|
||||
dstCapacity,
|
||||
ip,
|
||||
cSrcSize,
|
||||
dtable,
|
||||
1
|
||||
);
|
||||
return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/* Avoids the FORCE_INLINE of the _body() function. */
|
||||
private static nuint FSE_decompress_wksp_body_default(
|
||||
void* dst,
|
||||
nuint dstCapacity,
|
||||
void* cSrc,
|
||||
nuint cSrcSize,
|
||||
uint maxLog,
|
||||
void* workSpace,
|
||||
nuint wkspSize
|
||||
)
|
||||
{
|
||||
return FSE_decompress_wksp_body(
|
||||
dst,
|
||||
dstCapacity,
|
||||
cSrc,
|
||||
cSrcSize,
|
||||
maxLog,
|
||||
workSpace,
|
||||
wkspSize,
|
||||
0
|
||||
);
|
||||
}
|
||||
|
||||
private static nuint FSE_decompress_wksp_bmi2(
|
||||
void* dst,
|
||||
nuint dstCapacity,
|
||||
void* cSrc,
|
||||
nuint cSrcSize,
|
||||
uint maxLog,
|
||||
void* workSpace,
|
||||
nuint wkspSize,
|
||||
int bmi2
|
||||
)
|
||||
{
|
||||
return FSE_decompress_wksp_body_default(
|
||||
dst,
|
||||
dstCapacity,
|
||||
cSrc,
|
||||
cSrcSize,
|
||||
maxLog,
|
||||
workSpace,
|
||||
wkspSize
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public enum HIST_checkInput_e
|
||||
{
|
||||
trustInput,
|
||||
checkMaxSymbolValue,
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public unsafe struct HUF_CStream_t
|
||||
{
|
||||
public _bitContainer_e__FixedBuffer bitContainer;
|
||||
public _bitPos_e__FixedBuffer bitPos;
|
||||
public byte* startPtr;
|
||||
public byte* ptr;
|
||||
public byte* endPtr;
|
||||
|
||||
public unsafe struct _bitContainer_e__FixedBuffer
|
||||
{
|
||||
public nuint e0;
|
||||
public nuint e1;
|
||||
}
|
||||
|
||||
public unsafe struct _bitPos_e__FixedBuffer
|
||||
{
|
||||
public nuint e0;
|
||||
public nuint e1;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,8 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public unsafe struct HUF_CTableHeader
|
||||
{
|
||||
public byte tableLog;
|
||||
public byte maxSymbolValue;
|
||||
public fixed byte unused[6];
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public unsafe struct HUF_CompressWeightsWksp
|
||||
{
|
||||
public fixed uint CTable[59];
|
||||
public fixed uint scratchBuffer[41];
|
||||
public fixed uint count[13];
|
||||
public fixed short norm[13];
|
||||
}
|
||||
11
src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs
Normal file
@@ -0,0 +1,11 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/*-***************************/
|
||||
/* single-symbol decoding */
|
||||
/*-***************************/
|
||||
public struct HUF_DEltX1
|
||||
{
|
||||
/* single-symbol decoding */
|
||||
public byte nbBits;
|
||||
public byte @byte;
|
||||
}
|
||||
12
src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs
Normal file
@@ -0,0 +1,12 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/* *************************/
|
||||
/* double-symbols decoding */
|
||||
/* *************************/
|
||||
public struct HUF_DEltX2
|
||||
{
|
||||
/* double-symbols decoding */
|
||||
public ushort sequence;
|
||||
public byte nbBits;
|
||||
public byte length;
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
/**
|
||||
* The input/output arguments to the Huffman fast decoding loop:
|
||||
*
|
||||
* ip [in/out] - The input pointers, must be updated to reflect what is consumed.
|
||||
* op [in/out] - The output pointers, must be updated to reflect what is written.
|
||||
* bits [in/out] - The bitstream containers, must be updated to reflect the current state.
|
||||
* dt [in] - The decoding table.
|
||||
* ilowest [in] - The beginning of the valid range of the input. Decoders may read
|
||||
* down to this pointer. It may be below iend[0].
|
||||
* oend [in] - The end of the output stream. op[3] must not cross oend.
|
||||
* iend [in] - The end of each input stream. ip[i] may cross iend[i],
|
||||
* as long as it is above ilowest, but that indicates corruption.
|
||||
*/
|
||||
public unsafe struct HUF_DecompressFastArgs
|
||||
{
|
||||
public _ip_e__FixedBuffer ip;
|
||||
public _op_e__FixedBuffer op;
|
||||
public fixed ulong bits[4];
|
||||
public void* dt;
|
||||
public byte* ilowest;
|
||||
public byte* oend;
|
||||
public _iend_e__FixedBuffer iend;
|
||||
|
||||
public unsafe struct _ip_e__FixedBuffer
|
||||
{
|
||||
public byte* e0;
|
||||
public byte* e1;
|
||||
public byte* e2;
|
||||
public byte* e3;
|
||||
}
|
||||
|
||||
public unsafe struct _op_e__FixedBuffer
|
||||
{
|
||||
public byte* e0;
|
||||
public byte* e1;
|
||||
public byte* e2;
|
||||
public byte* e3;
|
||||
}
|
||||
|
||||
public unsafe struct _iend_e__FixedBuffer
|
||||
{
|
||||
public byte* e0;
|
||||
public byte* e1;
|
||||
public byte* e2;
|
||||
public byte* e3;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,10 @@
|
||||
namespace SharpCompress.Compressors.ZStandard.Unsafe;
|
||||
|
||||
public unsafe struct HUF_ReadDTableX1_Workspace
|
||||
{
|
||||
public fixed uint rankVal[13];
|
||||
public fixed uint rankStart[13];
|
||||
public fixed uint statsWksp[219];
|
||||
public fixed byte symbols[256];
|
||||
public fixed byte huffWeight[256];
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.