Mirror of https://github.com/adamhathcock/sharpcompress.git (synced 2026-02-05 13:34:59 +00:00)
Compare commits
29 Commits
| SHA1 |
|---|
| 862fa50fcb |
| 7b87924172 |
| d9d7ea8ec5 |
| 8d17d09455 |
| 05208ccd9b |
| a1e7c0068d |
| e6bec19946 |
| ec2be2869f |
| ce5432ed73 |
| b6e0ad89ce |
| 2745bfa19b |
| 3cdc4b38a6 |
| fc1ca808d7 |
| 6983e66037 |
| 01f7336d09 |
| 1561bba538 |
| 3ecf8a5e0c |
| e2095fc416 |
| 8398d40106 |
| 134fa8892f |
| ea5c8dc063 |
| 0209d00164 |
| a8d065dc9e |
| 7bd9711ade |
| 61802eadb4 |
| b425659058 |
| 3e32e3d7b1 |
| 1b661c9df1 |
| 54fc26b93d |
.github/workflows/dotnetcore.yml (vendored): 6 changed lines
@@ -12,13 +12,9 @@ jobs:
     - uses: actions/checkout@v1
     - uses: actions/setup-dotnet@v1
       with:
-        dotnet-version: 5.0.101
+        dotnet-version: 5.0.401
     - run: dotnet run -p build/build.csproj
     - uses: actions/upload-artifact@v2
       with:
         name: ${{ matrix.os }}-sharpcompress.nupkg
         path: artifacts/*
-    - uses: actions/upload-artifact@v2
-      with:
-        name: ${{ matrix.os }}-sharpcompress.snupkg
-        path: artifacts/*
@@ -49,20 +49,20 @@ class Program
             Target(Build, DependsOn(Format),
                 framework =>
                 {
                     if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && framework == "net46")
                     {
                         return;
                     }
                     Run("dotnet", "build src/SharpCompress/SharpCompress.csproj -c Release");
                 });

-            Target(Test, DependsOn(Build), ForEach("net5.0"),
+            Target(Test, DependsOn(Build), ForEach("net5.0", "net461"),
                 framework =>
                 {
                     IEnumerable<string> GetFiles(string d)
                     {
                         return Glob.Files(".", d);
                     }
                     if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && framework == "net461")
                     {
                         return;
                     }

                     foreach (var file in GetFiles("**/*.Test.csproj"))
                     {
@@ -6,9 +6,9 @@
   </PropertyGroup>

   <ItemGroup>
-    <PackageReference Include="Bullseye" Version="3.6.0" />
+    <PackageReference Include="Bullseye" Version="3.8.0" />
     <PackageReference Include="Glob" Version="1.1.8" />
-    <PackageReference Include="SimpleExec" Version="6.4.0" />
+    <PackageReference Include="SimpleExec" Version="8.0.0" />
   </ItemGroup>

 </Project>
@@ -1,5 +1,6 @@
 {
   "sdk": {
-    "version": "5.0.101"
+    "version": "5.0.300",
+    "rollForward": "latestFeature"
   }
 }
@@ -4,7 +4,7 @@
 using System;
 using System.Runtime.CompilerServices;
 using System.Runtime.InteropServices;
-#if !NETSTANDARD2_0 && !NETSTANDARD2_1
+#if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
 using System.Runtime.Intrinsics;
 using System.Runtime.Intrinsics.X86;
 #endif
@@ -22,7 +22,7 @@ namespace SharpCompress.Algorithms
         /// </summary>
         public const uint SeedValue = 1U;

-#if !NETSTANDARD2_0 && !NETSTANDARD2_1
+#if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
         private const int MinBufferSize = 64;
 #endif

@@ -51,12 +51,7 @@ namespace SharpCompress.Algorithms
         /// <returns>The <see cref="uint"/>.</returns>
         public static uint Calculate(uint adler, ReadOnlySpan<byte> buffer)
         {
             if (buffer.IsEmpty)
             {
                 return SeedValue;
             }

-#if !NETSTANDARD2_0 && !NETSTANDARD2_1
+#if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
             if (Sse3.IsSupported && buffer.Length >= MinBufferSize)
             {
                 return CalculateSse(adler, buffer);
@@ -69,7 +64,7 @@ namespace SharpCompress.Algorithms
         }

         // Based on https://github.com/chromium/chromium/blob/master/third_party/zlib/adler32_simd.c
-#if !NETSTANDARD2_0 && !NETSTANDARD2_1
+#if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
         private static unsafe uint CalculateSse(uint adler, ReadOnlySpan<byte> buffer)
         {
             uint s1 = adler & 0xFFFF;
@@ -282,4 +277,4 @@ namespace SharpCompress.Algorithms
             return (s2 << 16) | s1;
         }
     }
 }
 }
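The Adler32 hunks above only widen the compile-time guard so that the SSE path is also compiled out for .NET Framework targets. For orientation, here is a minimal sketch of the same double-guard pattern (compile-time symbol plus runtime capability check); the Adler32Sketch class and the placeholder method bodies are illustrative only, not SharpCompress code.

```csharp
using System;
#if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
using System.Runtime.Intrinsics.X86; // hardware intrinsics only exist on modern target frameworks
#endif

internal static class Adler32Sketch
{
    // Placeholder scalar fallback; a real implementation would walk the buffer.
    private static uint CalculateScalar(uint adler, ReadOnlySpan<byte> buffer) => adler;

#if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
    // Placeholder vectorized path, compiled only where System.Runtime.Intrinsics is available.
    private static uint CalculateSse(uint adler, ReadOnlySpan<byte> buffer) => adler;
#endif

    public static uint Calculate(uint adler, ReadOnlySpan<byte> buffer)
    {
        if (buffer.IsEmpty)
        {
            return 1U; // Adler-32 seed value
        }
#if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
        // The #if keeps the intrinsics types out of older TFMs;
        // the runtime check keeps the SSE path off CPUs without SSE3.
        if (Sse3.IsSupported && buffer.Length >= 64)
        {
            return CalculateSse(adler, buffer);
        }
#endif
        return CalculateScalar(adler, buffer);
    }
}
```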
@@ -1,6 +1,5 @@
 using System;
 using System.IO;
-using SharpCompress.Archives.Dmg;
 using SharpCompress.Archives.GZip;
 using SharpCompress.Archives.Rar;
 using SharpCompress.Archives.SevenZip;
@@ -45,12 +44,6 @@ namespace SharpCompress.Archives
                 return GZipArchive.Open(stream, readerOptions);
             }
-            stream.Seek(0, SeekOrigin.Begin);
-            if (DmgArchive.IsDmgFile(stream))
-            {
-                stream.Seek(0, SeekOrigin.Begin);
-                return DmgArchive.Open(stream, readerOptions);
-            }
             stream.Seek(0, SeekOrigin.Begin);
             if (RarArchive.IsRarFile(stream, readerOptions))
             {
                 stream.Seek(0, SeekOrigin.Begin);
@@ -62,7 +55,7 @@ namespace SharpCompress.Archives
                 stream.Seek(0, SeekOrigin.Begin);
                 return TarArchive.Open(stream, readerOptions);
             }
-            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip, Dmg");
+            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip");
         }

         public static IWritableArchive Create(ArchiveType type)
@@ -113,12 +106,6 @@ namespace SharpCompress.Archives
                 return GZipArchive.Open(fileInfo, options);
             }
-            stream.Seek(0, SeekOrigin.Begin);
-            if (DmgArchive.IsDmgFile(stream))
-            {
-                stream.Seek(0, SeekOrigin.Begin);
-                return DmgArchive.Open(fileInfo, options);
-            }
             stream.Seek(0, SeekOrigin.Begin);
             if (RarArchive.IsRarFile(stream, options))
             {
                 return RarArchive.Open(fileInfo, options);
@@ -128,7 +115,7 @@ namespace SharpCompress.Archives
             {
                 return TarArchive.Open(fileInfo, options);
             }
-            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, Dmg");
+            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip");
         }

         /// <summary>
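The two hunks above edit ArchiveFactory.Open, the format auto-detection entry point: it probes a seekable stream with each format's Is*File check, rewinding between probes, and throws the InvalidOperationException shown when nothing matches. A minimal usage sketch follows, assuming only the public SharpCompress API visible in this diff; the file name is illustrative.

```csharp
using System;
using System.IO;
using SharpCompress.Archives;
using SharpCompress.Readers;

class OpenAnyArchive
{
    static void Main()
    {
        // ArchiveFactory.Open needs a seekable stream because detection
        // rewinds with stream.Seek(0, SeekOrigin.Begin) between format probes.
        using var stream = File.OpenRead("sample.zip"); // illustrative path
        using var archive = ArchiveFactory.Open(stream, new ReaderOptions());

        foreach (var entry in archive.Entries)
        {
            if (!entry.IsDirectory)
            {
                Console.WriteLine($"{entry.Key} ({entry.Size} bytes)");
            }
        }
    }
}
```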
@@ -1,117 +0,0 @@
|
||||
using SharpCompress.Common;
|
||||
using SharpCompress.Common.Dmg;
|
||||
using SharpCompress.Common.Dmg.Headers;
|
||||
using SharpCompress.Common.Dmg.HFS;
|
||||
using SharpCompress.Readers;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
|
||||
namespace SharpCompress.Archives.Dmg
|
||||
{
|
||||
public class DmgArchive : AbstractArchive<DmgArchiveEntry, DmgVolume>
|
||||
{
|
||||
private readonly string _fileName;
|
||||
|
||||
internal DmgArchive(FileInfo fileInfo, ReaderOptions readerOptions)
|
||||
: base(ArchiveType.Dmg, fileInfo, readerOptions)
|
||||
{
|
||||
_fileName = fileInfo.FullName;
|
||||
}
|
||||
|
||||
internal DmgArchive(Stream stream, ReaderOptions readerOptions)
|
||||
: base(ArchiveType.Dmg, stream.AsEnumerable(), readerOptions)
|
||||
{
|
||||
_fileName = string.Empty;
|
||||
}
|
||||
|
||||
protected override IReader CreateReaderForSolidExtraction()
|
||||
=> new DmgReader(ReaderOptions, this, _fileName);
|
||||
|
||||
protected override IEnumerable<DmgArchiveEntry> LoadEntries(IEnumerable<DmgVolume> volumes)
|
||||
=> volumes.Single().LoadEntries();
|
||||
|
||||
protected override IEnumerable<DmgVolume> LoadVolumes(FileInfo file)
|
||||
=> new DmgVolume(this, file.OpenRead(), file.FullName, ReaderOptions).AsEnumerable();
|
||||
|
||||
protected override IEnumerable<DmgVolume> LoadVolumes(IEnumerable<Stream> streams)
|
||||
=> new DmgVolume(this, streams.Single(), string.Empty, ReaderOptions).AsEnumerable();
|
||||
|
||||
public static bool IsDmgFile(FileInfo fileInfo)
|
||||
{
|
||||
if (!fileInfo.Exists) return false;
|
||||
|
||||
using var stream = fileInfo.OpenRead();
|
||||
return IsDmgFile(stream);
|
||||
}
|
||||
|
||||
public static bool IsDmgFile(Stream stream)
|
||||
{
|
||||
long headerPos = stream.Length - DmgHeader.HeaderSize;
|
||||
if (headerPos < 0) return false;
|
||||
stream.Position = headerPos;
|
||||
|
||||
return DmgHeader.TryRead(stream, out _);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Constructor expects a filepath to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="filePath"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static DmgArchive Open(string filePath, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
filePath.CheckNotNullOrEmpty(nameof(filePath));
|
||||
return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Constructor with a FileInfo object to an existing file.
|
||||
/// </summary>
|
||||
/// <param name="fileInfo"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static DmgArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
fileInfo.CheckNotNull(nameof(fileInfo));
|
||||
return new DmgArchive(fileInfo, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Takes a seekable Stream as a source
|
||||
/// </summary>
|
||||
/// <param name="stream"></param>
|
||||
/// <param name="readerOptions"></param>
|
||||
public static DmgArchive Open(Stream stream, ReaderOptions? readerOptions = null)
|
||||
{
|
||||
stream.CheckNotNull(nameof(stream));
|
||||
return new DmgArchive(stream, readerOptions ?? new ReaderOptions());
|
||||
}
|
||||
|
||||
private sealed class DmgReader : AbstractReader<DmgEntry, DmgVolume>
|
||||
{
|
||||
private readonly DmgArchive _archive;
|
||||
private readonly string _fileName;
|
||||
private readonly Stream? _partitionStream;
|
||||
|
||||
public override DmgVolume Volume { get; }
|
||||
|
||||
internal DmgReader(ReaderOptions readerOptions, DmgArchive archive, string fileName)
|
||||
: base(readerOptions, ArchiveType.Dmg)
|
||||
{
|
||||
_archive = archive;
|
||||
_fileName = fileName;
|
||||
Volume = archive.Volumes.Single();
|
||||
|
||||
using var compressedStream = DmgUtil.LoadHFSPartitionStream(Volume.Stream, Volume.Header);
|
||||
_partitionStream = compressedStream?.Decompress();
|
||||
}
|
||||
|
||||
protected override IEnumerable<DmgEntry> GetEntries(Stream stream)
|
||||
{
|
||||
if (_partitionStream is null) return Array.Empty<DmgArchiveEntry>();
|
||||
else return HFSUtil.LoadEntriesFromPartition(_partitionStream, _fileName, _archive);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,32 +0,0 @@
-using SharpCompress.Common.Dmg;
-using SharpCompress.Common.Dmg.HFS;
-using System;
-using System.IO;
-
-namespace SharpCompress.Archives.Dmg
-{
-    public sealed class DmgArchiveEntry : DmgEntry, IArchiveEntry
-    {
-        private readonly Stream? _stream;
-
-        public bool IsComplete { get; } = true;
-
-        public IArchive Archive { get; }
-
-        internal DmgArchiveEntry(Stream? stream, DmgArchive archive, HFSCatalogRecord record, string path, DmgFilePart part)
-            : base(record, path, stream?.Length ?? 0, part)
-        {
-            _stream = stream;
-            Archive = archive;
-        }
-
-        public Stream OpenEntryStream()
-        {
-            if (IsDirectory)
-                throw new NotSupportedException("Directories cannot be opened as stream");
-
-            _stream!.Position = 0;
-            return _stream;
-        }
-    }
-}
@@ -10,7 +10,7 @@ using SharpCompress.Readers.Rar;

 namespace SharpCompress.Archives.Rar
 {
-    public class
+    public class
     RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
     {
         internal Lazy<IRarUnpack> UnpackV2017 { get; } = new Lazy<IRarUnpack>(() => new SharpCompress.Compressors.Rar.UnpackV2017.Unpack());
@@ -5,7 +5,7 @@ using System.Runtime.CompilerServices;
 [assembly: AssemblyTitle("SharpCompress")]
 [assembly: AssemblyProduct("SharpCompress")]
 [assembly: InternalsVisibleTo("SharpCompress.Test" + SharpCompress.AssemblyInfo.PublicKeySuffix)]
-[assembly: CLSCompliant(true)]
+[assembly: CLSCompliant(false)]

 namespace SharpCompress
 {
@@ -36,7 +36,7 @@ namespace SharpCompress.Common
             Password = password;
         }

-#if !NET461
+#if !NETFRAMEWORK
         static ArchiveEncoding()
         {
             Encoding.RegisterProvider(CodePagesEncodingProvider.Instance);
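The ArchiveEncoding hunk above swaps the guard from !NET461 to !NETFRAMEWORK, so the code-pages encoding provider is registered on every target that does not ship legacy code pages by default. A small sketch of what that registration enables, assuming the System.Text.Encoding.CodePages package is referenced; code page 437 is just an example.

```csharp
using System.Text;

static class LegacyEncodingExample
{
    static LegacyEncodingExample()
    {
        // On .NET (Core) and .NET Standard only Unicode encodings ship by default;
        // registering the provider makes legacy code pages resolvable again.
        Encoding.RegisterProvider(CodePagesEncodingProvider.Instance);
    }

    // After registration, lookups like this succeed instead of throwing.
    public static Encoding Cp437 => Encoding.GetEncoding(437); // IBM437, common for zip entry names
}
```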
@@ -8,10 +8,5 @@ namespace SharpCompress.Common
             : base(message)
         {
         }
-
-        public ArchiveException(string message, Exception inner)
-            : base(message, inner)
-        {
-        }
     }
 }
@@ -7,6 +7,6 @@
         Tar,
         SevenZip,
         GZip,
-        Dmg
+        ZStandard
     }
 }
@@ -14,6 +14,7 @@
         LZip,
         Xz,
         Unknown,
-        Deflate64
+        Deflate64,
+        ZStandard,
     }
 }
@@ -1,323 +0,0 @@
|
||||
using SharpCompress.Common.Dmg.Headers;
|
||||
using SharpCompress.Compressors;
|
||||
using SharpCompress.Compressors.ADC;
|
||||
using SharpCompress.Compressors.BZip2;
|
||||
using SharpCompress.Compressors.Deflate;
|
||||
using SharpCompress.IO;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace SharpCompress.Common.Dmg
|
||||
{
|
||||
internal sealed class DmgBlockDataStream : Stream
|
||||
{
|
||||
private readonly Stream _baseStream;
|
||||
private readonly DmgHeader _header;
|
||||
private readonly BlkxTable _table;
|
||||
private long _position;
|
||||
private bool _isEnded;
|
||||
private int _chunkIndex;
|
||||
private Stream? _chunkStream;
|
||||
private long _chunkPos;
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanWrite => false;
|
||||
public override bool CanSeek => true;
|
||||
public override long Length { get; }
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _position;
|
||||
set
|
||||
{
|
||||
if ((value < 0) || (value > Length)) throw new ArgumentOutOfRangeException(nameof(value));
|
||||
|
||||
if (value == Length)
|
||||
{
|
||||
// End of the stream
|
||||
|
||||
_position = Length;
|
||||
_isEnded = true;
|
||||
_chunkIndex = -1;
|
||||
_chunkStream = null;
|
||||
}
|
||||
else if (value != _position)
|
||||
{
|
||||
_position = value;
|
||||
|
||||
// We can only seek over entire chunks at a time because some chunks may be compressed.
|
||||
// So we first find the chunk that we are now in, then we read to the exact position inside that chunk.
|
||||
|
||||
for (int i = 0; i < _table.Chunks.Count; i++)
|
||||
{
|
||||
var chunk = _table.Chunks[i];
|
||||
if (IsChunkValid(chunk) && (chunk.UncompressedOffset <= (ulong)_position)
|
||||
&& ((chunk.UncompressedOffset + chunk.UncompressedLength) > (ulong)_position))
|
||||
{
|
||||
if (i == _chunkIndex)
|
||||
{
|
||||
// We are still in the same chunk, so if the new position is
|
||||
// behind the previous one we can just read to the new position.
|
||||
|
||||
long offset = (long)chunk.UncompressedOffset + _chunkPos;
|
||||
if (offset <= _position)
|
||||
{
|
||||
long skip = _position - offset;
|
||||
_chunkStream!.Skip(skip);
|
||||
_chunkPos += skip;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
_chunkIndex = i;
|
||||
_chunkStream = GetChunkStream();
|
||||
_chunkPos = 0;
|
||||
|
||||
// If the chunk happens to not be compressed this read will still result in a fast seek
|
||||
if ((ulong)_position != chunk.UncompressedOffset)
|
||||
{
|
||||
long skip = _position - (long)chunk.UncompressedOffset;
|
||||
_chunkStream.Skip(skip);
|
||||
_chunkPos = skip;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public DmgBlockDataStream(Stream baseStream, DmgHeader header, BlkxTable table)
|
||||
{
|
||||
if (!baseStream.CanRead) throw new ArgumentException("Requires a readable stream", nameof(baseStream));
|
||||
if (!baseStream.CanSeek) throw new ArgumentException("Requires a seekable stream", nameof(baseStream));
|
||||
|
||||
_baseStream = baseStream;
|
||||
_header = header;
|
||||
_table = table;
|
||||
|
||||
Length = 0;
|
||||
foreach (var chunk in table.Chunks)
|
||||
{
|
||||
if (IsChunkValid(chunk))
|
||||
Length += (long)chunk.UncompressedLength;
|
||||
}
|
||||
|
||||
_position = 0;
|
||||
_chunkIndex = -1;
|
||||
_chunkIndex = GetNextChunk();
|
||||
_isEnded = _chunkIndex < 0;
|
||||
if (!_isEnded) _chunkStream = GetChunkStream();
|
||||
_chunkPos = 0;
|
||||
}
|
||||
|
||||
private static bool IsChunkValid(BlkxChunk chunk)
|
||||
{
|
||||
return chunk.Type switch
|
||||
{
|
||||
BlkxChunkType.Zero => true,
|
||||
BlkxChunkType.Uncompressed => true,
|
||||
BlkxChunkType.Ignore => true,
|
||||
BlkxChunkType.AdcCompressed => true,
|
||||
BlkxChunkType.ZlibCompressed => true,
|
||||
BlkxChunkType.Bz2Compressed => true,
|
||||
_ => false
|
||||
};
|
||||
}
|
||||
|
||||
private int GetNextChunk()
|
||||
{
|
||||
int index = _chunkIndex;
|
||||
bool isValid = false;
|
||||
while (!isValid)
|
||||
{
|
||||
index++;
|
||||
if (index >= _table.Chunks.Count) return -1;
|
||||
|
||||
var chunk = _table.Chunks[index];
|
||||
if (chunk.Type == BlkxChunkType.Last) return -1;
|
||||
|
||||
isValid = IsChunkValid(chunk);
|
||||
}
|
||||
return index;
|
||||
}
|
||||
|
||||
private Stream GetChunkStream()
|
||||
{
|
||||
if (_chunkIndex < 0)
|
||||
throw new InvalidOperationException("Invalid chunk index");
|
||||
|
||||
var chunk = _table.Chunks[_chunkIndex];
|
||||
|
||||
// For our purposes, ignore behaves the same as zero
|
||||
if ((chunk.Type == BlkxChunkType.Zero) || (chunk.Type == BlkxChunkType.Ignore))
|
||||
return new ConstantStream(0, (long)chunk.UncompressedLength);
|
||||
|
||||
// We first create a sub-stream on the region of the base stream where the
|
||||
// (possibly compressed) data is physically located at.
|
||||
var subStream = new SeekableSubStream(_baseStream,
|
||||
(long)(_header.DataForkOffset + _table.DataOffset + chunk.CompressedOffset),
|
||||
(long)chunk.CompressedLength);
|
||||
|
||||
// Then we nest that sub-stream into the apropriate compressed stream.
|
||||
return chunk.Type switch
|
||||
{
|
||||
BlkxChunkType.Uncompressed => subStream,
|
||||
BlkxChunkType.AdcCompressed => new ADCStream(subStream, CompressionMode.Decompress),
|
||||
BlkxChunkType.ZlibCompressed => new ZlibStream(subStream, CompressionMode.Decompress),
|
||||
BlkxChunkType.Bz2Compressed => new BZip2Stream(subStream, CompressionMode.Decompress, false),
|
||||
_ => throw new InvalidOperationException("Invalid chunk type")
|
||||
};
|
||||
}
|
||||
|
||||
// Decompresses the entire stream in memory for faster extraction.
|
||||
// This is about two orders of magnitude faster than decompressing
|
||||
// on-the-fly while extracting, but also eats RAM for breakfest.
|
||||
public Stream Decompress()
|
||||
{
|
||||
// We have to load all the chunks into separate memory streams first
|
||||
// because otherwise the decompression threads would block each other
|
||||
// and actually be slower than just a single decompression thread.
|
||||
|
||||
var rawStreams = new Stream?[_table.Chunks.Count];
|
||||
for (int i = 0; i < rawStreams.Length; i++)
|
||||
{
|
||||
var chunk = _table.Chunks[i];
|
||||
if (IsChunkValid(chunk))
|
||||
{
|
||||
if ((chunk.Type == BlkxChunkType.Zero) || (chunk.Type == BlkxChunkType.Ignore))
|
||||
{
|
||||
rawStreams[i] = new ConstantStream(0, (long)chunk.UncompressedLength);
|
||||
}
|
||||
else
|
||||
{
|
||||
var subStream = new SeekableSubStream(_baseStream,
|
||||
(long)(_header.DataForkOffset + _table.DataOffset + chunk.CompressedOffset),
|
||||
(long)chunk.CompressedLength);
|
||||
|
||||
var memStream = new MemoryStream();
|
||||
subStream.CopyTo(memStream);
|
||||
memStream.Position = 0;
|
||||
rawStreams[i] = memStream;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
rawStreams[i] = null;
|
||||
}
|
||||
}
|
||||
|
||||
// Now we can decompress the chunks multithreaded
|
||||
|
||||
var streams = new Stream?[_table.Chunks.Count];
|
||||
Parallel.For(0, streams.Length, i =>
|
||||
{
|
||||
var rawStream = rawStreams[i];
|
||||
if (rawStream is not null)
|
||||
{
|
||||
var chunk = _table.Chunks[i];
|
||||
if ((chunk.Type == BlkxChunkType.Zero)
|
||||
|| (chunk.Type == BlkxChunkType.Ignore)
|
||||
|| (chunk.Type == BlkxChunkType.Uncompressed))
|
||||
{
|
||||
streams[i] = rawStream;
|
||||
}
|
||||
else
|
||||
{
|
||||
Stream compStream = chunk.Type switch
|
||||
{
|
||||
BlkxChunkType.AdcCompressed => new ADCStream(rawStream, CompressionMode.Decompress),
|
||||
BlkxChunkType.ZlibCompressed => new ZlibStream(rawStream, CompressionMode.Decompress),
|
||||
BlkxChunkType.Bz2Compressed => new BZip2Stream(rawStream, CompressionMode.Decompress, false),
|
||||
_ => throw new InvalidOperationException("Invalid chunk type")
|
||||
};
|
||||
|
||||
var memStream = new MemoryStream();
|
||||
compStream.CopyTo(memStream);
|
||||
compStream.Dispose();
|
||||
|
||||
memStream.Position = 0;
|
||||
streams[i] = memStream;
|
||||
}
|
||||
|
||||
rawStream.Dispose();
|
||||
rawStreams[i] = null;
|
||||
}
|
||||
else
|
||||
{
|
||||
streams[i] = null;
|
||||
}
|
||||
});
|
||||
|
||||
return new CompositeStream((IEnumerable<Stream>)streams.Where(s => s is not null));
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_isEnded) return 0;
|
||||
|
||||
int readCount = _chunkStream!.Read(buffer, offset, count);
|
||||
_chunkPos += readCount;
|
||||
|
||||
while (readCount < count)
|
||||
{
|
||||
// Current chunk has ended, so we have to continue reading from the next chunk.
|
||||
|
||||
_chunkIndex = GetNextChunk();
|
||||
if (_chunkIndex < 0)
|
||||
{
|
||||
// We have reached the last chunk
|
||||
|
||||
_isEnded = true;
|
||||
_chunkPos = 0;
|
||||
_position += readCount;
|
||||
return readCount;
|
||||
}
|
||||
|
||||
_chunkStream = GetChunkStream();
|
||||
int rc = _chunkStream.Read(buffer, offset + readCount, count - readCount);
|
||||
_chunkPos = rc;
|
||||
readCount += rc;
|
||||
}
|
||||
|
||||
_position += readCount;
|
||||
return readCount;
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{ }
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
=> throw new NotSupportedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
=> throw new NotSupportedException();
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{ }
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
using SharpCompress.Common.Dmg.HFS;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
|
||||
namespace SharpCompress.Common.Dmg
|
||||
{
|
||||
public abstract class DmgEntry : Entry
|
||||
{
|
||||
public override string Key { get; }
|
||||
public override bool IsDirectory { get; }
|
||||
public override long Size { get; }
|
||||
public override long CompressedSize { get; }
|
||||
public override CompressionType CompressionType { get; }
|
||||
public override DateTime? LastModifiedTime { get; }
|
||||
public override DateTime? CreatedTime { get; }
|
||||
public override DateTime? LastAccessedTime { get; }
|
||||
public override DateTime? ArchivedTime { get; }
|
||||
|
||||
public override long Crc { get; } = 0; // Not stored
|
||||
public override string? LinkTarget { get; } = null;
|
||||
public override bool IsEncrypted { get; } = false;
|
||||
public override bool IsSplitAfter { get; } = false;
|
||||
|
||||
internal override IEnumerable<FilePart> Parts { get; }
|
||||
|
||||
internal DmgEntry(HFSCatalogRecord record, string path, long size, DmgFilePart part)
|
||||
{
|
||||
Key = path;
|
||||
IsDirectory = record.Type == HFSCatalogRecordType.Folder;
|
||||
Size = CompressedSize = size; // There is no way to get the actual compressed size or the compression type of
|
||||
CompressionType = CompressionType.Unknown; // a file in a DMG archive since the files are nested inside the HFS partition.
|
||||
Parts = part.AsEnumerable();
|
||||
|
||||
if (IsDirectory)
|
||||
{
|
||||
var folder = (HFSCatalogFolder)record;
|
||||
LastModifiedTime = (folder.AttributeModDate > folder.ContentModDate) ? folder.AttributeModDate : folder.ContentModDate;
|
||||
CreatedTime = folder.CreateDate;
|
||||
LastAccessedTime = folder.AccessDate;
|
||||
ArchivedTime = folder.BackupDate;
|
||||
}
|
||||
else
|
||||
{
|
||||
var file = (HFSCatalogFile)record;
|
||||
LastModifiedTime = (file.AttributeModDate > file.ContentModDate) ? file.AttributeModDate : file.ContentModDate;
|
||||
CreatedTime = file.CreateDate;
|
||||
LastAccessedTime = file.AccessDate;
|
||||
ArchivedTime = file.BackupDate;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,21 +0,0 @@
-using System.IO;
-
-namespace SharpCompress.Common.Dmg
-{
-    internal sealed class DmgFilePart : FilePart
-    {
-        private readonly Stream _stream;
-
-        internal override string FilePartName { get; }
-
-        public DmgFilePart(Stream stream, string fileName)
-            : base(new ArchiveEncoding())
-        {
-            _stream = stream;
-            FilePartName = fileName;
-        }
-
-        internal override Stream GetCompressedStream() => _stream;
-        internal override Stream? GetRawStream() => null;
-    }
-}
@@ -1,183 +0,0 @@
|
||||
using SharpCompress.Common.Dmg.Headers;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Globalization;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
using System.Xml.Linq;
|
||||
|
||||
namespace SharpCompress.Common.Dmg
|
||||
{
|
||||
internal static class DmgUtil
|
||||
{
|
||||
private const string MalformedXmlMessage = "Malformed XML block";
|
||||
|
||||
private static T[] ParseArray<T>(in XElement parent, in Func<XElement, T> parseElement)
|
||||
{
|
||||
var list = new List<T>();
|
||||
|
||||
foreach (var node in parent.Elements())
|
||||
list.Add(parseElement(node));
|
||||
|
||||
return list.ToArray();
|
||||
}
|
||||
|
||||
private static Dictionary<string, T> ParseDict<T>(in XElement parent, in Func<XElement, T> parseValue)
|
||||
{
|
||||
var dict = new Dictionary<string, T>();
|
||||
|
||||
string? key = null;
|
||||
foreach (var node in parent.Elements())
|
||||
{
|
||||
if (string.Equals(node.Name.LocalName, "key", StringComparison.Ordinal))
|
||||
{
|
||||
key = node.Value;
|
||||
}
|
||||
else if (key is not null)
|
||||
{
|
||||
var value = parseValue(node);
|
||||
dict.Add(key, value);
|
||||
key = null;
|
||||
}
|
||||
}
|
||||
|
||||
return dict;
|
||||
}
|
||||
|
||||
private static Dictionary<string, Dictionary<string, Dictionary<string, string>[]>> ParsePList(in XDocument doc)
|
||||
{
|
||||
var dictNode = doc.Root?.Element("dict");
|
||||
if (dictNode is null) throw new InvalidFormatException(MalformedXmlMessage);
|
||||
|
||||
static Dictionary<string, string> ParseObject(XElement parent)
|
||||
=> ParseDict(parent, node => node.Value);
|
||||
|
||||
static Dictionary<string, string>[] ParseObjectArray(XElement parent)
|
||||
=> ParseArray(parent, ParseObject);
|
||||
|
||||
static Dictionary<string, Dictionary<string, string>[]> ParseSubDict(XElement parent)
|
||||
=> ParseDict(parent, ParseObjectArray);
|
||||
|
||||
return ParseDict(dictNode, ParseSubDict);
|
||||
}
|
||||
|
||||
private static BlkxData CreateDataFromDict(in Dictionary<string, string> dict)
|
||||
{
|
||||
static bool TryParseHex(string? s, out uint value)
|
||||
{
|
||||
value = 0;
|
||||
if (string.IsNullOrEmpty(s)) return false;
|
||||
|
||||
if (s!.StartsWith("0x", StringComparison.OrdinalIgnoreCase))
|
||||
s = s.Substring(2);
|
||||
|
||||
return uint.TryParse(s, NumberStyles.HexNumber, CultureInfo.InvariantCulture, out value);
|
||||
}
|
||||
|
||||
if (!dict.TryGetValue("ID", out string? idStr) || !int.TryParse(idStr, out int id))
|
||||
throw new InvalidFormatException(MalformedXmlMessage);
|
||||
if (!dict.TryGetValue("Name", out string? name))
|
||||
throw new InvalidFormatException(MalformedXmlMessage);
|
||||
if (!dict.TryGetValue("Attributes", out string? attribStr) || !TryParseHex(attribStr, out uint attribs))
|
||||
throw new InvalidFormatException(MalformedXmlMessage);
|
||||
if (!dict.TryGetValue("Data", out string? base64Data) || string.IsNullOrEmpty(base64Data))
|
||||
throw new InvalidFormatException(MalformedXmlMessage);
|
||||
|
||||
try
|
||||
{
|
||||
var data = Convert.FromBase64String(base64Data);
|
||||
if (!BlkxTable.TryRead(data, out var table))
|
||||
throw new InvalidFormatException("Invalid BLKX table");
|
||||
|
||||
return new BlkxData(id, name, attribs, table!);
|
||||
}
|
||||
catch (FormatException ex)
|
||||
{
|
||||
throw new InvalidFormatException(MalformedXmlMessage, ex);
|
||||
}
|
||||
}
|
||||
|
||||
public static DmgBlockDataStream? LoadHFSPartitionStream(Stream baseStream, DmgHeader header)
|
||||
{
|
||||
if ((header.XMLOffset + header.XMLLength) >= (ulong)baseStream.Length)
|
||||
throw new IncompleteArchiveException("XML block incomplete");
|
||||
if ((header.DataForkOffset + header.DataForkLength) >= (ulong)baseStream.Length)
|
||||
throw new IncompleteArchiveException("Data block incomplete");
|
||||
|
||||
baseStream.Position = (long)header.XMLOffset;
|
||||
var xmlBuffer = new byte[header.XMLLength];
|
||||
baseStream.Read(xmlBuffer, 0, (int)header.XMLLength);
|
||||
var xml = Encoding.ASCII.GetString(xmlBuffer);
|
||||
|
||||
var doc = XDocument.Parse(xml);
|
||||
var pList = ParsePList(doc);
|
||||
if (!pList.TryGetValue("resource-fork", out var resDict) || !resDict.TryGetValue("blkx", out var blkxDicts))
|
||||
throw new InvalidFormatException(MalformedXmlMessage);
|
||||
|
||||
var objs = new BlkxData[blkxDicts.Length];
|
||||
for (int i = 0; i < objs.Length; i++)
|
||||
objs[i] = CreateDataFromDict(blkxDicts[i]);
|
||||
|
||||
// Index 0 is the protective MBR partition
|
||||
// Index 1 is the GPT header
|
||||
// Index 2 is the GPT partition table
|
||||
|
||||
try
|
||||
{
|
||||
var headerData = objs[1];
|
||||
using var headerStream = new DmgBlockDataStream(baseStream, header, headerData.Table);
|
||||
if (!GptHeader.TryRead(headerStream, out var gptHeader))
|
||||
throw new InvalidFormatException("Invalid GPT header");
|
||||
|
||||
var tableData = objs[2];
|
||||
using var tableStream = new DmgBlockDataStream(baseStream, header, tableData.Table);
|
||||
var gptTable = new GptPartitionEntry[gptHeader!.EntriesCount];
|
||||
for (int i = 0; i < gptHeader.EntriesCount; i++)
|
||||
gptTable[i] = GptPartitionEntry.Read(tableStream);
|
||||
|
||||
foreach (var entry in gptTable)
|
||||
{
|
||||
if (entry.TypeGuid == PartitionFormat.AppleHFS)
|
||||
{
|
||||
BlkxData? partitionData = null;
|
||||
for (int i = 3; i < objs.Length; i++)
|
||||
{
|
||||
if (objs[i].Name.StartsWith(entry.Name, StringComparison.Ordinal))
|
||||
{
|
||||
partitionData = objs[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (partitionData is null)
|
||||
throw new InvalidFormatException($"Missing partition {entry.Name}");
|
||||
|
||||
return new DmgBlockDataStream(baseStream, header, partitionData.Table);
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
catch (EndOfStreamException ex)
|
||||
{
|
||||
throw new IncompleteArchiveException("Partition incomplete", ex);
|
||||
}
|
||||
}
|
||||
|
||||
private sealed class BlkxData
|
||||
{
|
||||
public int Id { get; }
|
||||
public string Name { get; }
|
||||
public uint Attributes { get; }
|
||||
public BlkxTable Table { get; }
|
||||
|
||||
public BlkxData(int id, string name, uint attributes, BlkxTable table)
|
||||
{
|
||||
Id = id;
|
||||
Name = name;
|
||||
Attributes = attributes;
|
||||
Table = table;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
using SharpCompress.Archives.Dmg;
|
||||
using SharpCompress.Common.Dmg.Headers;
|
||||
using SharpCompress.Common.Dmg.HFS;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Dmg
|
||||
{
|
||||
public class DmgVolume : Volume
|
||||
{
|
||||
private readonly DmgArchive _archive;
|
||||
private readonly string _fileName;
|
||||
|
||||
internal DmgHeader Header { get; }
|
||||
|
||||
public DmgVolume(DmgArchive archive, Stream stream, string fileName, Readers.ReaderOptions readerOptions)
|
||||
: base(stream, readerOptions)
|
||||
{
|
||||
_archive = archive;
|
||||
_fileName = fileName;
|
||||
|
||||
long pos = stream.Length - DmgHeader.HeaderSize;
|
||||
if (pos < 0) throw new InvalidFormatException("Invalid DMG volume");
|
||||
stream.Position = pos;
|
||||
|
||||
if (DmgHeader.TryRead(stream, out var header)) Header = header!;
|
||||
else throw new InvalidFormatException("Invalid DMG volume");
|
||||
}
|
||||
|
||||
internal IEnumerable<DmgArchiveEntry> LoadEntries()
|
||||
{
|
||||
var partitionStream = DmgUtil.LoadHFSPartitionStream(Stream, Header);
|
||||
if (partitionStream is null) return Array.Empty<DmgArchiveEntry>();
|
||||
else return HFSUtil.LoadEntriesFromPartition(partitionStream, _fileName, _archive);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,336 +0,0 @@
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal sealed class HFSCatalogKey : HFSStructBase, IEquatable<HFSCatalogKey>, IComparable<HFSCatalogKey>, IComparable
|
||||
{
|
||||
private readonly StringComparer _comparer;
|
||||
|
||||
public uint ParentId { get; }
|
||||
|
||||
public string Name { get; }
|
||||
|
||||
private static StringComparer GetComparer(HFSKeyCompareType compareType, bool isHFSX)
|
||||
{
|
||||
if (isHFSX)
|
||||
{
|
||||
return compareType switch
|
||||
{
|
||||
HFSKeyCompareType.CaseFolding => StringComparer.InvariantCultureIgnoreCase,
|
||||
HFSKeyCompareType.BinaryCompare => StringComparer.Ordinal,
|
||||
_ => StringComparer.InvariantCultureIgnoreCase
|
||||
};
|
||||
}
|
||||
else
|
||||
{
|
||||
return StringComparer.InvariantCultureIgnoreCase;
|
||||
}
|
||||
}
|
||||
|
||||
public HFSCatalogKey(uint parentId, string name, HFSKeyCompareType compareType, bool isHFSX)
|
||||
{
|
||||
ParentId = parentId;
|
||||
Name = name;
|
||||
_comparer = GetComparer(compareType, isHFSX);
|
||||
}
|
||||
|
||||
public HFSCatalogKey(byte[] key, HFSKeyCompareType compareType, bool isHFSX)
|
||||
{
|
||||
ReadOnlySpan<byte> data = key.AsSpan();
|
||||
ParentId = ReadUInt32(ref data);
|
||||
Name = ReadString(ref data, true);
|
||||
_comparer = GetComparer(compareType, isHFSX);
|
||||
}
|
||||
|
||||
public bool Equals(HFSCatalogKey? other)
|
||||
{
|
||||
if (other is null) return false;
|
||||
else return (ParentId == other.ParentId) && _comparer.Equals(Name, other.Name);
|
||||
}
|
||||
|
||||
public override bool Equals(object? obj)
|
||||
{
|
||||
if (obj is HFSCatalogKey other) return Equals(other);
|
||||
else return false;
|
||||
}
|
||||
|
||||
public int CompareTo(HFSCatalogKey? other)
|
||||
{
|
||||
if (other is null) return 1;
|
||||
|
||||
int result = ParentId.CompareTo(other.ParentId);
|
||||
if (result == 0) result = _comparer.Compare(Name, other.Name);
|
||||
return result;
|
||||
}
|
||||
|
||||
public int CompareTo(object? obj)
|
||||
{
|
||||
if (obj is null) return 1;
|
||||
else if (obj is HFSCatalogKey other) return CompareTo(other);
|
||||
else throw new ArgumentException("Object is not of type CatalogKey", nameof(obj));
|
||||
}
|
||||
|
||||
public override int GetHashCode()
|
||||
=> ParentId.GetHashCode() ^ _comparer.GetHashCode(Name);
|
||||
|
||||
public static bool operator ==(HFSCatalogKey? left, HFSCatalogKey? right)
|
||||
{
|
||||
if (left is null) return right is null;
|
||||
else return left.Equals(right);
|
||||
}
|
||||
|
||||
public static bool operator !=(HFSCatalogKey? left, HFSCatalogKey? right)
|
||||
{
|
||||
if (left is null) return right is not null;
|
||||
else return !left.Equals(right);
|
||||
}
|
||||
|
||||
public static bool operator <(HFSCatalogKey? left, HFSCatalogKey? right)
|
||||
{
|
||||
if (left is null) return right is not null;
|
||||
else return left.CompareTo(right) < 0;
|
||||
}
|
||||
|
||||
public static bool operator >(HFSCatalogKey? left, HFSCatalogKey? right)
|
||||
{
|
||||
if (left is null) return false;
|
||||
else return left.CompareTo(right) > 0;
|
||||
}
|
||||
|
||||
public static bool operator <=(HFSCatalogKey? left, HFSCatalogKey? right)
|
||||
{
|
||||
if (left is null) return true;
|
||||
else return left.CompareTo(right) <= 0;
|
||||
}
|
||||
|
||||
public static bool operator >=(HFSCatalogKey? left, HFSCatalogKey? right)
|
||||
{
|
||||
if (left is null) return right is null;
|
||||
else return left.CompareTo(right) >= 0;
|
||||
}
|
||||
}
|
||||
|
||||
internal enum HFSCatalogRecordType : ushort
|
||||
{
|
||||
Folder = 0x0001,
|
||||
File = 0x0002,
|
||||
FolderThread = 0x0003,
|
||||
FileThread = 0x0004
|
||||
}
|
||||
|
||||
internal abstract class HFSCatalogRecord : HFSStructBase
|
||||
{
|
||||
public HFSCatalogRecordType Type { get; }
|
||||
|
||||
protected HFSCatalogRecord(HFSCatalogRecordType type)
|
||||
=> Type = type;
|
||||
|
||||
public static bool TryRead(ref ReadOnlySpan<byte> data, HFSKeyCompareType compareType, bool isHFSX, out HFSCatalogRecord? record)
|
||||
{
|
||||
record = null;
|
||||
|
||||
ushort rawType = ReadUInt16(ref data);
|
||||
if (!Enum.IsDefined(typeof(HFSCatalogRecordType), rawType)) return false;
|
||||
|
||||
var type = (HFSCatalogRecordType)rawType;
|
||||
switch (type)
|
||||
{
|
||||
case HFSCatalogRecordType.Folder:
|
||||
record = HFSCatalogFolder.Read(ref data);
|
||||
return true;
|
||||
|
||||
case HFSCatalogRecordType.File:
|
||||
record = HFSCatalogFile.Read(ref data);
|
||||
return true;
|
||||
|
||||
case HFSCatalogRecordType.FolderThread:
|
||||
record = HFSCatalogThread.Read(ref data, false, compareType, isHFSX);
|
||||
return true;
|
||||
|
||||
case HFSCatalogRecordType.FileThread:
|
||||
record = HFSCatalogThread.Read(ref data, true, compareType, isHFSX);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class HFSCatalogFolder : HFSCatalogRecord
|
||||
{
|
||||
public uint Valence { get; }
|
||||
public uint FolderId { get; }
|
||||
public DateTime CreateDate { get; }
|
||||
public DateTime ContentModDate { get; }
|
||||
public DateTime AttributeModDate { get; }
|
||||
public DateTime AccessDate { get; }
|
||||
public DateTime BackupDate { get; }
|
||||
public HFSPermissions Permissions { get; }
|
||||
public HFSFolderInfo Info { get; }
|
||||
public uint TextEncoding { get; }
|
||||
|
||||
private HFSCatalogFolder(
|
||||
uint valence,
|
||||
uint folderId,
|
||||
DateTime createDate,
|
||||
DateTime contentModDate,
|
||||
DateTime attributeModDate,
|
||||
DateTime accessDate,
|
||||
DateTime backupDate,
|
||||
HFSPermissions permissions,
|
||||
HFSFolderInfo info,
|
||||
uint textEncoding)
|
||||
: base(HFSCatalogRecordType.Folder)
|
||||
{
|
||||
Valence = valence;
|
||||
FolderId = folderId;
|
||||
CreateDate = createDate;
|
||||
ContentModDate = contentModDate;
|
||||
AttributeModDate = attributeModDate;
|
||||
AccessDate = accessDate;
|
||||
BackupDate = backupDate;
|
||||
Permissions = permissions;
|
||||
Info = info;
|
||||
TextEncoding = textEncoding;
|
||||
}
|
||||
|
||||
public static HFSCatalogFolder Read(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
_ = ReadUInt16(ref data); // reserved
|
||||
uint valence = ReadUInt32(ref data);
|
||||
uint folderId = ReadUInt32(ref data);
|
||||
var createDate = ReadDate(ref data);
|
||||
var contentModDate = ReadDate(ref data);
|
||||
var attributeModDate = ReadDate(ref data);
|
||||
var accessDate = ReadDate(ref data);
|
||||
var backupDate = ReadDate(ref data);
|
||||
var permissions = HFSPermissions.Read(ref data);
|
||||
var info = HFSFolderInfo.Read(ref data);
|
||||
uint textEncoding = ReadUInt32(ref data);
|
||||
_ = ReadUInt32(ref data); // reserved
|
||||
|
||||
return new HFSCatalogFolder(
|
||||
valence,
|
||||
folderId,
|
||||
createDate,
|
||||
contentModDate,
|
||||
attributeModDate,
|
||||
accessDate,
|
||||
backupDate,
|
||||
permissions,
|
||||
info,
|
||||
textEncoding);
|
||||
}
|
||||
}
|
||||
|
||||
internal enum HFSFileFlags : ushort
|
||||
{
|
||||
LockedBit = 0x0000,
|
||||
LockedMask = 0x0001,
|
||||
ThreadExistsBit = 0x0001,
|
||||
ThreadExistsMask = 0x0002
|
||||
}
|
||||
|
||||
internal sealed class HFSCatalogFile : HFSCatalogRecord
|
||||
{
|
||||
public HFSFileFlags Flags { get; }
|
||||
public uint FileId { get; }
|
||||
public DateTime CreateDate { get; }
|
||||
public DateTime ContentModDate { get; }
|
||||
public DateTime AttributeModDate { get; }
|
||||
public DateTime AccessDate { get; }
|
||||
public DateTime BackupDate { get; }
|
||||
public HFSPermissions Permissions { get; }
|
||||
public HFSFileInfo Info { get; }
|
||||
public uint TextEncoding { get; }
|
||||
|
||||
public HFSForkData DataFork { get; }
|
||||
public HFSForkData ResourceFork { get; }
|
||||
|
||||
private HFSCatalogFile(
|
||||
HFSFileFlags flags,
|
||||
uint fileId,
|
||||
DateTime createDate,
|
||||
DateTime contentModDate,
|
||||
DateTime attributeModDate,
|
||||
DateTime accessDate,
|
||||
DateTime backupDate,
|
||||
HFSPermissions permissions,
|
||||
HFSFileInfo info,
|
||||
uint textEncoding,
|
||||
HFSForkData dataFork,
|
||||
HFSForkData resourceFork)
|
||||
:base(HFSCatalogRecordType.File)
|
||||
{
|
||||
Flags = flags;
|
||||
FileId = fileId;
|
||||
CreateDate = createDate;
|
||||
ContentModDate = contentModDate;
|
||||
AttributeModDate = attributeModDate;
|
||||
AccessDate = accessDate;
|
||||
BackupDate = backupDate;
|
||||
Permissions = permissions;
|
||||
Info = info;
|
||||
TextEncoding = textEncoding;
|
||||
DataFork = dataFork;
|
||||
ResourceFork = resourceFork;
|
||||
}
|
||||
|
||||
public static HFSCatalogFile Read(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
var flags = (HFSFileFlags)ReadUInt16(ref data);
|
||||
_ = ReadUInt32(ref data); // reserved
|
||||
uint fileId = ReadUInt32(ref data);
|
||||
var createDate = ReadDate(ref data);
|
||||
var contentModDate = ReadDate(ref data);
|
||||
var attributeModDate = ReadDate(ref data);
|
||||
var accessDate = ReadDate(ref data);
|
||||
var backupDate = ReadDate(ref data);
|
||||
var permissions = HFSPermissions.Read(ref data);
|
||||
var info = HFSFileInfo.Read(ref data);
|
||||
uint textEncoding = ReadUInt32(ref data);
|
||||
_ = ReadUInt32(ref data); // reserved
|
||||
|
||||
var dataFork = HFSForkData.Read(ref data);
|
||||
var resourceFork = HFSForkData.Read(ref data);
|
||||
|
||||
return new HFSCatalogFile(
|
||||
flags,
|
||||
fileId,
|
||||
createDate,
|
||||
contentModDate,
|
||||
attributeModDate,
|
||||
accessDate,
|
||||
backupDate,
|
||||
permissions,
|
||||
info,
|
||||
textEncoding,
|
||||
dataFork,
|
||||
resourceFork);
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class HFSCatalogThread : HFSCatalogRecord
|
||||
{
|
||||
public uint ParentId { get; }
|
||||
public string NodeName { get; }
|
||||
public HFSCatalogKey CatalogKey { get; }
|
||||
|
||||
private HFSCatalogThread(uint parentId, string nodeName, bool isFile, HFSKeyCompareType compareType, bool isHFSX)
|
||||
: base(isFile ? HFSCatalogRecordType.FileThread : HFSCatalogRecordType.FolderThread)
|
||||
{
|
||||
ParentId = parentId;
|
||||
NodeName = nodeName;
|
||||
CatalogKey = new HFSCatalogKey(ParentId, NodeName, compareType, isHFSX);
|
||||
}
|
||||
|
||||
public static HFSCatalogThread Read(ref ReadOnlySpan<byte> data, bool isFile, HFSKeyCompareType compareType, bool isHFSX)
|
||||
{
|
||||
_ = ReadInt16(ref data); // reserved
|
||||
uint parentId = ReadUInt32(ref data);
|
||||
string nodeName = ReadString(ref data, true);
|
||||
|
||||
return new HFSCatalogThread(parentId, nodeName, isFile, compareType, isHFSX);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal sealed class HFSExtentDescriptor : HFSStructBase
|
||||
{
|
||||
public uint StartBlock { get; }
|
||||
public uint BlockCount { get; }
|
||||
|
||||
private HFSExtentDescriptor(uint startBlock, uint blockCount)
|
||||
{
|
||||
StartBlock = startBlock;
|
||||
BlockCount = blockCount;
|
||||
}
|
||||
|
||||
public static HFSExtentDescriptor Read(Stream stream)
|
||||
{
|
||||
return new HFSExtentDescriptor(
|
||||
ReadUInt32(stream),
|
||||
ReadUInt32(stream));
|
||||
}
|
||||
|
||||
public static HFSExtentDescriptor Read(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
return new HFSExtentDescriptor(
|
||||
ReadUInt32(ref data),
|
||||
ReadUInt32(ref data));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,115 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal sealed class HFSExtentKey : HFSStructBase, IEquatable<HFSExtentKey>, IComparable<HFSExtentKey>, IComparable
|
||||
{
|
||||
public byte ForkType { get; }
|
||||
public uint FileId { get; }
|
||||
public uint StartBlock { get; }
|
||||
|
||||
public HFSExtentKey(byte forkType, uint fileId, uint startBlock)
|
||||
{
|
||||
ForkType = forkType;
|
||||
FileId = fileId;
|
||||
StartBlock = startBlock;
|
||||
}
|
||||
|
||||
public HFSExtentKey(byte[] key)
|
||||
{
|
||||
ReadOnlySpan<byte> data = key.AsSpan();
|
||||
ForkType = ReadUInt8(ref data);
|
||||
_ = ReadUInt8(ref data); // padding
|
||||
FileId = ReadUInt32(ref data);
|
||||
StartBlock = ReadUInt32(ref data);
|
||||
}
|
||||
|
||||
public bool Equals(HFSExtentKey? other)
|
||||
{
|
||||
if (other is null) return false;
|
||||
else return (ForkType == other.ForkType) && (FileId == other.FileId) && (StartBlock == other.StartBlock);
|
||||
}
|
||||
|
||||
public override bool Equals(object? obj)
|
||||
{
|
||||
if (obj is HFSExtentKey other) return Equals(other);
|
||||
else return false;
|
||||
}
|
||||
|
||||
public int CompareTo(HFSExtentKey? other)
|
||||
{
|
||||
if (other is null) return 1;
|
||||
|
||||
int result = FileId.CompareTo(other.FileId);
|
||||
if (result == 0) result = ForkType.CompareTo(other.ForkType);
|
||||
if (result == 0) result = StartBlock.CompareTo(other.StartBlock);
|
||||
return result;
|
||||
}
|
||||
|
||||
public int CompareTo(object? obj)
|
||||
{
|
||||
if (obj is null) return 1;
|
||||
else if (obj is HFSExtentKey other) return CompareTo(other);
|
||||
else throw new ArgumentException("Object is not of type ExtentKey", nameof(obj));
|
||||
}
|
||||
|
||||
public override int GetHashCode()
|
||||
=> ForkType.GetHashCode() ^ FileId.GetHashCode() ^ StartBlock.GetHashCode();
|
||||
|
||||
public static bool operator ==(HFSExtentKey? left, HFSExtentKey? right)
|
||||
{
|
||||
if (left is null) return right is null;
|
||||
else return left.Equals(right);
|
||||
}
|
||||
|
||||
public static bool operator !=(HFSExtentKey? left, HFSExtentKey? right)
|
||||
{
|
||||
if (left is null) return right is not null;
|
||||
else return !left.Equals(right);
|
||||
}
|
||||
|
||||
public static bool operator <(HFSExtentKey? left, HFSExtentKey? right)
|
||||
{
|
||||
if (left is null) return right is not null;
|
||||
else return left.CompareTo(right) < 0;
|
||||
}
|
||||
|
||||
public static bool operator >(HFSExtentKey? left, HFSExtentKey? right)
|
||||
{
|
||||
if (left is null) return false;
|
||||
else return left.CompareTo(right) > 0;
|
||||
}
|
||||
|
||||
public static bool operator <=(HFSExtentKey? left, HFSExtentKey? right)
|
||||
{
|
||||
if (left is null) return true;
|
||||
else return left.CompareTo(right) <= 0;
|
||||
}
|
||||
|
||||
public static bool operator >=(HFSExtentKey? left, HFSExtentKey? right)
|
||||
{
|
||||
if (left is null) return right is null;
|
||||
else return left.CompareTo(right) >= 0;
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class HFSExtentRecord : HFSStructBase
|
||||
{
|
||||
private const int ExtentCount = 8;
|
||||
|
||||
public IReadOnlyList<HFSExtentDescriptor> Extents { get; }
|
||||
|
||||
private HFSExtentRecord(IReadOnlyList<HFSExtentDescriptor> extents)
|
||||
=> Extents = extents;
|
||||
|
||||
public static HFSExtentRecord Read(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
var extents = new HFSExtentDescriptor[ExtentCount];
|
||||
for (int i = 0; i < ExtentCount; i++)
|
||||
extents[i] = HFSExtentDescriptor.Read(ref data);
|
||||
|
||||
return new HFSExtentRecord(extents);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,145 +0,0 @@
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal struct HFSPoint
|
||||
{
|
||||
public short V;
|
||||
public short H;
|
||||
}
|
||||
|
||||
internal struct HFSRect
|
||||
{
|
||||
public short Top;
|
||||
public short Left;
|
||||
public short Bottom;
|
||||
public short Right;
|
||||
}
|
||||
|
||||
[Flags]
|
||||
internal enum HFSFinderFlags : ushort
|
||||
{
|
||||
None = 0x0000,
|
||||
|
||||
IsOnDesk = 0x0001, /* Files and folders (System 6) */
|
||||
Color = 0x000E, /* Files and folders */
|
||||
IsShared = 0x0040, /* Files only (Applications only) If */
|
||||
/* clear, the application needs */
|
||||
/* to write to its resource fork, */
|
||||
/* and therefore cannot be shared */
|
||||
/* on a server */
|
||||
HasNoINITs = 0x0080, /* Files only (Extensions/Control */
|
||||
/* Panels only) */
|
||||
/* This file contains no INIT resource */
|
||||
HasBeenInited = 0x0100, /* Files only. Clear if the file */
|
||||
/* contains desktop database resources */
|
||||
/* ('BNDL', 'FREF', 'open', 'kind'...) */
|
||||
/* that have not been added yet. Set */
|
||||
/* only by the Finder. */
|
||||
/* Reserved for folders */
|
||||
HasCustomIcon = 0x0400, /* Files and folders */
|
||||
IsStationery = 0x0800, /* Files only */
|
||||
NameLocked = 0x1000, /* Files and folders */
|
||||
HasBundle = 0x2000, /* Files only */
|
||||
IsInvisible = 0x4000, /* Files and folders */
|
||||
IsAlias = 0x8000 /* Files only */
|
||||
}
|
||||
|
||||
[Flags]
|
||||
internal enum HFSExtendedFinderFlags : ushort
|
||||
{
|
||||
None = 0x0000,
|
||||
|
||||
ExtendedFlagsAreInvalid = 0x8000, /* The other extended flags */
|
||||
/* should be ignored */
|
||||
HasCustomBadge = 0x0100, /* The file or folder has a */
|
||||
/* badge resource */
|
||||
HasRoutingInfo = 0x0004 /* The file contains routing */
|
||||
/* info resource */
|
||||
}
|
||||
|
||||
internal sealed class HFSFileInfo : HFSStructBase
|
||||
{
|
||||
public string FileType { get; } /* The type of the file */
|
||||
public string FileCreator { get; } /* The file's creator */
|
||||
public HFSFinderFlags FinderFlags { get; }
|
||||
public HFSPoint Location { get; } /* File's location in the folder. */
|
||||
public HFSExtendedFinderFlags ExtendedFinderFlags { get; }
|
||||
public int PutAwayFolderId { get; }
|
||||
|
||||
private HFSFileInfo(
|
||||
string fileType,
|
||||
string fileCreator,
|
||||
HFSFinderFlags finderFlags,
|
||||
HFSPoint location,
|
||||
HFSExtendedFinderFlags extendedFinderFlags,
|
||||
int putAwayFolderId)
|
||||
{
|
||||
FileType = fileType;
|
||||
FileCreator = fileCreator;
|
||||
FinderFlags = finderFlags;
|
||||
Location = location;
|
||||
ExtendedFinderFlags = extendedFinderFlags;
|
||||
PutAwayFolderId = putAwayFolderId;
|
||||
}
|
||||
|
||||
public static HFSFileInfo Read(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
string fileType = ReadOSType(ref data);
|
||||
string fileCreator = ReadOSType(ref data);
|
||||
var finderFlags = (HFSFinderFlags)ReadUInt16(ref data);
|
||||
var location = ReadPoint(ref data);
|
||||
_ = ReadUInt16(ref data); // reserved
|
||||
data = data.Slice(4 * sizeof(short)); // reserved
|
||||
var extendedFinderFlags = (HFSExtendedFinderFlags)ReadUInt16(ref data);
|
||||
_ = ReadInt16(ref data); // reserved
|
||||
int putAwayFolderId = ReadInt32(ref data);
|
||||
|
||||
return new HFSFileInfo(fileType, fileCreator, finderFlags, location, extendedFinderFlags, putAwayFolderId);
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class HFSFolderInfo : HFSStructBase
|
||||
{
|
||||
public HFSRect WindowBounds { get; } /* The position and dimension of the */
|
||||
/* folder's window */
|
||||
public HFSFinderFlags FinderFlags { get; }
|
||||
public HFSPoint Location { get; } /* Folder's location in the parent */
|
||||
/* folder. If set to {0, 0}, the Finder */
|
||||
/* will place the item automatically */
|
||||
public HFSPoint ScrollPosition { get; } /* Scroll position (for icon views) */
|
||||
public HFSExtendedFinderFlags ExtendedFinderFlags { get; }
|
||||
public int PutAwayFolderId { get; }
|
||||
|
||||
private HFSFolderInfo(
|
||||
HFSRect windowBounds,
|
||||
HFSFinderFlags finderFlags,
|
||||
HFSPoint location,
|
||||
HFSPoint scrollPosition,
|
||||
HFSExtendedFinderFlags extendedFinderFlags,
|
||||
int putAwayFolderId)
|
||||
{
|
||||
WindowBounds = windowBounds;
|
||||
FinderFlags = finderFlags;
|
||||
Location = location;
|
||||
ScrollPosition = scrollPosition;
|
||||
ExtendedFinderFlags = extendedFinderFlags;
|
||||
PutAwayFolderId = putAwayFolderId;
|
||||
}
|
||||
|
||||
public static HFSFolderInfo Read(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
var windowBounds = ReadRect(ref data);
|
||||
var finderFlags = (HFSFinderFlags)ReadUInt16(ref data);
|
||||
var location = ReadPoint(ref data);
|
||||
_ = ReadUInt16(ref data); // reserved
|
||||
var scrollPosition = ReadPoint(ref data);
|
||||
_ = ReadInt32(ref data); // reserved
|
||||
var extendedFinderFlags = (HFSExtendedFinderFlags)ReadUInt16(ref data);
|
||||
_ = ReadInt16(ref data); // reserved
|
||||
int putAwayFolderId = ReadInt32(ref data);
|
||||
|
||||
return new HFSFolderInfo(windowBounds, finderFlags, location, scrollPosition, extendedFinderFlags, putAwayFolderId);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal sealed class HFSForkData : HFSStructBase
|
||||
{
|
||||
private const int ExtentCount = 8;
|
||||
|
||||
public ulong LogicalSize { get; }
|
||||
public uint ClumpSize { get; }
|
||||
public uint TotalBlocks { get; }
|
||||
public IReadOnlyList<HFSExtentDescriptor> Extents { get; }
|
||||
|
||||
private HFSForkData(ulong logicalSize, uint clumpSize, uint totalBlocks, IReadOnlyList<HFSExtentDescriptor> extents)
|
||||
{
|
||||
LogicalSize = logicalSize;
|
||||
ClumpSize = clumpSize;
|
||||
TotalBlocks = totalBlocks;
|
||||
Extents = extents;
|
||||
}
|
||||
|
||||
public static HFSForkData Read(Stream stream)
|
||||
{
|
||||
ulong logicalSize = ReadUInt64(stream);
|
||||
uint clumpSize = ReadUInt32(stream);
|
||||
uint totalBlocks = ReadUInt32(stream);
|
||||
|
||||
var extents = new HFSExtentDescriptor[ExtentCount];
|
||||
for (int i = 0; i < ExtentCount; i++)
|
||||
extents[i] = HFSExtentDescriptor.Read(stream);
|
||||
|
||||
return new HFSForkData(logicalSize, clumpSize, totalBlocks, extents);
|
||||
}
|
||||
|
||||
public static HFSForkData Read(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
ulong logicalSize = ReadUInt64(ref data);
|
||||
uint clumpSize = ReadUInt32(ref data);
|
||||
uint totalBlocks = ReadUInt32(ref data);
|
||||
|
||||
var extents = new HFSExtentDescriptor[ExtentCount];
|
||||
for (int i = 0; i < ExtentCount; i++)
|
||||
extents[i] = HFSExtentDescriptor.Read(ref data);
|
||||
|
||||
return new HFSForkData(logicalSize, clumpSize, totalBlocks, extents);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,196 +0,0 @@
|
||||
using SharpCompress.IO;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal sealed class HFSForkStream : Stream
|
||||
{
|
||||
private readonly Stream _baseStream;
|
||||
private readonly HFSVolumeHeader _volumeHeader;
|
||||
private readonly IReadOnlyList<HFSExtentDescriptor> _extents;
|
||||
private long _position;
|
||||
private bool _isEnded;
|
||||
private int _extentIndex;
|
||||
private Stream? _extentStream;
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanWrite => false;
|
||||
public override bool CanSeek => true;
|
||||
public override long Length { get; }
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _position;
|
||||
set
|
||||
{
|
||||
if ((value < 0) || (value > Length)) throw new ArgumentOutOfRangeException(nameof(value));
|
||||
|
||||
if (value == Length)
|
||||
{
|
||||
// End of the stream
|
||||
|
||||
_position = Length;
|
||||
_isEnded = true;
|
||||
_extentIndex = -1;
|
||||
_extentStream = null;
|
||||
}
|
||||
else if (value != _position)
|
||||
{
|
||||
_position = value;
|
||||
|
||||
// We first have to determine in which extent we are now, then we seek to the exact position in that extent.
|
||||
|
||||
long offsetInExtent = _position;
|
||||
for (int i = 0; i < _extents.Count; i++)
|
||||
{
|
||||
var extent = _extents[i];
|
||||
long extentSize = extent.BlockCount * _volumeHeader.BlockSize;
|
||||
if (extentSize < offsetInExtent)
|
||||
{
|
||||
if (i == _extentIndex)
|
||||
{
|
||||
// We are in the same extent so just seek to the correct position
|
||||
_extentStream!.Position = offsetInExtent;
|
||||
}
|
||||
else
|
||||
{
|
||||
_extentIndex = i;
|
||||
_extentStream = GetExtentStream();
|
||||
_extentStream.Position = offsetInExtent;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
offsetInExtent -= extentSize;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public HFSForkStream(Stream baseStream, HFSVolumeHeader volumeHeader, HFSForkData forkData)
|
||||
{
|
||||
_baseStream = baseStream;
|
||||
_volumeHeader = volumeHeader;
|
||||
_extents = forkData.Extents;
|
||||
Length = (long)forkData.LogicalSize;
|
||||
|
||||
_position = 0;
|
||||
_extentIndex = -1;
|
||||
_extentIndex = GetNextExtent();
|
||||
_isEnded = _extentIndex < 0;
|
||||
if (!_isEnded) _extentStream = GetExtentStream();
|
||||
}
|
||||
|
||||
public HFSForkStream(
|
||||
Stream baseStream, HFSVolumeHeader volumeHeader, HFSForkData forkData, uint fileId,
|
||||
IReadOnlyDictionary<HFSExtentKey, HFSExtentRecord> extents)
|
||||
{
|
||||
_baseStream = baseStream;
|
||||
_volumeHeader = volumeHeader;
|
||||
Length = (long)forkData.LogicalSize;
|
||||
|
||||
uint blocks = (uint)forkData.Extents.Sum(e => e.BlockCount);
|
||||
var totalExtents = new List<HFSExtentDescriptor>(forkData.Extents);
|
||||
_extents = totalExtents;
|
||||
|
||||
var nextKey = new HFSExtentKey(0, fileId, blocks);
|
||||
while (extents.TryGetValue(nextKey, out var record))
|
||||
{
|
||||
blocks += (uint)record.Extents.Sum(e => e.BlockCount);
|
||||
totalExtents.AddRange(record.Extents);
|
||||
|
||||
nextKey = new HFSExtentKey(0, fileId, blocks);
|
||||
}
|
||||
|
||||
_position = 0;
|
||||
_extentIndex = -1;
|
||||
_extentIndex = GetNextExtent();
|
||||
_isEnded = _extentIndex < 0;
|
||||
if (!_isEnded) _extentStream = GetExtentStream();
|
||||
}
|
||||
|
||||
private int GetNextExtent()
|
||||
{
|
||||
int index = _extentIndex + 1;
|
||||
if (index >= _extents.Count) return -1;
|
||||
|
||||
var extent = _extents[index];
|
||||
if ((extent.StartBlock == 0) && (extent.BlockCount == 0)) return -1;
|
||||
return index;
|
||||
}
|
||||
|
||||
private Stream GetExtentStream()
|
||||
{
|
||||
if (_extentIndex < 0)
|
||||
throw new InvalidOperationException("Invalid extent index");
|
||||
|
||||
var extent = _extents[_extentIndex];
|
||||
return new HFSExtentStream(_baseStream, _volumeHeader, extent);
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{ }
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (_isEnded) return 0;
|
||||
|
||||
count = (int)Math.Min(count, Length - Position);
|
||||
int readCount = _extentStream!.Read(buffer, offset, count);
|
||||
while (readCount < count)
|
||||
{
|
||||
_extentIndex = GetNextExtent();
|
||||
if (_extentIndex < 0)
|
||||
{
|
||||
_isEnded = true;
|
||||
return readCount;
|
||||
}
|
||||
|
||||
_extentStream = GetExtentStream();
|
||||
readCount += _extentStream.Read(buffer, offset + readCount, count - readCount);
|
||||
}
|
||||
|
||||
_position += readCount;
|
||||
return readCount;
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
=> throw new NotSupportedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
=> throw new NotSupportedException();
|
||||
|
||||
private sealed class HFSExtentStream : SeekableSubStream
|
||||
{
|
||||
public HFSExtentStream(Stream stream, HFSVolumeHeader volumeHeader, HFSExtentDescriptor extent)
|
||||
: base(stream, (long)extent.StartBlock * volumeHeader.BlockSize, (long)extent.BlockCount * volumeHeader.BlockSize)
|
||||
{ }
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,91 +0,0 @@
|
||||
using System;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal abstract class HFSKeyedRecord : HFSStructBase
|
||||
{
|
||||
private readonly HFSKeyCompareType _compareType;
|
||||
private readonly bool _isHFSX;
|
||||
private HFSCatalogKey? _catalogKey;
|
||||
private HFSExtentKey? _extentKey;
|
||||
|
||||
public byte[] Key { get; }
|
||||
|
||||
public HFSCatalogKey GetCatalogKey() => _catalogKey ??= new HFSCatalogKey(Key, _compareType, _isHFSX);
|
||||
|
||||
public HFSExtentKey GetExtentKey() => _extentKey ??= new HFSExtentKey(Key);
|
||||
|
||||
protected HFSKeyedRecord(byte[] key, HFSKeyCompareType compareType, bool isHFSX)
|
||||
{
|
||||
Key = key;
|
||||
_compareType = compareType;
|
||||
_isHFSX = isHFSX;
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class HFSPointerRecord : HFSKeyedRecord
|
||||
{
|
||||
public uint NodeNumber { get; }
|
||||
|
||||
private HFSPointerRecord(byte[] key, uint nodeNumber, HFSKeyCompareType compareType, bool isHFSX)
|
||||
: base(key, compareType, isHFSX)
|
||||
{
|
||||
NodeNumber = nodeNumber;
|
||||
}
|
||||
|
||||
public static HFSPointerRecord Read(ref ReadOnlySpan<byte> data, HFSTreeHeaderRecord headerRecord, bool isHFSX)
|
||||
{
|
||||
bool isBigKey = headerRecord.Attributes.HasFlag(HFSTreeAttributes.BigKeys);
|
||||
ushort keyLength = isBigKey ? ReadUInt16(ref data) : ReadUInt8(ref data);
|
||||
if (!headerRecord.Attributes.HasFlag(HFSTreeAttributes.VariableIndexKeys)) keyLength = headerRecord.MaxKeyLength;
|
||||
int keySize = (isBigKey ? 2 : 1) + keyLength;
|
||||
|
||||
var key = new byte[keyLength];
|
||||
data.Slice(0, keyLength).CopyTo(key);
|
||||
data = data.Slice(keyLength);
|
||||
|
||||
// data is always aligned to 2 bytes
|
||||
if (keySize % 2 == 1) data = data.Slice(1);
|
||||
|
||||
uint nodeNumber = ReadUInt32(ref data);
|
||||
|
||||
return new HFSPointerRecord(key, nodeNumber, headerRecord.KeyCompareType, isHFSX);
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class HFSDataRecord : HFSKeyedRecord
|
||||
{
|
||||
public byte[] Data { get; }
|
||||
|
||||
private HFSDataRecord(byte[] key, byte[] data, HFSKeyCompareType compareType, bool isHFSX)
|
||||
: base(key, compareType, isHFSX)
|
||||
{
|
||||
Data = data;
|
||||
}
|
||||
|
||||
public static HFSDataRecord Read(ref ReadOnlySpan<byte> data, int size, HFSTreeHeaderRecord headerRecord, bool isHFSX)
|
||||
{
|
||||
bool isBigKey = headerRecord.Attributes.HasFlag(HFSTreeAttributes.BigKeys);
|
||||
ushort keyLength = isBigKey ? ReadUInt16(ref data) : ReadUInt8(ref data);
|
||||
int keySize = (isBigKey ? 2 : 1) + keyLength;
|
||||
size -= keySize;
|
||||
|
||||
var key = new byte[keyLength];
|
||||
data.Slice(0, keyLength).CopyTo(key);
|
||||
data = data.Slice(keyLength);
|
||||
|
||||
// data is always aligned to 2 bytes
|
||||
if (keySize % 2 == 1)
|
||||
{
|
||||
data = data.Slice(1);
|
||||
size--;
|
||||
}
|
||||
|
||||
var structData = new byte[size];
|
||||
data.Slice(0, size).CopyTo(structData);
|
||||
data = data.Slice(size);
|
||||
|
||||
return new HFSDataRecord(key, structData, headerRecord.KeyCompareType, isHFSX);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,35 +0,0 @@
using System;

namespace SharpCompress.Common.Dmg.HFS
{
internal sealed class HFSPermissions : HFSStructBase
{
public uint OwnerID { get; }
public uint GroupID { get; }
public byte AdminFlags { get; }
public byte OwnerFlags { get; }
public ushort FileMode { get; }
public uint Special { get; }

private HFSPermissions(uint ownerID, uint groupID, byte adminFlags, byte ownerFlags, ushort fileMode, uint special)
{
OwnerID = ownerID;
GroupID = groupID;
AdminFlags = adminFlags;
OwnerFlags = ownerFlags;
FileMode = fileMode;
Special = special;
}

public static HFSPermissions Read(ref ReadOnlySpan<byte> data)
{
return new HFSPermissions(
ReadUInt32(ref data),
ReadUInt32(ref data),
ReadUInt8(ref data),
ReadUInt8(ref data),
ReadUInt16(ref data),
ReadUInt32(ref data));
}
}
}
@@ -1,187 +0,0 @@
|
||||
using System;
|
||||
using System.Buffers.Binary;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal abstract class HFSStructBase
|
||||
{
|
||||
private const int StringSize = 510;
|
||||
private const int OSTypeSize = 4;
|
||||
private static readonly DateTime Epoch = new DateTime(1904, 1, 1, 0, 0, 0, DateTimeKind.Utc);
|
||||
private static readonly byte[] _buffer = new byte[StringSize];
|
||||
|
||||
protected static byte ReadUInt8(Stream stream)
|
||||
{
|
||||
if (stream.Read(_buffer, 0, sizeof(byte)) != sizeof(byte))
|
||||
throw new EndOfStreamException();
|
||||
|
||||
return _buffer[0];
|
||||
}
|
||||
|
||||
protected static ushort ReadUInt16(Stream stream)
|
||||
{
|
||||
if (stream.Read(_buffer, 0, sizeof(ushort)) != sizeof(ushort))
|
||||
throw new EndOfStreamException();
|
||||
|
||||
return BinaryPrimitives.ReadUInt16BigEndian(_buffer);
|
||||
}
|
||||
|
||||
protected static short ReadInt16(Stream stream)
|
||||
{
|
||||
if (stream.Read(_buffer, 0, sizeof(short)) != sizeof(short))
|
||||
throw new EndOfStreamException();
|
||||
|
||||
return BinaryPrimitives.ReadInt16BigEndian(_buffer);
|
||||
}
|
||||
|
||||
protected static uint ReadUInt32(Stream stream)
|
||||
{
|
||||
if (stream.Read(_buffer, 0, sizeof(uint)) != sizeof(uint))
|
||||
throw new EndOfStreamException();
|
||||
|
||||
return BinaryPrimitives.ReadUInt32BigEndian(_buffer);
|
||||
}
|
||||
|
||||
protected static int ReadInt32(Stream stream)
|
||||
{
|
||||
if (stream.Read(_buffer, 0, sizeof(int)) != sizeof(int))
|
||||
throw new EndOfStreamException();
|
||||
|
||||
return BinaryPrimitives.ReadInt32BigEndian(_buffer);
|
||||
}
|
||||
|
||||
protected static ulong ReadUInt64(Stream stream)
|
||||
{
|
||||
if (stream.Read(_buffer, 0, sizeof(ulong)) != sizeof(ulong))
|
||||
throw new EndOfStreamException();
|
||||
|
||||
return BinaryPrimitives.ReadUInt64BigEndian(_buffer);
|
||||
}
|
||||
|
||||
protected static long ReadInt64(Stream stream)
|
||||
{
|
||||
if (stream.Read(_buffer, 0, sizeof(long)) != sizeof(long))
|
||||
throw new EndOfStreamException();
|
||||
|
||||
return BinaryPrimitives.ReadInt64BigEndian(_buffer);
|
||||
}
|
||||
|
||||
protected static string ReadString(Stream stream)
|
||||
{
|
||||
ushort length = ReadUInt16(stream);
|
||||
if (stream.Read(_buffer, 0, StringSize) != StringSize)
|
||||
throw new EndOfStreamException();
|
||||
return Encoding.Unicode.GetString(_buffer, 0, Math.Min(length * 2, StringSize));
|
||||
}
|
||||
|
||||
protected static DateTime ReadDate(Stream stream)
|
||||
{
|
||||
uint seconds = ReadUInt32(stream);
|
||||
var span = TimeSpan.FromSeconds(seconds);
|
||||
return Epoch + span;
|
||||
}
|
||||
|
||||
protected static byte ReadUInt8(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
byte val = data[0];
|
||||
data = data.Slice(sizeof(byte));
|
||||
return val;
|
||||
}
|
||||
|
||||
protected static ushort ReadUInt16(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
ushort val = BinaryPrimitives.ReadUInt16BigEndian(data);
|
||||
data = data.Slice(sizeof(ushort));
|
||||
return val;
|
||||
}
|
||||
|
||||
protected static short ReadInt16(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
short val = BinaryPrimitives.ReadInt16BigEndian(data);
|
||||
data = data.Slice(sizeof(short));
|
||||
return val;
|
||||
}
|
||||
|
||||
protected static uint ReadUInt32(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
uint val = BinaryPrimitives.ReadUInt32BigEndian(data);
|
||||
data = data.Slice(sizeof(uint));
|
||||
return val;
|
||||
}
|
||||
|
||||
protected static int ReadInt32(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
int val = BinaryPrimitives.ReadInt32BigEndian(data);
|
||||
data = data.Slice(sizeof(int));
|
||||
return val;
|
||||
}
|
||||
|
||||
protected static ulong ReadUInt64(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
ulong val = BinaryPrimitives.ReadUInt64BigEndian(data);
|
||||
data = data.Slice(sizeof(ulong));
|
||||
return val;
|
||||
}
|
||||
|
||||
protected static long ReadInt64(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
long val = BinaryPrimitives.ReadInt64BigEndian(data);
|
||||
data = data.Slice(sizeof(long));
|
||||
return val;
|
||||
}
|
||||
|
||||
protected static string ReadString(ref ReadOnlySpan<byte> data, bool truncate)
|
||||
{
|
||||
int length = ReadUInt16(ref data);
|
||||
if (truncate)
|
||||
{
|
||||
length = Math.Min(length * 2, StringSize);
|
||||
data.Slice(0, length).CopyTo(_buffer);
|
||||
data = data.Slice(length);
|
||||
return Encoding.BigEndianUnicode.GetString(_buffer, 0, length);
|
||||
}
|
||||
else
|
||||
{
|
||||
data.Slice(0, StringSize).CopyTo(_buffer);
|
||||
data = data.Slice(StringSize);
|
||||
return Encoding.BigEndianUnicode.GetString(_buffer, 0, Math.Min(length * 2, StringSize));
|
||||
}
|
||||
}
|
||||
|
||||
protected static DateTime ReadDate(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
uint seconds = ReadUInt32(ref data);
|
||||
var span = TimeSpan.FromSeconds(seconds);
|
||||
return Epoch + span;
|
||||
}
|
||||
|
||||
protected static string ReadOSType(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
data.Slice(0, OSTypeSize).CopyTo(_buffer);
|
||||
data = data.Slice(OSTypeSize);
|
||||
return Encoding.ASCII.GetString(_buffer, 0, OSTypeSize).NullTerminate();
|
||||
}
|
||||
|
||||
protected static HFSPoint ReadPoint(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
return new HFSPoint()
|
||||
{
|
||||
V = ReadInt16(ref data),
|
||||
H = ReadInt16(ref data)
|
||||
};
|
||||
}
|
||||
|
||||
protected static HFSRect ReadRect(ref ReadOnlySpan<byte> data)
|
||||
{
|
||||
return new HFSRect()
|
||||
{
|
||||
Top = ReadInt16(ref data),
|
||||
Left = ReadInt16(ref data),
|
||||
Bottom = ReadInt16(ref data),
|
||||
Right = ReadInt16(ref data)
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,108 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal enum HFSTreeType : byte
|
||||
{
|
||||
HFS = 0, // control file
|
||||
User = 128, // user btree type starts from 128
|
||||
Reserved = 255
|
||||
}
|
||||
|
||||
internal enum HFSKeyCompareType : byte
|
||||
{
|
||||
CaseFolding = 0xCF, // case-insensitive
|
||||
BinaryCompare = 0xBC // case-sensitive
|
||||
}
|
||||
|
||||
[Flags]
|
||||
internal enum HFSTreeAttributes : uint
|
||||
{
|
||||
None = 0x00000000,
|
||||
BadClose = 0x00000001,
|
||||
BigKeys = 0x00000002,
|
||||
VariableIndexKeys = 0x00000004
|
||||
}
|
||||
|
||||
internal sealed class HFSTreeHeaderRecord : HFSStructBase
|
||||
{
|
||||
public ushort TreeDepth;
|
||||
public uint RootNode;
|
||||
public uint LeafRecords;
|
||||
public uint FirstLeafNode;
|
||||
public uint LastLeafNode;
|
||||
public ushort NodeSize;
|
||||
public ushort MaxKeyLength;
|
||||
public uint TotalNodes;
|
||||
public uint FreeNodes;
|
||||
public uint ClumpSize;
|
||||
public HFSTreeType TreeType;
|
||||
public HFSKeyCompareType KeyCompareType;
|
||||
public HFSTreeAttributes Attributes;
|
||||
|
||||
private HFSTreeHeaderRecord(
|
||||
ushort treeDepth,
|
||||
uint rootNode,
|
||||
uint leafRecords,
|
||||
uint firstLeafNode,
|
||||
uint lastLeafNode,
|
||||
ushort nodeSize,
|
||||
ushort maxKeyLength,
|
||||
uint totalNodes,
|
||||
uint freeNodes,
|
||||
uint clumpSize,
|
||||
HFSTreeType treeType,
|
||||
HFSKeyCompareType keyCompareType,
|
||||
HFSTreeAttributes attributes)
|
||||
{
|
||||
TreeDepth = treeDepth;
|
||||
RootNode = rootNode;
|
||||
LeafRecords = leafRecords;
|
||||
FirstLeafNode = firstLeafNode;
|
||||
LastLeafNode = lastLeafNode;
|
||||
NodeSize = nodeSize;
|
||||
MaxKeyLength = maxKeyLength;
|
||||
TotalNodes = totalNodes;
|
||||
FreeNodes = freeNodes;
|
||||
ClumpSize = clumpSize;
|
||||
TreeType = treeType;
|
||||
KeyCompareType = keyCompareType;
|
||||
Attributes = attributes;
|
||||
}
|
||||
|
||||
public static HFSTreeHeaderRecord Read(Stream stream)
|
||||
{
|
||||
ushort treeDepth = ReadUInt16(stream);
|
||||
uint rootNode = ReadUInt32(stream);
|
||||
uint leafRecords = ReadUInt32(stream);
|
||||
uint firstLeafNode = ReadUInt32(stream);
|
||||
uint lastLeafNode = ReadUInt32(stream);
|
||||
ushort nodeSize = ReadUInt16(stream);
|
||||
ushort maxKeyLength = ReadUInt16(stream);
|
||||
uint totalNodes = ReadUInt32(stream);
|
||||
uint freeNodes = ReadUInt32(stream);
|
||||
_ = ReadUInt16(stream); // reserved
|
||||
uint clumpSize = ReadUInt32(stream);
|
||||
var treeType = (HFSTreeType)ReadUInt8(stream);
|
||||
var keyCompareType = (HFSKeyCompareType)ReadUInt8(stream);
|
||||
var attributes = (HFSTreeAttributes)ReadUInt32(stream);
|
||||
for (int i = 0; i < 16; i++) _ = ReadUInt32(stream); // reserved
|
||||
|
||||
return new HFSTreeHeaderRecord(
|
||||
treeDepth,
|
||||
rootNode,
|
||||
leafRecords,
|
||||
firstLeafNode,
|
||||
lastLeafNode,
|
||||
nodeSize,
|
||||
maxKeyLength,
|
||||
totalNodes,
|
||||
freeNodes,
|
||||
clumpSize,
|
||||
treeType,
|
||||
keyCompareType,
|
||||
attributes);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,167 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal abstract class HFSTreeNode : HFSStructBase
|
||||
{
|
||||
private static byte[]? _buffer = null;
|
||||
|
||||
public HFSTreeNodeDescriptor Descriptor { get; }
|
||||
|
||||
protected HFSTreeNode(HFSTreeNodeDescriptor descriptor)
|
||||
=> Descriptor = descriptor;
|
||||
|
||||
public static bool TryRead(Stream stream, HFSTreeHeaderRecord headerRecord, bool isHFSX, out HFSTreeNode? node)
|
||||
{
|
||||
node = null;
|
||||
|
||||
if (!HFSTreeNodeDescriptor.TryRead(stream, out var descriptor)) return false;
|
||||
|
||||
int size = (int)headerRecord.NodeSize - HFSTreeNodeDescriptor.Size;
|
||||
if ((_buffer is null) || (_buffer.Length < size))
|
||||
_buffer = new byte[size * 2];
|
||||
|
||||
if (stream.Read(_buffer, 0, size) != size)
|
||||
throw new EndOfStreamException();
|
||||
ReadOnlySpan<byte> data = _buffer.AsSpan(0, size);
|
||||
|
||||
switch (descriptor!.Kind)
|
||||
{
|
||||
case HFSTreeNodeKind.Leaf:
|
||||
node = HFSLeafTreeNode.Read(descriptor, data, headerRecord, isHFSX);
|
||||
return true;
|
||||
|
||||
case HFSTreeNodeKind.Index:
|
||||
node = HFSIndexTreeNode.Read(descriptor, data, headerRecord, isHFSX);
|
||||
return true;
|
||||
|
||||
case HFSTreeNodeKind.Map:
|
||||
node = HFSMapTreeNode.Read(descriptor, data);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class HFSHeaderTreeNode : HFSTreeNode
|
||||
{
|
||||
private const int UserDataSize = 128;
|
||||
|
||||
public HFSTreeHeaderRecord HeaderRecord { get; }
|
||||
|
||||
public IReadOnlyList<byte> UserData { get; }
|
||||
|
||||
public IReadOnlyList<byte> Map { get; }
|
||||
|
||||
private HFSHeaderTreeNode(
|
||||
HFSTreeNodeDescriptor descriptor,
|
||||
HFSTreeHeaderRecord headerRecord,
|
||||
IReadOnlyList<byte> userData,
|
||||
IReadOnlyList<byte> map)
|
||||
: base(descriptor)
|
||||
{
|
||||
HeaderRecord = headerRecord;
|
||||
UserData = userData;
|
||||
Map = map;
|
||||
}
|
||||
|
||||
public static HFSHeaderTreeNode Read(HFSTreeNodeDescriptor descriptor, Stream stream)
|
||||
{
|
||||
if (descriptor.Kind != HFSTreeNodeKind.Header)
|
||||
throw new ArgumentException("Descriptor does not define a header node");
|
||||
|
||||
var headerRecord = HFSTreeHeaderRecord.Read(stream);
|
||||
var userData = new byte[UserDataSize];
|
||||
if (stream.Read(userData, 0, UserDataSize) != UserDataSize)
|
||||
throw new EndOfStreamException();
|
||||
|
||||
int mapSize = (int)(headerRecord.NodeSize - 256);
|
||||
var map = new byte[mapSize];
|
||||
if (stream.Read(map, 0, mapSize) != mapSize)
|
||||
throw new EndOfStreamException();
|
||||
|
||||
// offset values (not required for header node)
|
||||
_ = ReadUInt16(stream);
|
||||
_ = ReadUInt16(stream);
|
||||
_ = ReadUInt16(stream);
|
||||
_ = ReadUInt16(stream);
|
||||
|
||||
return new HFSHeaderTreeNode(descriptor, headerRecord, userData, map);
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class HFSMapTreeNode : HFSTreeNode
|
||||
{
|
||||
public IReadOnlyList<byte> Map { get; }
|
||||
|
||||
private HFSMapTreeNode(HFSTreeNodeDescriptor descriptor, IReadOnlyList<byte> map)
|
||||
: base(descriptor)
|
||||
{
|
||||
Map = map;
|
||||
}
|
||||
|
||||
public static HFSMapTreeNode Read(HFSTreeNodeDescriptor descriptor, ReadOnlySpan<byte> data)
|
||||
{
|
||||
int mapSize = data.Length - 6;
|
||||
var map = new byte[mapSize];
|
||||
data.Slice(0, mapSize).CopyTo(map);
|
||||
|
||||
return new HFSMapTreeNode(descriptor, map);
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class HFSIndexTreeNode : HFSTreeNode
|
||||
{
|
||||
public IReadOnlyList<HFSPointerRecord> Records { get; }
|
||||
|
||||
private HFSIndexTreeNode(HFSTreeNodeDescriptor descriptor, IReadOnlyList<HFSPointerRecord> records)
|
||||
: base(descriptor)
|
||||
{
|
||||
Records = records;
|
||||
}
|
||||
|
||||
public static HFSIndexTreeNode Read(HFSTreeNodeDescriptor descriptor, ReadOnlySpan<byte> data, HFSTreeHeaderRecord headerRecord, bool isHFSX)
|
||||
{
|
||||
int recordCount = descriptor.NumRecords;
|
||||
var records = new HFSPointerRecord[recordCount];
|
||||
for (int i = 0; i < recordCount; i++)
|
||||
records[i] = HFSPointerRecord.Read(ref data, headerRecord, isHFSX);
|
||||
return new HFSIndexTreeNode(descriptor, records);
|
||||
}
|
||||
}
|
||||
|
||||
internal sealed class HFSLeafTreeNode : HFSTreeNode
|
||||
{
|
||||
public IReadOnlyList<HFSDataRecord> Records { get; }
|
||||
|
||||
private HFSLeafTreeNode(HFSTreeNodeDescriptor descriptor, IReadOnlyList<HFSDataRecord> records)
|
||||
: base(descriptor)
|
||||
{
|
||||
Records = records;
|
||||
}
|
||||
|
||||
public static HFSLeafTreeNode Read(HFSTreeNodeDescriptor descriptor, ReadOnlySpan<byte> data, HFSTreeHeaderRecord headerRecord, bool isHFSX)
|
||||
{
|
||||
int recordCount = descriptor.NumRecords;
|
||||
var recordOffsets = new int[recordCount + 1];
|
||||
for (int i = 0; i < recordOffsets.Length; i++)
|
||||
{
|
||||
var offsetData = data.Slice(data.Length - (2 * i) - 2);
|
||||
ushort offset = ReadUInt16(ref offsetData);
|
||||
recordOffsets[i] = offset;
|
||||
}
|
||||
|
||||
var records = new HFSDataRecord[recordCount];
|
||||
for (int i = 0; i < recordCount; i++)
|
||||
{
|
||||
int size = recordOffsets[i + 1] - recordOffsets[i];
|
||||
records[i] = HFSDataRecord.Read(ref data, size, headerRecord, isHFSX);
|
||||
}
|
||||
|
||||
return new HFSLeafTreeNode(descriptor, records);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
using System;
using System.IO;

namespace SharpCompress.Common.Dmg.HFS
{
internal enum HFSTreeNodeKind : sbyte
{
Leaf = -1,
Index = 0,
Header = 1,
Map = 2
}

internal sealed class HFSTreeNodeDescriptor : HFSStructBase
{
public const int Size = 14;

public uint FLink { get; }
public uint BLink { get; }
public HFSTreeNodeKind Kind { get; }
public byte Height { get; }
public ushort NumRecords { get; }

private HFSTreeNodeDescriptor(uint fLink, uint bLink, HFSTreeNodeKind kind, byte height, ushort numRecords)
{
FLink = fLink;
BLink = bLink;
Kind = kind;
Height = height;
NumRecords = numRecords;
}

public static bool TryRead(Stream stream, out HFSTreeNodeDescriptor? descriptor)
{
descriptor = null;

uint fLink = ReadUInt32(stream);
uint bLink = ReadUInt32(stream);

sbyte rawKind = (sbyte)ReadUInt8(stream);
if (!Enum.IsDefined(typeof(HFSTreeNodeKind), rawKind)) return false;
var kind = (HFSTreeNodeKind)rawKind;

byte height = ReadUInt8(stream);
if (((kind == HFSTreeNodeKind.Header) || (kind == HFSTreeNodeKind.Map)) && (height != 0)) return false;
if ((kind == HFSTreeNodeKind.Leaf) && (height != 1)) return false;

ushort numRecords = ReadUInt16(stream);
_ = ReadUInt16(stream); // reserved

descriptor = new HFSTreeNodeDescriptor(fLink, bLink, kind, height, numRecords);
return true;
}
}
}
@@ -1,206 +0,0 @@
|
||||
using SharpCompress.Archives.Dmg;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal static class HFSUtil
|
||||
{
|
||||
private const string CorruptHFSMessage = "Corrupt HFS volume";
|
||||
|
||||
private static (HFSHeaderTreeNode, IReadOnlyList<HFSTreeNode>) ReadTree(Stream stream, bool isHFSX)
|
||||
{
|
||||
if (!HFSTreeNodeDescriptor.TryRead(stream, out var headerDesc))
|
||||
throw new InvalidFormatException(CorruptHFSMessage);
|
||||
var header = HFSHeaderTreeNode.Read(headerDesc!, stream);
|
||||
|
||||
var nodes = new HFSTreeNode[header.HeaderRecord.TotalNodes];
|
||||
nodes[0] = header;
|
||||
|
||||
for (int i = 1; i < nodes.Length; i++)
|
||||
{
|
||||
if (!HFSTreeNode.TryRead(stream, header.HeaderRecord, isHFSX, out var node))
|
||||
throw new InvalidFormatException(CorruptHFSMessage);
|
||||
|
||||
nodes[i] = node!;
|
||||
}
|
||||
|
||||
return (header, nodes);
|
||||
}
|
||||
|
||||
private static void EnumerateExtentsTree(
|
||||
IReadOnlyList<HFSTreeNode> extentsTree,
|
||||
IDictionary<HFSExtentKey, HFSExtentRecord> records,
|
||||
int parentIndex)
|
||||
{
|
||||
var parent = extentsTree[parentIndex];
|
||||
if (parent is HFSLeafTreeNode leafNode)
|
||||
{
|
||||
foreach (var record in leafNode.Records)
|
||||
{
|
||||
ReadOnlySpan<byte> data = record.Data.AsSpan();
|
||||
var recordData = HFSExtentRecord.Read(ref data);
|
||||
var key = record.GetExtentKey();
|
||||
records.Add(key, recordData);
|
||||
}
|
||||
}
|
||||
else if (parent is HFSIndexTreeNode indexNode)
|
||||
{
|
||||
foreach (var record in indexNode.Records)
|
||||
EnumerateExtentsTree(extentsTree, records, (int)record.NodeNumber);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new InvalidFormatException(CorruptHFSMessage);
|
||||
}
|
||||
}
|
||||
|
||||
private static IReadOnlyDictionary<HFSExtentKey, HFSExtentRecord> LoadExtents(IReadOnlyList<HFSTreeNode> extentsTree, int rootIndex)
|
||||
{
|
||||
var records = new Dictionary<HFSExtentKey, HFSExtentRecord>();
|
||||
if (rootIndex == 0) return records;
|
||||
|
||||
EnumerateExtentsTree(extentsTree, records, rootIndex);
|
||||
return records;
|
||||
}
|
||||
|
||||
private static void EnumerateCatalogTree(
|
||||
HFSHeaderTreeNode catalogHeader,
|
||||
IReadOnlyList<HFSTreeNode> catalogTree,
|
||||
IDictionary<HFSCatalogKey, HFSCatalogRecord> records,
|
||||
IDictionary<uint, HFSCatalogThread> threads,
|
||||
int parentIndex,
|
||||
bool isHFSX)
|
||||
{
|
||||
var parent = catalogTree[parentIndex];
|
||||
if (parent is HFSLeafTreeNode leafNode)
|
||||
{
|
||||
foreach (var record in leafNode.Records)
|
||||
{
|
||||
ReadOnlySpan<byte> data = record.Data.AsSpan();
|
||||
if (HFSCatalogRecord.TryRead(ref data, catalogHeader.HeaderRecord.KeyCompareType, isHFSX, out var recordData))
|
||||
{
|
||||
var key = record.GetCatalogKey();
|
||||
if ((recordData!.Type == HFSCatalogRecordType.FileThread) || (recordData!.Type == HFSCatalogRecordType.FolderThread))
|
||||
{
|
||||
threads.Add(key.ParentId, (HFSCatalogThread)recordData);
|
||||
}
|
||||
else
|
||||
{
|
||||
records.Add(key, recordData);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new InvalidFormatException(CorruptHFSMessage);
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (parent is HFSIndexTreeNode indexNode)
|
||||
{
|
||||
foreach (var record in indexNode.Records)
|
||||
EnumerateCatalogTree(catalogHeader, catalogTree, records, threads, (int)record.NodeNumber, isHFSX);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new InvalidFormatException(CorruptHFSMessage);
|
||||
}
|
||||
}
|
||||
|
||||
private static (HFSCatalogKey, HFSCatalogRecord) GetRecord(uint id, IDictionary<HFSCatalogKey, HFSCatalogRecord> records, IDictionary<uint, HFSCatalogThread> threads)
|
||||
{
|
||||
if (threads.TryGetValue(id, out var thread))
|
||||
{
|
||||
if (records.TryGetValue(thread.CatalogKey, out var record))
|
||||
return (thread.CatalogKey, record!);
|
||||
}
|
||||
|
||||
throw new InvalidFormatException(CorruptHFSMessage);
|
||||
}
|
||||
|
||||
private static string SanitizePath(string path)
|
||||
{
|
||||
var sb = new StringBuilder(path.Length);
|
||||
foreach (char c in path)
|
||||
{
|
||||
if (!char.IsControl(c))
|
||||
sb.Append(c);
|
||||
}
|
||||
return sb.ToString();
|
||||
}
|
||||
|
||||
private static string GetPath(HFSCatalogKey key, IDictionary<HFSCatalogKey, HFSCatalogRecord> records, IDictionary<uint, HFSCatalogThread> threads)
|
||||
{
|
||||
if (key.ParentId == 1)
|
||||
{
|
||||
return key.Name;
|
||||
}
|
||||
else
|
||||
{
|
||||
var (parentKey, _) = GetRecord(key.ParentId, records, threads);
|
||||
var path = Path.Combine(GetPath(parentKey, records, threads), key.Name);
|
||||
return SanitizePath(path);
|
||||
}
|
||||
}
|
||||
|
||||
private static IEnumerable<DmgArchiveEntry> LoadEntriesFromCatalogTree(
|
||||
Stream partitionStream,
|
||||
DmgFilePart filePart,
|
||||
HFSVolumeHeader volumeHeader,
|
||||
HFSHeaderTreeNode catalogHeader,
|
||||
IReadOnlyList<HFSTreeNode> catalogTree,
|
||||
IReadOnlyDictionary<HFSExtentKey, HFSExtentRecord> extents,
|
||||
DmgArchive archive,
|
||||
int rootIndex)
|
||||
{
|
||||
if (rootIndex == 0) return Array.Empty<DmgArchiveEntry>();
|
||||
|
||||
var records = new Dictionary<HFSCatalogKey, HFSCatalogRecord>();
|
||||
var threads = new Dictionary<uint, HFSCatalogThread>();
|
||||
EnumerateCatalogTree(catalogHeader, catalogTree, records, threads, rootIndex, volumeHeader.IsHFSX);
|
||||
|
||||
var entries = new List<DmgArchiveEntry>();
|
||||
foreach (var kvp in records)
|
||||
{
|
||||
var key = kvp.Key;
|
||||
var record = kvp.Value;
|
||||
|
||||
string path = GetPath(key, records, threads);
|
||||
var stream = (record is HFSCatalogFile file) ? new HFSForkStream(partitionStream, volumeHeader, file.DataFork, file.FileId, extents) : null;
|
||||
var entry = new DmgArchiveEntry(stream, archive, record, path, filePart);
|
||||
entries.Add(entry);
|
||||
}
|
||||
|
||||
return entries;
|
||||
}
|
||||
|
||||
public static IEnumerable<DmgArchiveEntry> LoadEntriesFromPartition(Stream partitionStream, string fileName, DmgArchive archive)
|
||||
{
|
||||
if (!HFSVolumeHeader.TryRead(partitionStream, out var volumeHeader))
|
||||
throw new InvalidFormatException(CorruptHFSMessage);
|
||||
var filePart = new DmgFilePart(partitionStream, fileName);
|
||||
|
||||
var extentsFile = volumeHeader!.ExtentsFile;
|
||||
var extentsStream = new HFSForkStream(partitionStream, volumeHeader, extentsFile);
|
||||
var (extentsHeader, extentsTree) = ReadTree(extentsStream, volumeHeader.IsHFSX);
|
||||
|
||||
var extents = LoadExtents(extentsTree, (int)extentsHeader.HeaderRecord.RootNode);
|
||||
|
||||
var catalogFile = volumeHeader!.CatalogFile;
|
||||
var catalogStream = new HFSForkStream(partitionStream, volumeHeader, catalogFile);
|
||||
var (catalogHeader, catalogTree) = ReadTree(catalogStream, volumeHeader.IsHFSX);
|
||||
|
||||
return LoadEntriesFromCatalogTree(
|
||||
partitionStream,
|
||||
filePart,
|
||||
volumeHeader,
|
||||
catalogHeader,
|
||||
catalogTree,
|
||||
extents,
|
||||
archive,
|
||||
(int)catalogHeader.HeaderRecord.RootNode);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,179 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.HFS
|
||||
{
|
||||
internal sealed class HFSVolumeHeader : HFSStructBase
|
||||
{
|
||||
private const ushort SignaturePlus = 0x482B;
|
||||
private const ushort SignatureX = 0x4858;
|
||||
private const int FinderInfoCount = 8;
|
||||
|
||||
public bool IsHFSX { get; }
|
||||
public ushort Version { get; }
|
||||
public uint Attributes { get; }
|
||||
public uint LastMountedVersion { get; }
|
||||
public uint JournalInfoBlock { get; }
|
||||
|
||||
public DateTime CreateDate { get; }
|
||||
public DateTime ModifyDate { get; }
|
||||
public DateTime BackupDate { get; }
|
||||
public DateTime CheckedDate { get; }
|
||||
|
||||
public uint FileCount { get; }
|
||||
public uint FolderCount { get; }
|
||||
|
||||
public uint BlockSize { get; }
|
||||
public uint TotalBlocks { get; }
|
||||
public uint FreeBlocks { get; }
|
||||
|
||||
public uint NextAllocation { get; }
|
||||
public uint RsrcClumpSize { get; }
|
||||
public uint DataClumpSize { get; }
|
||||
public uint NextCatalogID { get; }
|
||||
|
||||
public uint WriteCount { get; }
|
||||
public ulong EncodingsBitmap { get; }
|
||||
|
||||
public IReadOnlyList<uint> FinderInfo { get; }
|
||||
|
||||
public HFSForkData AllocationFile { get; }
|
||||
public HFSForkData ExtentsFile { get; }
|
||||
public HFSForkData CatalogFile { get; }
|
||||
public HFSForkData AttributesFile { get; }
|
||||
public HFSForkData StartupFile { get; }
|
||||
|
||||
public HFSVolumeHeader(
|
||||
bool isHFSX,
|
||||
ushort version,
|
||||
uint attributes,
|
||||
uint lastMountedVersion,
|
||||
uint journalInfoBlock,
|
||||
DateTime createDate,
|
||||
DateTime modifyDate,
|
||||
DateTime backupDate,
|
||||
DateTime checkedDate,
|
||||
uint fileCount,
|
||||
uint folderCount,
|
||||
uint blockSize,
|
||||
uint totalBlocks,
|
||||
uint freeBlocks,
|
||||
uint nextAllocation,
|
||||
uint rsrcClumpSize,
|
||||
uint dataClumpSize,
|
||||
uint nextCatalogID,
|
||||
uint writeCount,
|
||||
ulong encodingsBitmap,
|
||||
IReadOnlyList<uint> finderInfo,
|
||||
HFSForkData allocationFile,
|
||||
HFSForkData extentsFile,
|
||||
HFSForkData catalogFile,
|
||||
HFSForkData attributesFile,
|
||||
HFSForkData startupFile)
|
||||
{
|
||||
IsHFSX = isHFSX;
|
||||
Version = version;
|
||||
Attributes = attributes;
|
||||
LastMountedVersion = lastMountedVersion;
|
||||
JournalInfoBlock = journalInfoBlock;
|
||||
CreateDate = createDate;
|
||||
ModifyDate = modifyDate;
|
||||
BackupDate = backupDate;
|
||||
CheckedDate = checkedDate;
|
||||
FileCount = fileCount;
|
||||
FolderCount = folderCount;
|
||||
BlockSize = blockSize;
|
||||
TotalBlocks = totalBlocks;
|
||||
FreeBlocks = freeBlocks;
|
||||
NextAllocation = nextAllocation;
|
||||
RsrcClumpSize = rsrcClumpSize;
|
||||
DataClumpSize = dataClumpSize;
|
||||
NextCatalogID = nextCatalogID;
|
||||
WriteCount = writeCount;
|
||||
EncodingsBitmap = encodingsBitmap;
|
||||
FinderInfo = finderInfo;
|
||||
AllocationFile = allocationFile;
|
||||
ExtentsFile = extentsFile;
|
||||
CatalogFile = catalogFile;
|
||||
AttributesFile = attributesFile;
|
||||
StartupFile = startupFile;
|
||||
}
|
||||
|
||||
private static IReadOnlyList<uint> ReadFinderInfo(Stream stream)
|
||||
{
|
||||
var finderInfo = new uint[FinderInfoCount];
|
||||
for (int i = 0; i < FinderInfoCount; i++)
|
||||
finderInfo[i] = ReadUInt32(stream);
|
||||
return finderInfo;
|
||||
}
|
||||
|
||||
public static bool TryRead(Stream stream, out HFSVolumeHeader? header)
|
||||
{
|
||||
header = null;
|
||||
stream.Skip(1024); // reserved bytes
|
||||
|
||||
bool isHFSX;
|
||||
ushort sig = ReadUInt16(stream);
|
||||
if (sig == SignaturePlus) isHFSX = false;
|
||||
else if (sig == SignatureX) isHFSX = true;
|
||||
else return false;
|
||||
|
||||
ushort version = ReadUInt16(stream);
|
||||
uint attributes = ReadUInt32(stream);
|
||||
uint lastMountedVersion = ReadUInt32(stream);
|
||||
uint journalInfoBlock = ReadUInt32(stream);
|
||||
DateTime createDate = ReadDate(stream);
|
||||
DateTime modifyDate = ReadDate(stream);
|
||||
DateTime backupDate = ReadDate(stream);
|
||||
DateTime checkedDate = ReadDate(stream);
|
||||
uint fileCount = ReadUInt32(stream);
|
||||
uint folderCount = ReadUInt32(stream);
|
||||
uint blockSize = ReadUInt32(stream);
|
||||
uint totalBlocks = ReadUInt32(stream);
|
||||
uint freeBlocks = ReadUInt32(stream);
|
||||
uint nextAllocation = ReadUInt32(stream);
|
||||
uint rsrcClumpSize = ReadUInt32(stream);
|
||||
uint dataClumpSize = ReadUInt32(stream);
|
||||
uint nextCatalogID = ReadUInt32(stream);
|
||||
uint writeCount = ReadUInt32(stream);
|
||||
ulong encodingsBitmap = ReadUInt64(stream);
|
||||
IReadOnlyList<uint> finderInfo = ReadFinderInfo(stream);
|
||||
HFSForkData allocationFile = HFSForkData.Read(stream);
|
||||
HFSForkData extentsFile = HFSForkData.Read(stream);
|
||||
HFSForkData catalogFile = HFSForkData.Read(stream);
|
||||
HFSForkData attributesFile = HFSForkData.Read(stream);
|
||||
HFSForkData startupFile = HFSForkData.Read(stream);
|
||||
|
||||
header = new HFSVolumeHeader(
|
||||
isHFSX,
|
||||
version,
|
||||
attributes,
|
||||
lastMountedVersion,
|
||||
journalInfoBlock,
|
||||
createDate,
|
||||
modifyDate,
|
||||
backupDate,
|
||||
checkedDate,
|
||||
fileCount,
|
||||
folderCount,
|
||||
blockSize,
|
||||
totalBlocks,
|
||||
freeBlocks,
|
||||
nextAllocation,
|
||||
rsrcClumpSize,
|
||||
dataClumpSize,
|
||||
nextCatalogID,
|
||||
writeCount,
|
||||
encodingsBitmap,
|
||||
finderInfo,
|
||||
allocationFile,
|
||||
extentsFile,
|
||||
catalogFile,
|
||||
attributesFile,
|
||||
startupFile);
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,49 +0,0 @@
using System;

namespace SharpCompress.Common.Dmg.Headers
{
internal enum BlkxChunkType : uint
{
Zero = 0x00000000u,
Uncompressed = 0x00000001u,
Ignore = 0x00000002u,
AdcCompressed = 0x80000004u,
ZlibCompressed = 0x80000005u,
Bz2Compressed = 0x80000006u,
Comment = 0x7FFFFFFEu,
Last = 0xFFFFFFFFu,
}

internal sealed class BlkxChunk : DmgStructBase
{
private const int SectorSize = 512;

public BlkxChunkType Type { get; } // Compression type used or chunk type
public uint Comment { get; } // "+beg" or "+end", if EntryType is comment (0x7FFFFFFE). Else reserved.
public ulong UncompressedOffset { get; } // Start sector of this chunk
public ulong UncompressedLength { get; } // Number of sectors in this chunk
public ulong CompressedOffset { get; } // Start of chunk in data fork
public ulong CompressedLength { get; } // Count of bytes of chunk, in data fork

private BlkxChunk(BlkxChunkType type, uint comment, ulong sectorNumber, ulong sectorCount, ulong compressedOffset, ulong compressedLength)
{
Type = type;
Comment = comment;
UncompressedOffset = sectorNumber * SectorSize;
UncompressedLength = sectorCount * SectorSize;
CompressedOffset = compressedOffset;
CompressedLength = compressedLength;
}

public static bool TryRead(ref ReadOnlySpan<byte> data, out BlkxChunk? chunk)
{
chunk = null;

var type = (BlkxChunkType)ReadUInt32(ref data);
if (!Enum.IsDefined(typeof(BlkxChunkType), type)) return false;

chunk = new BlkxChunk(type, ReadUInt32(ref data), ReadUInt64(ref data), ReadUInt64(ref data), ReadUInt64(ref data), ReadUInt64(ref data));
return true;
}
}
}
@@ -1,75 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.Headers
|
||||
{
|
||||
internal sealed class BlkxTable : DmgStructBase
|
||||
{
|
||||
private const uint Signature = 0x6d697368u;
|
||||
|
||||
public uint Version { get; } // Current version is 1
|
||||
public ulong SectorNumber { get; } // Starting disk sector in this blkx descriptor
|
||||
public ulong SectorCount { get; } // Number of disk sectors in this blkx descriptor
|
||||
|
||||
public ulong DataOffset { get; }
|
||||
public uint BuffersNeeded { get; }
|
||||
public uint BlockDescriptors { get; } // Number of descriptors
|
||||
|
||||
public UdifChecksum Checksum { get; }
|
||||
|
||||
public IReadOnlyList<BlkxChunk> Chunks { get; }
|
||||
|
||||
private BlkxTable(
|
||||
uint version,
|
||||
ulong sectorNumber,
|
||||
ulong sectorCount,
|
||||
ulong dataOffset,
|
||||
uint buffersNeeded,
|
||||
uint blockDescriptors,
|
||||
UdifChecksum checksum,
|
||||
IReadOnlyList<BlkxChunk> chunks)
|
||||
{
|
||||
Version = version;
|
||||
SectorNumber = sectorNumber;
|
||||
SectorCount = sectorCount;
|
||||
DataOffset = dataOffset;
|
||||
BuffersNeeded = buffersNeeded;
|
||||
BlockDescriptors = blockDescriptors;
|
||||
Checksum = checksum;
|
||||
Chunks = chunks;
|
||||
}
|
||||
|
||||
public static bool TryRead(in byte[] buffer, out BlkxTable? header)
|
||||
{
|
||||
header = null;
|
||||
|
||||
ReadOnlySpan<byte> data = buffer.AsSpan();
|
||||
|
||||
uint sig = ReadUInt32(ref data);
|
||||
if (sig != Signature) return false;
|
||||
|
||||
uint version = ReadUInt32(ref data);
|
||||
ulong sectorNumber = ReadUInt64(ref data);
|
||||
ulong sectorCount = ReadUInt64(ref data);
|
||||
|
||||
ulong dataOffset = ReadUInt64(ref data);
|
||||
uint buffersNeeded = ReadUInt32(ref data);
|
||||
uint blockDescriptors = ReadUInt32(ref data);
|
||||
|
||||
data = data.Slice(6 * sizeof(uint)); // reserved
|
||||
|
||||
var checksum = UdifChecksum.Read(ref data);
|
||||
|
||||
uint chunkCount = ReadUInt32(ref data);
|
||||
var chunks = new BlkxChunk[chunkCount];
|
||||
for (int i = 0; i < chunkCount; i++)
|
||||
{
|
||||
if (!BlkxChunk.TryRead(ref data, out var chunk)) return false;
|
||||
chunks[i] = chunk!;
|
||||
}
|
||||
|
||||
header = new BlkxTable(version, sectorNumber, sectorCount, dataOffset, buffersNeeded, blockDescriptors, checksum, chunks);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.Headers
|
||||
{
|
||||
internal sealed class DmgHeader : DmgStructBase
|
||||
{
|
||||
public const int HeaderSize = 512;
|
||||
private const uint Signature = 0x6B6F6C79u;
|
||||
private const int UuidSize = 16; // 128 bit
|
||||
|
||||
public uint Version { get; } // Current version is 4
|
||||
public uint Flags { get; } // Flags
|
||||
public ulong RunningDataForkOffset { get; } //
|
||||
public ulong DataForkOffset { get; } // Data fork offset (usually 0, beginning of file)
|
||||
public ulong DataForkLength { get; } // Size of data fork (usually up to the XMLOffset, below)
|
||||
public ulong RsrcForkOffset { get; } // Resource fork offset, if any
|
||||
public ulong RsrcForkLength { get; } // Resource fork length, if any
|
||||
public uint SegmentNumber { get; } // Usually 1, may be 0
|
||||
public uint SegmentCount { get; } // Usually 1, may be 0
|
||||
public IReadOnlyList<byte> SegmentID { get; } // 128-bit GUID identifier of segment (if SegmentNumber !=0)
|
||||
|
||||
public UdifChecksum DataChecksum { get; }
|
||||
|
||||
public ulong XMLOffset { get; } // Offset of property list in DMG, from beginning
|
||||
public ulong XMLLength { get; } // Length of property list
|
||||
|
||||
public UdifChecksum Checksum { get; }
|
||||
|
||||
public uint ImageVariant { get; } // Commonly 1
|
||||
public ulong SectorCount { get; } // Size of DMG when expanded, in sectors
|
||||
|
||||
private DmgHeader(
|
||||
uint version,
|
||||
uint flags,
|
||||
ulong runningDataForkOffset,
|
||||
ulong dataForkOffset,
|
||||
ulong dataForkLength,
|
||||
ulong rsrcForkOffset,
|
||||
ulong rsrcForkLength,
|
||||
uint segmentNumber,
|
||||
uint segmentCount,
|
||||
IReadOnlyList<byte> segmentID,
|
||||
UdifChecksum dataChecksum,
|
||||
ulong xMLOffset,
|
||||
ulong xMLLength,
|
||||
UdifChecksum checksum,
|
||||
uint imageVariant,
|
||||
ulong sectorCount)
|
||||
{
|
||||
Version = version;
|
||||
Flags = flags;
|
||||
RunningDataForkOffset = runningDataForkOffset;
|
||||
DataForkOffset = dataForkOffset;
|
||||
DataForkLength = dataForkLength;
|
||||
RsrcForkOffset = rsrcForkOffset;
|
||||
RsrcForkLength = rsrcForkLength;
|
||||
SegmentNumber = segmentNumber;
|
||||
SegmentCount = segmentCount;
|
||||
SegmentID = segmentID;
|
||||
DataChecksum = dataChecksum;
|
||||
XMLOffset = xMLOffset;
|
||||
XMLLength = xMLLength;
|
||||
Checksum = checksum;
|
||||
ImageVariant = imageVariant;
|
||||
SectorCount = sectorCount;
|
||||
}
|
||||
|
||||
private static void ReadUuid(ref ReadOnlySpan<byte> data, byte[] buffer)
|
||||
{
|
||||
data.Slice(0, UuidSize).CopyTo(buffer);
|
||||
data = data.Slice(UuidSize);
|
||||
}
|
||||
|
||||
internal static bool TryRead(Stream input, out DmgHeader? header)
|
||||
{
|
||||
header = null;
|
||||
|
||||
var buffer = new byte[HeaderSize];
|
||||
int count = input.Read(buffer, 0, HeaderSize);
|
||||
if (count != HeaderSize) return false;
|
||||
ReadOnlySpan<byte> data = buffer.AsSpan();
|
||||
|
||||
uint sig = ReadUInt32(ref data);
|
||||
if (sig != Signature) return false;
|
||||
|
||||
uint version = ReadUInt32(ref data);
|
||||
|
||||
uint size = ReadUInt32(ref data);
|
||||
if (size != (uint)HeaderSize) return false;
|
||||
|
||||
uint flags = ReadUInt32(ref data);
|
||||
ulong runningDataForkOffset = ReadUInt64(ref data);
|
||||
ulong dataForkOffset = ReadUInt64(ref data);
|
||||
ulong dataForkLength = ReadUInt64(ref data);
|
||||
ulong rsrcForkOffset = ReadUInt64(ref data);
|
||||
ulong rsrcForkLength = ReadUInt64(ref data);
|
||||
uint segmentNumber = ReadUInt32(ref data);
|
||||
uint segmentCount = ReadUInt32(ref data);
|
||||
|
||||
var segmentID = new byte[UuidSize];
|
||||
ReadUuid(ref data, segmentID);
|
||||
|
||||
var dataChecksum = UdifChecksum.Read(ref data);
|
||||
|
||||
ulong xmlOffset = ReadUInt64(ref data);
|
||||
ulong xmlLength = ReadUInt64(ref data);
|
||||
|
||||
data = data.Slice(120); // Reserved bytes
|
||||
|
||||
var checksum = UdifChecksum.Read(ref data);
|
||||
|
||||
uint imageVariant = ReadUInt32(ref data);
|
||||
ulong sectorCount = ReadUInt64(ref data);
|
||||
|
||||
header = new DmgHeader(
|
||||
version,
|
||||
flags,
|
||||
runningDataForkOffset,
|
||||
dataForkOffset,
|
||||
dataForkLength,
|
||||
rsrcForkOffset,
|
||||
rsrcForkLength,
|
||||
segmentNumber,
|
||||
segmentCount,
|
||||
segmentID,
|
||||
dataChecksum,
|
||||
xmlOffset,
|
||||
xmlLength,
|
||||
checksum,
|
||||
imageVariant,
|
||||
sectorCount);
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,22 +0,0 @@
using System;
using System.Buffers.Binary;

namespace SharpCompress.Common.Dmg.Headers
{
internal abstract class DmgStructBase
{
protected static uint ReadUInt32(ref ReadOnlySpan<byte> data)
{
uint val = BinaryPrimitives.ReadUInt32BigEndian(data);
data = data.Slice(sizeof(uint));
return val;
}

protected static ulong ReadUInt64(ref ReadOnlySpan<byte> data)
{
ulong val = BinaryPrimitives.ReadUInt64BigEndian(data);
data = data.Slice(sizeof(ulong));
return val;
}
}
}
@@ -1,90 +0,0 @@
|
||||
using System;
|
||||
using System.Buffers.Binary;
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Dmg.Headers
|
||||
{
|
||||
internal sealed class GptHeader : GptStructBase
|
||||
{
|
||||
private const int HeaderSize = 92;
|
||||
private static readonly ulong Signature = BinaryPrimitives.ReadUInt64LittleEndian(new byte[] { 69, 70, 73, 32, 80, 65, 82, 84 });
|
||||
|
||||
public uint Revision { get; }
|
||||
public uint Crc32Header { get; }
|
||||
public ulong CurrentLba { get; }
|
||||
public ulong BackupLba { get; }
|
||||
public ulong FirstUsableLba { get; }
|
||||
public ulong LastUsableLba { get; }
|
||||
public Guid DiskGuid { get; }
|
||||
public ulong EntriesStart { get; }
|
||||
public uint EntriesCount { get; }
|
||||
public uint EntriesSize { get; }
|
||||
public uint Crc32Array { get; }
|
||||
|
||||
private GptHeader(
|
||||
uint revision,
|
||||
uint crc32Header,
|
||||
ulong currentLba,
|
||||
ulong backupLba,
|
||||
ulong firstUsableLba,
|
||||
ulong lastUsableLba,
|
||||
Guid diskGuid,
|
||||
ulong entriesStart,
|
||||
uint entriesCount,
|
||||
uint entriesSize,
|
||||
uint crc32Array)
|
||||
{
|
||||
Revision = revision;
|
||||
Crc32Header = crc32Header;
|
||||
CurrentLba = currentLba;
|
||||
BackupLba = backupLba;
|
||||
FirstUsableLba = firstUsableLba;
|
||||
LastUsableLba = lastUsableLba;
|
||||
DiskGuid = diskGuid;
|
||||
EntriesStart = entriesStart;
|
||||
EntriesCount = entriesCount;
|
||||
EntriesSize = entriesSize;
|
||||
Crc32Array = crc32Array;
|
||||
}
|
||||
|
||||
public static bool TryRead(Stream stream, out GptHeader? header)
|
||||
{
|
||||
header = null;
|
||||
|
||||
ulong sig = ReadUInt64(stream);
|
||||
if (sig != Signature) return false;
|
||||
|
||||
uint revision = ReadUInt32(stream);
|
||||
|
||||
uint headerSize = ReadUInt32(stream);
|
||||
if (headerSize != HeaderSize) return false;
|
||||
|
||||
uint crc32Header = ReadUInt32(stream);
|
||||
_ = ReadUInt32(stream); // reserved
|
||||
ulong currentLba = ReadUInt64(stream);
|
||||
ulong backupLba = ReadUInt64(stream);
|
||||
ulong firstUsableLba = ReadUInt64(stream);
|
||||
ulong lastUsableLba = ReadUInt64(stream);
|
||||
Guid diskGuid = ReadGuid(stream);
|
||||
ulong entriesStart = ReadUInt64(stream);
|
||||
uint entriesCount = ReadUInt32(stream);
|
||||
uint entriesSize = ReadUInt32(stream);
|
||||
uint crc32Array = ReadUInt32(stream);
|
||||
|
||||
header = new GptHeader(
|
||||
revision,
|
||||
crc32Header,
|
||||
currentLba,
|
||||
backupLba,
|
||||
firstUsableLba,
|
||||
lastUsableLba,
|
||||
diskGuid,
|
||||
entriesStart,
|
||||
entriesCount,
|
||||
entriesSize,
|
||||
crc32Array);
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,36 +0,0 @@
using System;
using System.IO;

namespace SharpCompress.Common.Dmg.Headers
{
internal sealed class GptPartitionEntry : GptStructBase
{
public Guid TypeGuid { get; }
public Guid Guid { get; }
public ulong FirstLba { get; }
public ulong LastLba { get; }
public ulong Attributes { get; }
public string Name { get; }

private GptPartitionEntry(Guid typeGuid, Guid guid, ulong firstLba, ulong lastLba, ulong attributes, string name)
{
TypeGuid = typeGuid;
Guid = guid;
FirstLba = firstLba;
LastLba = lastLba;
Attributes = attributes;
Name = name;
}

public static GptPartitionEntry Read(Stream stream)
{
return new GptPartitionEntry(
ReadGuid(stream),
ReadGuid(stream),
ReadUInt64(stream),
ReadUInt64(stream),
ReadUInt64(stream),
ReadString(stream, 72));
}
}
}
@@ -1,56 +0,0 @@
using System;
using System.Buffers.Binary;
using System.IO;
using System.Text;

namespace SharpCompress.Common.Dmg.Headers
{
internal abstract class GptStructBase
{
private static readonly byte[] _buffer = new byte[8];

protected static ushort ReadUInt16(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(ushort)) != sizeof(ushort))
throw new EndOfStreamException();

return BinaryPrimitives.ReadUInt16LittleEndian(_buffer);
}

protected static uint ReadUInt32(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(uint)) != sizeof(uint))
throw new EndOfStreamException();

return BinaryPrimitives.ReadUInt32LittleEndian(_buffer);
}

protected static ulong ReadUInt64(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(ulong)) != sizeof(ulong))
throw new EndOfStreamException();

return BinaryPrimitives.ReadUInt64LittleEndian(_buffer);
}

protected static Guid ReadGuid(Stream stream)
{
int a = (int)ReadUInt32(stream);
short b = (short)ReadUInt16(stream);
short c = (short)ReadUInt16(stream);

if (stream.Read(_buffer, 0, 8) != 8)
throw new EndOfStreamException();

return new Guid(a, b, c, _buffer);
}

protected static string ReadString(Stream stream, int byteSize)
{
var buffer = new byte[byteSize];
if (stream.Read(buffer, 0, byteSize) != byteSize)
throw new EndOfStreamException();
return Encoding.Unicode.GetString(buffer).NullTerminate();
}
}
}
@@ -1,33 +0,0 @@
using System;
using System.Collections.Generic;

namespace SharpCompress.Common.Dmg.Headers
{
internal sealed class UdifChecksum : DmgStructBase
{
private const int MaxSize = 32; // * 4 to get byte size

public uint Type { get; }
public uint Size { get; } // in bits
public IReadOnlyList<uint> Bits { get; }

private UdifChecksum(uint type, uint size, IReadOnlyList<uint> bits)
{
Type = type;
Size = size;
Bits = bits;
}

public static UdifChecksum Read(ref ReadOnlySpan<byte> data)
{
uint type = ReadUInt32(ref data);
uint size = ReadUInt32(ref data);

var bits = new uint[MaxSize];
for (int i = 0; i < MaxSize; i++)
bits[i] = ReadUInt32(ref data);

return new UdifChecksum(type, size, bits);
}
}
}
@@ -1,14 +0,0 @@
using System;

namespace SharpCompress.Common.Dmg
{
internal static class PartitionFormat
{
public static readonly Guid AppleHFS = new Guid("48465300-0000-11AA-AA11-00306543ECAC");
public static readonly Guid AppleUFS = new Guid("55465300-0000-11AA-AA11-00306543ECAC");
public static readonly Guid AppleBoot = new Guid("426F6F74-0000-11AA-AA11-00306543ECAC");
public static readonly Guid AppleRaid = new Guid("52414944-0000-11AA-AA11-00306543ECAC");
public static readonly Guid AppleRaidOffline = new Guid("52414944-5F4F-11AA-AA11-00306543ECAC");
public static readonly Guid AppleLabel = new Guid("4C616265-6C00-11AA-AA11-00306543ECAC");
}
}
@@ -14,14 +14,25 @@ namespace SharpCompress.Common
Action<string, ExtractionOptions?> write)
{
string destinationFileName;
string file = Path.GetFileName(entry.Key);
string fullDestinationDirectoryPath = Path.GetFullPath(destinationDirectory);

//check for trailing slash.
if (fullDestinationDirectoryPath[fullDestinationDirectoryPath.Length - 1] != Path.DirectorySeparatorChar)
{
fullDestinationDirectoryPath += Path.DirectorySeparatorChar;
}

if (!Directory.Exists(fullDestinationDirectoryPath))
{
throw new ExtractionException($"Directory does not exist to extract to: {fullDestinationDirectoryPath}");
}

options ??= new ExtractionOptions()
{
Overwrite = true
};

string file = Path.GetFileName(entry.Key);
if (options.ExtractFullPath)
{
string folder = Path.GetDirectoryName(entry.Key)!;

@@ -1,6 +1,4 @@
using System;

namespace SharpCompress.Common
namespace SharpCompress.Common
{
public class IncompleteArchiveException : ArchiveException
{
@@ -8,10 +6,5 @@ namespace SharpCompress.Common
: base(message)
{
}

public IncompleteArchiveException(string message, Exception inner)
: base(message, inner)
{
}
}
}
50
src/SharpCompress/Common/ZStandard/ZStandardEntry.cs
Normal file
@@ -0,0 +1,50 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using SharpCompress.Common.GZip;
|
||||
|
||||
namespace SharpCompress.Common.ZStandard
|
||||
{
|
||||
public class ZStandardEntry : Entry
|
||||
{
|
||||
private readonly ZStandardFilePart _filePart;
|
||||
|
||||
internal ZStandardEntry(ZStandardFilePart filePart)
|
||||
{
|
||||
_filePart = filePart;
|
||||
}
|
||||
|
||||
public override CompressionType CompressionType => CompressionType.GZip;
|
||||
|
||||
public override long Crc => _filePart.Crc ?? 0;
|
||||
|
||||
public override string Key => _filePart.FilePartName;
|
||||
|
||||
public override string? LinkTarget => null;
|
||||
|
||||
public override long CompressedSize => 0;
|
||||
|
||||
public override long Size => _filePart.UncompressedSize ?? 0;
|
||||
|
||||
public override DateTime? LastModifiedTime => _filePart.DateModified;
|
||||
|
||||
public override DateTime? CreatedTime => null;
|
||||
|
||||
public override DateTime? LastAccessedTime => null;
|
||||
|
||||
public override DateTime? ArchivedTime => null;
|
||||
|
||||
public override bool IsEncrypted => false;
|
||||
|
||||
public override bool IsDirectory => false;
|
||||
|
||||
public override bool IsSplitAfter => false;
|
||||
|
||||
internal override IEnumerable<FilePart> Parts => _filePart.AsEnumerable<FilePart>();
|
||||
|
||||
internal static IEnumerable<GZipEntry> GetEntries(Stream stream, OptionsBase options)
|
||||
{
|
||||
yield return new GZipEntry(new GZipFilePart(stream, options.ArchiveEncoding));
|
||||
}
|
||||
}
|
||||
}
|
||||
37
src/SharpCompress/Common/ZStandard/ZStandardFilePart.cs
Normal file
@@ -0,0 +1,37 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using ZstdSharp;
|
||||
|
||||
namespace SharpCompress.Common.ZStandard
|
||||
{
|
||||
internal sealed class ZStandardFilePart : FilePart
|
||||
{
|
||||
private string _name = "";
|
||||
private readonly Stream _stream;
|
||||
|
||||
internal ZStandardFilePart(Stream stream, ArchiveEncoding archiveEncoding)
|
||||
: base(archiveEncoding)
|
||||
{
|
||||
_stream = stream;
|
||||
EntryStartPosition = stream.Position;
|
||||
}
|
||||
|
||||
internal long EntryStartPosition { get; }
|
||||
|
||||
internal DateTime? DateModified { get; private set; }
|
||||
internal int? Crc { get; private set; }
|
||||
internal int? UncompressedSize { get; private set; }
|
||||
|
||||
internal override string FilePartName => _name!;
|
||||
|
||||
internal override Stream GetCompressedStream()
|
||||
{
|
||||
return new DecompressionStream(_stream);
|
||||
}
|
||||
|
||||
internal override Stream GetRawStream()
|
||||
{
|
||||
return _stream;
|
||||
}
|
||||
}
|
||||
}
|
||||
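For context, GetCompressedStream above simply wraps the raw stream in ZstdSharp's DecompressionStream (added later in this change). A minimal, illustrative sketch of the same idea outside the archive plumbing — the file paths are placeholders, not part of the commit:

using System.IO;
using ZstdSharp;

// Decompress a raw .zst stream the same way ZStandardFilePart.GetCompressedStream() does:
// wrap it in DecompressionStream and copy the plain bytes out.
using (var raw = File.OpenRead("input.zst"))            // placeholder path
using (var decompressed = new DecompressionStream(raw))
using (var output = File.Create("output.bin"))          // placeholder path
{
    decompressed.CopyTo(output);
}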
23
src/SharpCompress/Common/ZStandard/ZStandardVolume.cs
Normal file
@@ -0,0 +1,23 @@
|
||||
using System.IO;
|
||||
using SharpCompress.Readers;
|
||||
|
||||
namespace SharpCompress.Common.ZStandard
|
||||
{
|
||||
public class ZStandardVolume : Volume
|
||||
{
|
||||
public ZStandardVolume(Stream stream, ReaderOptions options)
|
||||
: base(stream, options)
|
||||
{
|
||||
}
|
||||
|
||||
public ZStandardVolume(FileInfo fileInfo, ReaderOptions options)
|
||||
: base(fileInfo.OpenRead(), options)
|
||||
{
|
||||
options.LeaveStreamOpen = false;
|
||||
}
|
||||
|
||||
public override bool IsFirstVolume => true;
|
||||
|
||||
public override bool IsMultiVolume => true;
|
||||
}
|
||||
}
|
||||
@@ -36,7 +36,7 @@ namespace SharpCompress.Common.Zip
|
||||
// ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR should be before the EOCD
|
||||
stream.Seek(eocd_location - ZIP64_EOCD_LENGTH - 4, SeekOrigin.Begin);
|
||||
uint zip64_locator = reader.ReadUInt32();
|
||||
if( zip64_locator != ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR )
|
||||
if (zip64_locator != ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR)
|
||||
{
|
||||
throw new ArchiveException("Failed to locate the Zip64 Directory Locator");
|
||||
}
|
||||
@@ -86,11 +86,11 @@ namespace SharpCompress.Common.Zip
|
||||
}
|
||||
}
|
||||
|
||||
private static bool IsMatch( byte[] haystack, int position, byte[] needle)
|
||||
private static bool IsMatch(byte[] haystack, int position, byte[] needle)
|
||||
{
|
||||
for( int i = 0; i < needle.Length; i++ )
|
||||
for (int i = 0; i < needle.Length; i++)
|
||||
{
|
||||
if( haystack[ position + i ] != needle[ i ] )
|
||||
if (haystack[position + i] != needle[i])
|
||||
{
|
||||
return false;
|
||||
}
|
||||
@@ -117,11 +117,12 @@ namespace SharpCompress.Common.Zip
|
||||
// Search in reverse
|
||||
Array.Reverse(seek);
|
||||
|
||||
var max_search_area = len - MINIMUM_EOCD_LENGTH;
|
||||
// don't exclude the minimum eocd region, otherwise you fail to locate the header in empty zip files
|
||||
var max_search_area = len; // - MINIMUM_EOCD_LENGTH;
|
||||
|
||||
for( int pos_from_end = 0; pos_from_end < max_search_area; ++pos_from_end)
|
||||
for (int pos_from_end = 0; pos_from_end < max_search_area; ++pos_from_end)
|
||||
{
|
||||
if( IsMatch(seek, pos_from_end, needle) )
|
||||
if (IsMatch(seek, pos_from_end, needle))
|
||||
{
|
||||
stream.Seek(-pos_from_end, SeekOrigin.End);
|
||||
return;
|
||||
|
||||
@@ -83,7 +83,7 @@ namespace SharpCompress.Compressors.BZip2
|
||||
stream.SetLength(value);
|
||||
}
|
||||
|
||||
#if !NET461 && !NETSTANDARD2_0
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
|
||||
public override int Read(Span<byte> buffer)
|
||||
{
|
||||
@@ -123,4 +123,4 @@ namespace SharpCompress.Compressors.BZip2
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -502,13 +502,37 @@ namespace SharpCompress.Compressors.Deflate
|
||||
throw new ZlibException("Cannot Read after Writing.");
|
||||
}
|
||||
|
||||
int rc = 0;
|
||||
|
||||
// set up the output of the deflate/inflate codec:
|
||||
_z.OutputBuffer = buffer;
|
||||
_z.NextOut = offset;
|
||||
_z.AvailableBytesOut = count;
|
||||
|
||||
if (count == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
if (nomoreinput && _wantCompress)
|
||||
{
|
||||
return 0; // workitem 8557
|
||||
// no more input data available; therefore we flush to
|
||||
// try to complete the read
|
||||
rc = _z.Deflate(FlushType.Finish);
|
||||
|
||||
if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
|
||||
{
|
||||
throw new ZlibException(String.Format("Deflating: rc={0} msg={1}", rc, _z.Message));
|
||||
}
|
||||
|
||||
rc = (count - _z.AvailableBytesOut);
|
||||
|
||||
// calculate CRC after reading
|
||||
if (crc != null)
|
||||
{
|
||||
crc.SlurpBlock(buffer, offset, rc);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
if (buffer is null)
|
||||
{
|
||||
@@ -527,13 +551,6 @@ namespace SharpCompress.Compressors.Deflate
|
||||
throw new ArgumentOutOfRangeException(nameof(count));
|
||||
}
|
||||
|
||||
int rc = 0;
|
||||
|
||||
// set up the output of the deflate/inflate codec:
|
||||
_z.OutputBuffer = buffer;
|
||||
_z.NextOut = offset;
|
||||
_z.AvailableBytesOut = count;
|
||||
|
||||
// This is necessary in case _workingBuffer has been resized. (new byte[])
|
||||
// (The first reference to _workingBuffer goes through the private accessor which
|
||||
// may initialize it.)
|
||||
|
||||
@@ -118,7 +118,7 @@ namespace SharpCompress.Compressors.LZMA
|
||||
public override void SetLength(long value) => throw new NotImplementedException();
|
||||
|
||||
|
||||
#if !NET461 && !NETSTANDARD2_0
|
||||
#if !NETFRAMEWORK && !NETSTANDARD2_0
|
||||
|
||||
public override int Read(Span<byte> buffer)
|
||||
{
|
||||
|
||||
201
src/SharpCompress/Compressors/Zstd/CompressionStream.cs
Normal file
@@ -0,0 +1,201 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using ZstdSharp.Unsafe;
|
||||
|
||||
namespace ZstdSharp
|
||||
{
|
||||
public class CompressionStream : Stream
|
||||
{
|
||||
private readonly Stream innerStream;
|
||||
private readonly byte[] outputBuffer;
|
||||
private Compressor compressor;
|
||||
private ZSTD_outBuffer_s output;
|
||||
|
||||
public CompressionStream(Stream stream, int level = Compressor.DefaultCompressionLevel,
|
||||
int bufferSize = 0)
|
||||
{
|
||||
if (stream == null)
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
|
||||
if (!stream.CanWrite)
|
||||
throw new ArgumentException("Stream is not writable", nameof(stream));
|
||||
|
||||
if (bufferSize < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(bufferSize));
|
||||
|
||||
innerStream = stream;
|
||||
compressor = new Compressor(level);
|
||||
|
||||
var outputBufferSize =
|
||||
bufferSize > 0 ? bufferSize : (int) Methods.ZSTD_CStreamOutSize().EnsureZstdSuccess();
|
||||
outputBuffer = ArrayPool<byte>.Shared.Rent(outputBufferSize);
|
||||
output = new ZSTD_outBuffer_s {pos = 0, size = (nuint) outputBufferSize};
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_cParameter parameter, int value)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.SetParameter(parameter, value);
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_cParameter parameter)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
return compressor.GetParameter(parameter);
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
compressor.LoadDictionary(dict);
|
||||
}
|
||||
|
||||
~CompressionStream() => Dispose(false);
|
||||
|
||||
#if !NETSTANDARD2_0 && !NET461
|
||||
public override async ValueTask DisposeAsync()
|
||||
#else
|
||||
public async ValueTask DisposeAsync()
|
||||
#endif
|
||||
{
|
||||
if (compressor == null)
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
await FlushAsync().ConfigureAwait(false);
|
||||
}
|
||||
finally
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (compressor == null)
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
if (disposing)
|
||||
Flush();
|
||||
}
|
||||
finally
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
}
|
||||
}
|
||||
|
||||
private void ReleaseUnmanagedResources()
|
||||
{
|
||||
compressor.Dispose();
|
||||
ArrayPool<byte>.Shared.Return(outputBuffer);
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
=> WriteInternal(null, true);
|
||||
|
||||
public override async Task FlushAsync(CancellationToken cancellationToken)
|
||||
=> await WriteInternalAsync(null, true, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
=> Write(new ReadOnlySpan<byte>(buffer, offset, count));
|
||||
|
||||
#if !NETSTANDARD2_0 && !NET461
|
||||
public override void Write(ReadOnlySpan<byte> buffer)
|
||||
=> WriteInternal(buffer, false);
|
||||
#else
|
||||
public void Write(ReadOnlySpan<byte> buffer)
|
||||
=> WriteInternal(buffer, false);
|
||||
#endif
|
||||
|
||||
private void WriteInternal(ReadOnlySpan<byte> buffer, bool lastChunk)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
var input = new ZSTD_inBuffer_s {pos = 0, size = buffer != null ? (nuint) buffer.Length : 0};
|
||||
nuint remaining;
|
||||
do
|
||||
{
|
||||
output.pos = 0;
|
||||
remaining = CompressStream(ref input, buffer,
|
||||
lastChunk ? ZSTD_EndDirective.ZSTD_e_end : ZSTD_EndDirective.ZSTD_e_continue);
|
||||
|
||||
var written = (int) output.pos;
|
||||
if (written > 0)
|
||||
innerStream.Write(outputBuffer, 0, written);
|
||||
} while (lastChunk ? remaining > 0 : input.pos < input.size);
|
||||
}
|
||||
|
||||
private async ValueTask WriteInternalAsync(ReadOnlyMemory<byte>? buffer, bool lastChunk,
|
||||
CancellationToken cancellationToken = default)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
var input = new ZSTD_inBuffer_s { pos = 0, size = buffer.HasValue ? (nuint)buffer.Value.Length : 0 };
|
||||
nuint remaining;
|
||||
do
|
||||
{
|
||||
output.pos = 0;
|
||||
remaining = CompressStream(ref input, buffer.HasValue ? buffer.Value.Span : null,
|
||||
lastChunk ? ZSTD_EndDirective.ZSTD_e_end : ZSTD_EndDirective.ZSTD_e_continue);
|
||||
|
||||
var written = (int) output.pos;
|
||||
if (written > 0)
|
||||
await innerStream.WriteAsync(outputBuffer, 0, written, cancellationToken).ConfigureAwait(false);
|
||||
} while (lastChunk ? remaining > 0 : input.pos < input.size);
|
||||
}
|
||||
|
||||
public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
|
||||
=> WriteAsync(new ReadOnlyMemory<byte>(buffer, offset, count), cancellationToken).AsTask();
|
||||
|
||||
#if !NETSTANDARD2_0 && !NET461
|
||||
public override async ValueTask WriteAsync(ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default)
|
||||
=> await WriteInternalAsync(buffer, false, cancellationToken).ConfigureAwait(false);
|
||||
#else
|
||||
public async ValueTask WriteAsync(ReadOnlyMemory<byte> buffer,
|
||||
CancellationToken cancellationToken = default)
|
||||
=> await WriteInternalAsync(buffer, false, cancellationToken).ConfigureAwait(false);
|
||||
#endif
|
||||
|
||||
internal unsafe nuint CompressStream(ref ZSTD_inBuffer_s input, ReadOnlySpan<byte> inputBuffer,
|
||||
ZSTD_EndDirective directive)
|
||||
{
|
||||
fixed (byte* inputBufferPtr = inputBuffer)
|
||||
fixed (byte* outputBufferPtr = outputBuffer)
|
||||
{
|
||||
input.src = inputBufferPtr;
|
||||
output.dst = outputBufferPtr;
|
||||
return compressor.CompressStream(ref input, ref output, directive).EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanRead => false;
|
||||
public override bool CanSeek => false;
|
||||
public override bool CanWrite => true;
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => throw new NotSupportedException();
|
||||
set => throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
public override int Read(byte[] buffer, int offset, int count) => throw new NotSupportedException();
|
||||
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (compressor == null)
|
||||
throw new ObjectDisposedException(nameof(CompressionStream));
|
||||
}
|
||||
}
|
||||
}
|
||||
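A short usage sketch for the new CompressionStream, assuming a plain file-to-file compression scenario (the paths and the level value are illustrative):

using System.IO;
using ZstdSharp;

// Compress data.bin into data.bin.zst; disposing the stream flushes the final frame
// (Dispose() calls Flush(), which drives WriteInternal with ZSTD_e_end).
using (var source = File.OpenRead("data.bin"))
using (var target = File.Create("data.bin.zst"))
using (var zstd = new CompressionStream(target, level: 3))
{
    source.CopyTo(zstd);
}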
164
src/SharpCompress/Compressors/Zstd/Compressor.cs
Normal file
@@ -0,0 +1,164 @@
|
||||
using System;
|
||||
using ZstdSharp.Unsafe;
|
||||
|
||||
namespace ZstdSharp
|
||||
{
|
||||
public unsafe class Compressor : IDisposable
|
||||
{
|
||||
public static int MinCompressionLevel => Methods.ZSTD_minCLevel();
|
||||
public static int MaxCompressionLevel => Methods.ZSTD_maxCLevel();
|
||||
public const int DefaultCompressionLevel = 0;
|
||||
|
||||
private int level = DefaultCompressionLevel;
|
||||
|
||||
private ZSTD_CCtx_s* cctx;
|
||||
|
||||
public int Level
|
||||
{
|
||||
get => level;
|
||||
set
|
||||
{
|
||||
if (level != value)
|
||||
{
|
||||
level = value;
|
||||
SetParameter(ZSTD_cParameter.ZSTD_c_compressionLevel, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_cParameter parameter, int value)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
Methods.ZSTD_CCtx_setParameter(cctx, parameter, value).EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_cParameter parameter)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
int value;
|
||||
Methods.ZSTD_CCtx_getParameter(cctx, parameter, &value).EnsureZstdSuccess();
|
||||
return value;
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
if (dict == null)
|
||||
{
|
||||
Methods.ZSTD_CCtx_loadDictionary(cctx, null, 0).EnsureZstdSuccess();
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
fixed (byte* dictPtr = dict)
|
||||
Methods.ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint) dict.Length).EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public Compressor(int level = DefaultCompressionLevel)
|
||||
{
|
||||
cctx = Methods.ZSTD_createCCtx();
|
||||
if (cctx == null)
|
||||
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create cctx");
|
||||
|
||||
Level = level;
|
||||
}
|
||||
|
||||
~Compressor()
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
}
|
||||
|
||||
public static int GetCompressBound(int length)
|
||||
=> (int) Methods.ZSTD_compressBound((nuint) length);
|
||||
|
||||
public static ulong GetCompressBoundLong(ulong length)
|
||||
=> Methods.ZSTD_compressBound((nuint) length);
|
||||
|
||||
public Span<byte> Wrap(ReadOnlySpan<byte> src)
|
||||
{
|
||||
var dest = new byte[GetCompressBound(src.Length)];
|
||||
var length = Wrap(src, dest);
|
||||
return new Span<byte>(dest, 0, length);
|
||||
}
|
||||
|
||||
public int Wrap(byte[] src, byte[] dest, int offset)
|
||||
=> Wrap(src, new Span<byte>(dest, offset, dest.Length - offset));
|
||||
|
||||
public int Wrap(ReadOnlySpan<byte> src, Span<byte> dest)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
return (int) Methods
|
||||
.ZSTD_compress2(cctx, destPtr, (nuint) dest.Length, srcPtr, (nuint) src.Length)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public int Wrap(ArraySegment<byte> src, ArraySegment<byte> dest)
|
||||
=> Wrap((ReadOnlySpan<byte>) src, dest);
|
||||
|
||||
public int Wrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength)
|
||||
=> Wrap(new ReadOnlySpan<byte>(src, srcOffset, srcLength), new Span<byte>(dst, dstOffset, dstLength));
|
||||
|
||||
public bool TryWrap(byte[] src, byte[] dest, int offset, out int written)
|
||||
=> TryWrap(src, new Span<byte>(dest, offset, dest.Length - offset), out written);
|
||||
|
||||
public bool TryWrap(ReadOnlySpan<byte> src, Span<byte> dest, out int written)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
{
|
||||
var returnValue =
|
||||
Methods.ZSTD_compress2(cctx, destPtr, (nuint) dest.Length, srcPtr, (nuint) src.Length);
|
||||
|
||||
if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))
|
||||
{
|
||||
written = default;
|
||||
return false;
|
||||
}
|
||||
|
||||
returnValue.EnsureZstdSuccess();
|
||||
written = (int) returnValue;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
public bool TryWrap(ArraySegment<byte> src, ArraySegment<byte> dest, out int written)
|
||||
=> TryWrap((ReadOnlySpan<byte>)src, dest, out written);
|
||||
|
||||
public bool TryWrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength, out int written)
|
||||
=> TryWrap(new ReadOnlySpan<byte>(src, srcOffset, srcLength), new Span<byte>(dst, dstOffset, dstLength), out written);
|
||||
|
||||
private void ReleaseUnmanagedResources()
|
||||
{
|
||||
if (cctx != null)
|
||||
{
|
||||
Methods.ZSTD_freeCCtx(cctx);
|
||||
cctx = null;
|
||||
}
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (cctx == null)
|
||||
throw new ObjectDisposedException(nameof(Compressor));
|
||||
}
|
||||
|
||||
internal nuint CompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s output, ZSTD_EndDirective directive)
|
||||
{
|
||||
fixed (ZSTD_inBuffer_s* inputPtr = &input)
|
||||
fixed (ZSTD_outBuffer_s* outputPtr = &output)
|
||||
{
|
||||
return Methods.ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive).EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
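One-shot buffer compression with the Compressor wrapper, sketched with an arbitrary payload; GetCompressBound gives the worst-case destination size, so wrapping into a caller-owned buffer of that size cannot run out of room:

using System;
using ZstdSharp;

byte[] input = new byte[1024];                                   // illustrative payload
byte[] output = new byte[Compressor.GetCompressBound(input.Length)];

using var compressor = new Compressor(level: 5);
int written = compressor.Wrap(input, output, 0);                 // returns the compressed byte count
Console.WriteLine($"compressed {input.Length} -> {written} bytes");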
9
src/SharpCompress/Compressors/Zstd/Constants.cs
Normal file
@@ -0,0 +1,9 @@
|
||||
namespace ZstdSharp
|
||||
{
|
||||
internal class Constants
|
||||
{
|
||||
//NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks
|
||||
//NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27
|
||||
public const ulong MaxByteArrayLength = 0x7FFFFFC7;
|
||||
}
|
||||
}
|
||||
190
src/SharpCompress/Compressors/Zstd/DecompressionStream.cs
Normal file
@@ -0,0 +1,190 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using ZstdSharp.Unsafe;
|
||||
|
||||
namespace ZstdSharp
|
||||
{
|
||||
public class DecompressionStream : Stream
|
||||
{
|
||||
private readonly Stream innerStream;
|
||||
private readonly byte[] inputBuffer;
|
||||
private readonly int inputBufferSize;
|
||||
private Decompressor decompressor;
|
||||
private ZSTD_inBuffer_s input;
|
||||
private nuint lastDecompressResult = 0;
|
||||
|
||||
public DecompressionStream(Stream stream, int bufferSize = 0)
|
||||
{
|
||||
if (stream == null)
|
||||
throw new ArgumentNullException(nameof(stream));
|
||||
|
||||
if (!stream.CanRead)
|
||||
throw new ArgumentException("Stream is not readable", nameof(stream));
|
||||
|
||||
if (bufferSize < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(bufferSize));
|
||||
|
||||
innerStream = stream;
|
||||
decompressor = new Decompressor();
|
||||
|
||||
inputBufferSize = bufferSize > 0 ? bufferSize : (int) Methods.ZSTD_CStreamInSize().EnsureZstdSuccess();
|
||||
inputBuffer = ArrayPool<byte>.Shared.Rent(inputBufferSize);
|
||||
input = new ZSTD_inBuffer_s {pos = (nuint) inputBufferSize, size = (nuint) inputBufferSize};
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_dParameter parameter, int value)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
decompressor.SetParameter(parameter, value);
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_dParameter parameter)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
return decompressor.GetParameter(parameter);
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
decompressor.LoadDictionary(dict);
|
||||
}
|
||||
|
||||
~DecompressionStream() => Dispose(false);
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (decompressor == null)
|
||||
return;
|
||||
|
||||
decompressor.Dispose();
|
||||
ArrayPool<byte>.Shared.Return(inputBuffer);
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
=> Read(new Span<byte>(buffer, offset, count));
|
||||
|
||||
#if !NETSTANDARD2_0 && !NETFRAMEWORK
|
||||
public override int Read(Span<byte> buffer)
|
||||
#else
|
||||
public int Read(Span<byte> buffer)
|
||||
#endif
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
var output = new ZSTD_outBuffer_s {pos = 0, size = (nuint) buffer.Length};
|
||||
while (output.pos < output.size)
|
||||
{
|
||||
if (input.pos >= input.size)
|
||||
{
|
||||
int bytesRead;
|
||||
if ((bytesRead = innerStream.Read(inputBuffer, 0, inputBufferSize)) == 0)
|
||||
{
|
||||
if (lastDecompressResult != 0)
|
||||
throw new EndOfStreamException("Premature end of stream");
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
input.size = (nuint) bytesRead;
|
||||
input.pos = 0;
|
||||
}
|
||||
|
||||
lastDecompressResult = DecompressStream(ref output, buffer);
|
||||
}
|
||||
|
||||
return (int) output.pos;
|
||||
}
|
||||
|
||||
public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
|
||||
=> ReadAsync(new Memory<byte>(buffer, offset, count), cancellationToken).AsTask();
|
||||
|
||||
#if !NETSTANDARD2_0 && !NET461
|
||||
public override async ValueTask<int> ReadAsync(Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default)
|
||||
#else
|
||||
public async ValueTask<int> ReadAsync(Memory<byte> buffer,
|
||||
CancellationToken cancellationToken = default)
|
||||
#endif
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
|
||||
var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length};
|
||||
while (output.pos < output.size)
|
||||
{
|
||||
if (input.pos >= input.size)
|
||||
{
|
||||
int bytesRead;
|
||||
if ((bytesRead = await innerStream.ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken)
|
||||
.ConfigureAwait(false)) == 0)
|
||||
{
|
||||
if (lastDecompressResult != 0)
|
||||
throw new EndOfStreamException("Premature end of stream");
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
input.size = (nuint) bytesRead;
|
||||
input.pos = 0;
|
||||
}
|
||||
|
||||
lastDecompressResult = DecompressStream(ref output, buffer.Span);
|
||||
}
|
||||
|
||||
return (int) output.pos;
|
||||
}
|
||||
|
||||
private unsafe nuint DecompressStream(ref ZSTD_outBuffer_s output, Span<byte> outputBuffer)
|
||||
{
|
||||
fixed (byte* inputBufferPtr = inputBuffer)
|
||||
fixed (byte* outputBufferPtr = outputBuffer)
|
||||
{
|
||||
input.src = inputBufferPtr;
|
||||
output.dst = outputBufferPtr;
|
||||
return decompressor.DecompressStream(ref input, ref output);
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanSeek => false;
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length => throw new NotSupportedException();
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => throw new NotSupportedException();
|
||||
set => throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void Flush() => throw new NotSupportedException();
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
|
||||
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (decompressor == null)
|
||||
throw new ObjectDisposedException(nameof(DecompressionStream));
|
||||
}
|
||||
|
||||
#if NETSTANDARD2_0 || NET461
|
||||
public virtual ValueTask DisposeAsync()
|
||||
{
|
||||
try
|
||||
{
|
||||
Dispose();
|
||||
return default;
|
||||
}
|
||||
catch (Exception exc)
|
||||
{
|
||||
return new ValueTask(Task.FromException(exc));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
149
src/SharpCompress/Compressors/Zstd/Decompressor.cs
Normal file
@@ -0,0 +1,149 @@
|
||||
using System;
|
||||
using ZstdSharp.Unsafe;
|
||||
|
||||
namespace ZstdSharp
|
||||
{
|
||||
public unsafe class Decompressor : IDisposable
|
||||
{
|
||||
private ZSTD_DCtx_s* dctx;
|
||||
|
||||
public Decompressor()
|
||||
{
|
||||
dctx = Methods.ZSTD_createDCtx();
|
||||
if (dctx == null)
|
||||
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create dctx");
|
||||
}
|
||||
|
||||
~Decompressor()
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
}
|
||||
|
||||
public void SetParameter(ZSTD_dParameter parameter, int value)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
Methods.ZSTD_DCtx_setParameter(dctx, parameter, value).EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public int GetParameter(ZSTD_dParameter parameter)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
int value;
|
||||
Methods.ZSTD_DCtx_getParameter(dctx, parameter, &value).EnsureZstdSuccess();
|
||||
return value;
|
||||
}
|
||||
|
||||
public void LoadDictionary(byte[] dict)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
if (dict == null)
|
||||
{
|
||||
Methods.ZSTD_DCtx_loadDictionary(dctx, null, 0).EnsureZstdSuccess();
|
||||
}
|
||||
else
|
||||
{
|
||||
fixed (byte* dictPtr = dict)
|
||||
Methods.ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint) dict.Length).EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
public static ulong GetDecompressedSize(ReadOnlySpan<byte> src)
|
||||
{
|
||||
fixed (byte* srcPtr = src)
|
||||
return Methods.ZSTD_decompressBound(srcPtr, (nuint) src.Length).EnsureContentSizeOk();
|
||||
}
|
||||
|
||||
public static ulong GetDecompressedSize(ArraySegment<byte> src)
|
||||
=> GetDecompressedSize((ReadOnlySpan<byte>) src);
|
||||
|
||||
public static ulong GetDecompressedSize(byte[] src, int srcOffset, int srcLength)
|
||||
=> GetDecompressedSize(new ReadOnlySpan<byte>(src, srcOffset, srcLength));
|
||||
|
||||
public Span<byte> Unwrap(ReadOnlySpan<byte> src, int maxDecompressedSize = int.MaxValue)
|
||||
{
|
||||
var expectedDstSize = GetDecompressedSize(src);
|
||||
if (expectedDstSize > (ulong) maxDecompressedSize)
|
||||
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall,
|
||||
$"Decompressed content size {expectedDstSize} is greater than {nameof(maxDecompressedSize)} {maxDecompressedSize}");
|
||||
if (expectedDstSize > Constants.MaxByteArrayLength)
|
||||
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall,
|
||||
$"Decompressed content size {expectedDstSize} is greater than max possible byte array size {Constants.MaxByteArrayLength}");
|
||||
|
||||
var dest = new byte[expectedDstSize];
|
||||
var length = Unwrap(src, dest);
|
||||
return new Span<byte>(dest, 0, length);
|
||||
}
|
||||
|
||||
public int Unwrap(byte[] src, byte[] dest, int offset)
|
||||
=> Unwrap(src, new Span<byte>(dest, offset, dest.Length - offset));
|
||||
|
||||
public int Unwrap(ReadOnlySpan<byte> src, Span<byte> dest)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
return (int) Methods
|
||||
.ZSTD_decompressDCtx(dctx, destPtr, (nuint) dest.Length, srcPtr, (nuint) src.Length)
|
||||
.EnsureZstdSuccess();
|
||||
}
|
||||
|
||||
public int Unwrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength)
|
||||
=> Unwrap(new ReadOnlySpan<byte>(src, srcOffset, srcLength), new Span<byte>(dst, dstOffset, dstLength));
|
||||
|
||||
public bool TryUnwrap(byte[] src, byte[] dest, int offset, out int written)
|
||||
=> TryUnwrap(src, new Span<byte>(dest, offset, dest.Length - offset), out written);
|
||||
|
||||
public bool TryUnwrap(ReadOnlySpan<byte> src, Span<byte> dest, out int written)
|
||||
{
|
||||
EnsureNotDisposed();
|
||||
fixed (byte* srcPtr = src)
|
||||
fixed (byte* destPtr = dest)
|
||||
{
|
||||
var returnValue =
|
||||
Methods.ZSTD_decompressDCtx(dctx, destPtr, (nuint) dest.Length, srcPtr, (nuint) src.Length);
|
||||
|
||||
if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))
|
||||
{
|
||||
written = default;
|
||||
return false;
|
||||
}
|
||||
|
||||
returnValue.EnsureZstdSuccess();
|
||||
written = (int) returnValue;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
public bool TryUnwrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength, out int written)
|
||||
=> TryUnwrap(new ReadOnlySpan<byte>(src, srcOffset, srcLength), new Span<byte>(dst, dstOffset, dstLength), out written);
|
||||
|
||||
private void ReleaseUnmanagedResources()
|
||||
{
|
||||
if (dctx != null)
|
||||
{
|
||||
Methods.ZSTD_freeDCtx(dctx);
|
||||
dctx = null;
|
||||
}
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
ReleaseUnmanagedResources();
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
private void EnsureNotDisposed()
|
||||
{
|
||||
if (dctx == null)
|
||||
throw new ObjectDisposedException(nameof(Decompressor));
|
||||
}
|
||||
|
||||
internal nuint DecompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s output)
|
||||
{
|
||||
fixed (ZSTD_inBuffer_s* inputPtr = &input)
|
||||
fixed (ZSTD_outBuffer_s* outputPtr = &output)
|
||||
{
|
||||
return Methods.ZSTD_decompressStream(dctx, outputPtr, inputPtr).EnsureZstdSuccess();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
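A round-trip sketch pairing Compressor.Wrap with Decompressor.Unwrap; Unwrap sizes its output from the frame header via GetDecompressedSize (the input text is illustrative):

using System;
using System.Text;
using ZstdSharp;

byte[] original = Encoding.UTF8.GetBytes("hello zstd");         // illustrative input

using var compressor = new Compressor();
using var decompressor = new Decompressor();

byte[] compressed = compressor.Wrap(original).ToArray();
byte[] roundTripped = decompressor.Unwrap(compressed).ToArray();

Console.WriteLine(Encoding.UTF8.GetString(roundTripped));       // "hello zstd"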
39
src/SharpCompress/Compressors/Zstd/DictBuilder.cs
Normal file
@@ -0,0 +1,39 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using ZstdSharp.Unsafe;
|
||||
|
||||
namespace ZstdSharp
|
||||
{
|
||||
public static unsafe class DictBuilder
|
||||
{
|
||||
public static byte[] TrainFromBuffer(IEnumerable<byte[]> samples, int dictCapacity = DefaultDictCapacity)
|
||||
{
|
||||
var ms = new MemoryStream();
|
||||
var samplesSizes = samples.Select(sample =>
|
||||
{
|
||||
ms.Write(sample, 0, sample.Length);
|
||||
return (nuint) sample.Length;
|
||||
}).ToArray();
|
||||
|
||||
var dictBuffer = new byte[dictCapacity];
|
||||
fixed (byte* dictBufferPtr = dictBuffer)
|
||||
fixed (byte* samplesBufferPtr = ms.GetBuffer())
|
||||
fixed (nuint* samplesSizesPtr = samplesSizes)
|
||||
{
|
||||
var dictSize = (int) Methods
|
||||
.ZDICT_trainFromBuffer(dictBufferPtr, (nuint) dictCapacity, samplesBufferPtr, samplesSizesPtr,
|
||||
(uint) samplesSizes.Length)
|
||||
.EnsureZdictSuccess();
|
||||
|
||||
if (dictCapacity != dictSize)
|
||||
Array.Resize(ref dictBuffer, dictSize);
|
||||
|
||||
return dictBuffer;
|
||||
}
|
||||
}
|
||||
|
||||
public const int DefaultDictCapacity = 112640; // Used by zstd utility by default
|
||||
}
|
||||
}
|
||||
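A sketch of wiring DictBuilder into the compressor/decompressor pair; the sample source is a hypothetical placeholder, and real dictionary training needs many representative samples (ZDICT reports an error on trivially small input sets):

using System.Collections.Generic;
using ZstdSharp;

IEnumerable<byte[]> samples = LoadSamplePayloads();              // placeholder: caller-supplied sample payloads

byte[] dictionary = DictBuilder.TrainFromBuffer(samples);        // default capacity is 112640 bytes

using var compressor = new Compressor();
compressor.LoadDictionary(dictionary);

using var decompressor = new Decompressor();
decompressor.LoadDictionary(dictionary);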
201
src/SharpCompress/Compressors/Zstd/Polyfills/BitOperations.cs
Normal file
@@ -0,0 +1,201 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
|
||||
#if !NETCOREAPP3_0_OR_GREATER
|
||||
|
||||
using System.Runtime.CompilerServices;
|
||||
using static ZstdSharp.UnsafeHelper;
|
||||
|
||||
// Some routines inspired by the Stanford Bit Twiddling Hacks by Sean Eron Anderson:
|
||||
// http://graphics.stanford.edu/~seander/bithacks.html
|
||||
|
||||
namespace System.Numerics
|
||||
{
|
||||
/// <summary>
|
||||
/// Utility methods for intrinsic bit-twiddling operations.
|
||||
/// The methods use hardware intrinsics when available on the underlying platform,
|
||||
/// otherwise they use optimized software fallbacks.
|
||||
/// </summary>
|
||||
public static unsafe class BitOperations
|
||||
{
|
||||
// hack: should be public because of inline
|
||||
public static readonly byte* TrailingZeroCountDeBruijn = GetArrayPointer(new byte[]
|
||||
{
|
||||
00, 01, 28, 02, 29, 14, 24, 03,
|
||||
30, 22, 20, 15, 25, 17, 04, 08,
|
||||
31, 27, 13, 23, 21, 19, 16, 07,
|
||||
26, 12, 18, 06, 11, 05, 10, 09
|
||||
});
|
||||
|
||||
// hack: should be public because of inline
|
||||
public static readonly byte* Log2DeBruijn = GetArrayPointer(new byte[]
|
||||
{
|
||||
00, 09, 01, 10, 13, 21, 02, 29,
|
||||
11, 14, 16, 18, 22, 25, 03, 30,
|
||||
08, 12, 20, 28, 15, 17, 24, 07,
|
||||
19, 27, 23, 06, 26, 05, 04, 31
|
||||
});
|
||||
|
||||
/// <summary>
|
||||
/// Returns the integer (floor) log of the specified value, base 2.
|
||||
/// Note that by convention, input value 0 returns 0 since log(0) is undefined.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int Log2(uint value)
|
||||
{
|
||||
// The 0->0 contract is fulfilled by setting the LSB to 1.
|
||||
// Log(1) is 0, and setting the LSB for values > 1 does not change the log2 result.
|
||||
value |= 1;
|
||||
|
||||
// value lzcnt actual expected
|
||||
// ..0001 31 31-31 0
|
||||
// ..0010 30 31-30 1
|
||||
// 0010.. 2 31-2 29
|
||||
// 0100.. 1 31-1 30
|
||||
// 1000.. 0 31-0 31
|
||||
|
||||
// Fallback contract is 0->0
|
||||
// No AggressiveInlining due to large method size
|
||||
// Has conventional contract 0->0 (Log(0) is undefined)
|
||||
|
||||
// Fill trailing zeros with ones, eg 00010010 becomes 00011111
|
||||
value |= value >> 01;
|
||||
value |= value >> 02;
|
||||
value |= value >> 04;
|
||||
value |= value >> 08;
|
||||
value |= value >> 16;
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return Log2DeBruijn[
|
||||
// Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_1100_0100_1010_1100_1101_1101u
|
||||
(int)((value * 0x07C4ACDDu) >> 27)];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns the integer (floor) log of the specified value, base 2.
|
||||
/// Note that by convention, input value 0 returns 0 since log(0) is undefined.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int Log2(ulong value)
|
||||
{
|
||||
value |= 1;
|
||||
|
||||
uint hi = (uint)(value >> 32);
|
||||
|
||||
if (hi == 0)
|
||||
{
|
||||
return Log2((uint)value);
|
||||
}
|
||||
|
||||
return 32 + Log2(hi);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in an integer value.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(int value)
|
||||
=> TrailingZeroCount((uint)value);
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in an integer value.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(uint value)
|
||||
{
|
||||
// Unguarded fallback contract is 0->0, BSF contract is 0->undefined
|
||||
if (value == 0)
|
||||
{
|
||||
return 32;
|
||||
}
|
||||
|
||||
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
|
||||
return TrailingZeroCountDeBruijn[
|
||||
// Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_0111_1100_1011_0101_0011_0001u
|
||||
(int)(((value & (uint)-(int)value) * 0x077CB531u) >> 27)]; // Multi-cast mitigates redundant conv.u8
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(long value)
|
||||
=> TrailingZeroCount((ulong)value);
|
||||
|
||||
/// <summary>
|
||||
/// Count the number of trailing zero bits in a mask.
|
||||
/// Similar in behavior to the x86 instruction TZCNT.
|
||||
/// </summary>
|
||||
/// <param name="value">The value.</param>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static int TrailingZeroCount(ulong value)
|
||||
{
|
||||
uint lo = (uint)value;
|
||||
|
||||
if (lo == 0)
|
||||
{
|
||||
return 32 + TrailingZeroCount((uint)(value >> 32));
|
||||
}
|
||||
|
||||
return TrailingZeroCount(lo);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value left by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROL.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..31] is treated as congruent mod 32.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static uint RotateLeft(uint value, int offset)
|
||||
=> (value << offset) | (value >> (32 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value left by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROL.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..63] is treated as congruent mod 64.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static ulong RotateLeft(ulong value, int offset)
|
||||
=> (value << offset) | (value >> (64 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value right by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROR.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..31] is treated as congruent mod 32.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static uint RotateRight(uint value, int offset)
|
||||
=> (value >> offset) | (value << (32 - offset));
|
||||
|
||||
/// <summary>
|
||||
/// Rotates the specified value right by the specified number of bits.
|
||||
/// Similar in behavior to the x86 instruction ROR.
|
||||
/// </summary>
|
||||
/// <param name="value">The value to rotate.</param>
|
||||
/// <param name="offset">The number of bits to rotate by.
|
||||
/// Any value outside the range [0..63] is treated as congruent mod 64.</param>
|
||||
/// <returns>The rotated value.</returns>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static ulong RotateRight(ulong value, int offset)
|
||||
=> (value >> offset) | (value << (64 - offset));
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
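A few worked values for the De Bruijn fallbacks above; they match what the BCL BitOperations returns on newer targets, with zero inputs following the stated conventions (Log2(0) = 0, TrailingZeroCount(0) = 32):

using System;
using System.Numerics;

Console.WriteLine(BitOperations.Log2(1u));                   // 0
Console.WriteLine(BitOperations.Log2(0x8000u));              // 15
Console.WriteLine(BitOperations.TrailingZeroCount(0x10u));   // 4
Console.WriteLine(BitOperations.TrailingZeroCount(0u));      // 32 (no set bit)
Console.WriteLine(BitOperations.RotateLeft(0x80000000u, 1)); // 1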
@@ -0,0 +1,25 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
|
||||
#if !NET5_0_OR_GREATER
|
||||
namespace System.Runtime.CompilerServices
|
||||
{
|
||||
/// <summary>
|
||||
/// Used to indicate to the compiler that the <c>.locals init</c> flag should not be set in method headers.
|
||||
/// </summary>
|
||||
/// <remarks>Internal copy of the .NET 5 attribute.</remarks>
|
||||
[AttributeUsage(
|
||||
AttributeTargets.Module |
|
||||
AttributeTargets.Class |
|
||||
AttributeTargets.Struct |
|
||||
AttributeTargets.Interface |
|
||||
AttributeTargets.Constructor |
|
||||
AttributeTargets.Method |
|
||||
AttributeTargets.Property |
|
||||
AttributeTargets.Event,
|
||||
Inherited = false)]
|
||||
internal sealed class SkipLocalsInitAttribute : Attribute
|
||||
{
|
||||
}
|
||||
}
|
||||
#endif
|
||||
54
src/SharpCompress/Compressors/Zstd/Polyfills/Vector128.cs
Normal file
@@ -0,0 +1,54 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
|
||||
#if !NETCOREAPP3_0_OR_GREATER
|
||||
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace System.Runtime.Intrinsics
|
||||
{
|
||||
public static class Vector128
|
||||
{
|
||||
internal const int Size = 16;
|
||||
|
||||
public static unsafe Vector128<byte> Create(byte value)
|
||||
{
|
||||
byte* pResult = stackalloc byte[16]
|
||||
{
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
value,
|
||||
};
|
||||
|
||||
return Unsafe.AsRef<Vector128<byte>>(pResult);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static Vector128<U> As<T, U>(this Vector128<T> vector)
|
||||
where T : struct
|
||||
where U : struct =>
|
||||
Unsafe.As<Vector128<T>, Vector128<U>>(ref vector);
|
||||
|
||||
public static T GetElement<T>(this Vector128<T> vector, int index)
|
||||
where T : struct
|
||||
{
|
||||
ref T e0 = ref Unsafe.As<Vector128<T>, T>(ref vector);
|
||||
return Unsafe.Add(ref e0, index);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
36
src/SharpCompress/Compressors/Zstd/Polyfills/Vector128_1.cs
Normal file
@@ -0,0 +1,36 @@
|
||||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
|
||||
#if !NETCOREAPP3_0_OR_GREATER
|
||||
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
|
||||
namespace System.Runtime.Intrinsics
|
||||
{
|
||||
[StructLayout(LayoutKind.Sequential, Size = Vector128.Size)]
|
||||
public readonly struct Vector128<T> : IEquatable<Vector128<T>>
|
||||
where T : struct
|
||||
{
|
||||
private readonly ulong _00;
|
||||
private readonly ulong _01;
|
||||
|
||||
public static int Count => Vector128.Size / Unsafe.SizeOf<T>();
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public bool Equals(Vector128<T> other)
|
||||
{
|
||||
for (int i = 0; i < Count; i++)
|
||||
{
|
||||
if (!((IEquatable<T>)(this.GetElement(i))).Equals(other.GetElement(i)))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
43
src/SharpCompress/Compressors/Zstd/ThrowHelper.cs
Normal file
@@ -0,0 +1,43 @@
|
||||
using ZstdSharp.Unsafe;
|
||||
|
||||
namespace ZstdSharp
|
||||
{
|
||||
public static unsafe class ThrowHelper
|
||||
{
|
||||
private const ulong ZSTD_CONTENTSIZE_UNKNOWN = unchecked(0UL - 1);
|
||||
private const ulong ZSTD_CONTENTSIZE_ERROR = unchecked(0UL - 2);
|
||||
|
||||
public static nuint EnsureZstdSuccess(this nuint returnValue)
|
||||
{
|
||||
if (Methods.ZSTD_isError(returnValue) != 0)
|
||||
ThrowException(returnValue, Methods.ZSTD_getErrorName(returnValue));
|
||||
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
public static nuint EnsureZdictSuccess(this nuint returnValue)
|
||||
{
|
||||
if (Methods.ZDICT_isError(returnValue) != 0)
|
||||
ThrowException(returnValue, Methods.ZDICT_getErrorName(returnValue));
|
||||
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
public static ulong EnsureContentSizeOk(this ulong returnValue)
|
||||
{
|
||||
if (returnValue == ZSTD_CONTENTSIZE_UNKNOWN)
|
||||
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Decompressed content size is not specified");
|
||||
|
||||
if (returnValue == ZSTD_CONTENTSIZE_ERROR)
|
||||
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Decompressed content size cannot be determined (e.g. invalid magic number, srcSize too small)");
|
||||
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
private static void ThrowException(nuint returnValue, string message)
|
||||
{
|
||||
var code = 0 - returnValue;
|
||||
throw new ZstdException((ZSTD_ErrorCode) code, message);
|
||||
}
|
||||
}
|
||||
}
|
||||
368
src/SharpCompress/Compressors/Zstd/Unsafe/Arrays.cs
Normal file
@@ -0,0 +1,368 @@
|
||||
using System;
|
||||
using static ZstdSharp.UnsafeHelper;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
static uint* rtbTable = GetArrayPointer(new uint[8] {
|
||||
0,
|
||||
473195,
|
||||
504333,
|
||||
520860,
|
||||
550000,
|
||||
700000,
|
||||
750000,
|
||||
830000,
|
||||
});
|
||||
static byte* LL_Code = GetArrayPointer(new byte[64]
|
||||
{
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8,
|
||||
9,
|
||||
10,
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
16,
|
||||
17,
|
||||
17,
|
||||
18,
|
||||
18,
|
||||
19,
|
||||
19,
|
||||
20,
|
||||
20,
|
||||
20,
|
||||
20,
|
||||
21,
|
||||
21,
|
||||
21,
|
||||
21,
|
||||
22,
|
||||
22,
|
||||
22,
|
||||
22,
|
||||
22,
|
||||
22,
|
||||
22,
|
||||
22,
|
||||
23,
|
||||
23,
|
||||
23,
|
||||
23,
|
||||
23,
|
||||
23,
|
||||
23,
|
||||
23,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
24,
|
||||
});
|
||||
static byte* ML_Code = GetArrayPointer(new byte[128]
|
||||
{
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8,
|
||||
9,
|
||||
10,
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
17,
|
||||
18,
|
||||
19,
|
||||
20,
|
||||
21,
|
||||
22,
|
||||
23,
|
||||
24,
|
||||
25,
|
||||
26,
|
||||
27,
|
||||
28,
|
||||
29,
|
||||
30,
|
||||
31,
|
||||
32,
|
||||
32,
|
||||
33,
|
||||
33,
|
||||
34,
|
||||
34,
|
||||
35,
|
||||
35,
|
||||
36,
|
||||
36,
|
||||
36,
|
||||
36,
|
||||
37,
|
||||
37,
|
||||
37,
|
||||
37,
|
||||
38,
|
||||
38,
|
||||
38,
|
||||
38,
|
||||
38,
|
||||
38,
|
||||
38,
|
||||
38,
|
||||
39,
|
||||
39,
|
||||
39,
|
||||
39,
|
||||
39,
|
||||
39,
|
||||
39,
|
||||
39,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
40,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
41,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
42,
|
||||
});
|
||||
static ulong* srcSizeTiers = GetArrayPointer(new ulong[4]
|
||||
{
|
||||
16 * (1 << 10),
|
||||
128 * (1 << 10),
|
||||
256 * (1 << 10),
|
||||
(unchecked(0UL - 1)),
|
||||
});
|
||||
static ZSTD_blockCompressor?[][] blockCompressor = new ZSTD_blockCompressor?[4][]
|
||||
{
|
||||
new ZSTD_blockCompressor[10]
|
||||
{
|
||||
ZSTD_compressBlock_fast,
|
||||
ZSTD_compressBlock_fast,
|
||||
ZSTD_compressBlock_doubleFast,
|
||||
ZSTD_compressBlock_greedy,
|
||||
ZSTD_compressBlock_lazy,
|
||||
ZSTD_compressBlock_lazy2,
|
||||
ZSTD_compressBlock_btlazy2,
|
||||
ZSTD_compressBlock_btopt,
|
||||
ZSTD_compressBlock_btultra,
|
||||
ZSTD_compressBlock_btultra2,
|
||||
},
|
||||
new ZSTD_blockCompressor[10]
|
||||
{
|
||||
ZSTD_compressBlock_fast_extDict,
|
||||
ZSTD_compressBlock_fast_extDict,
|
||||
ZSTD_compressBlock_doubleFast_extDict,
|
||||
ZSTD_compressBlock_greedy_extDict,
|
||||
ZSTD_compressBlock_lazy_extDict,
|
||||
ZSTD_compressBlock_lazy2_extDict,
|
||||
ZSTD_compressBlock_btlazy2_extDict,
|
||||
ZSTD_compressBlock_btopt_extDict,
|
||||
ZSTD_compressBlock_btultra_extDict,
|
||||
ZSTD_compressBlock_btultra_extDict,
|
||||
},
|
||||
new ZSTD_blockCompressor[10]
|
||||
{
|
||||
ZSTD_compressBlock_fast_dictMatchState,
|
||||
ZSTD_compressBlock_fast_dictMatchState,
|
||||
ZSTD_compressBlock_doubleFast_dictMatchState,
|
||||
ZSTD_compressBlock_greedy_dictMatchState,
|
||||
ZSTD_compressBlock_lazy_dictMatchState,
|
||||
ZSTD_compressBlock_lazy2_dictMatchState,
|
||||
ZSTD_compressBlock_btlazy2_dictMatchState,
|
||||
ZSTD_compressBlock_btopt_dictMatchState,
|
||||
ZSTD_compressBlock_btultra_dictMatchState,
|
||||
ZSTD_compressBlock_btultra_dictMatchState,
|
||||
},
|
||||
new ZSTD_blockCompressor?[10]
|
||||
{
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
ZSTD_compressBlock_greedy_dedicatedDictSearch,
|
||||
ZSTD_compressBlock_lazy_dedicatedDictSearch,
|
||||
ZSTD_compressBlock_lazy2_dedicatedDictSearch,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
},
|
||||
};
|
||||
static ZSTD_blockCompressor[][] rowBasedBlockCompressors = new ZSTD_blockCompressor[4][]
|
||||
{
|
||||
new ZSTD_blockCompressor[3]
|
||||
{
|
||||
ZSTD_compressBlock_greedy_row,
|
||||
ZSTD_compressBlock_lazy_row,
|
||||
ZSTD_compressBlock_lazy2_row,
|
||||
},
|
||||
new ZSTD_blockCompressor[3]
|
||||
{
|
||||
ZSTD_compressBlock_greedy_extDict_row,
|
||||
ZSTD_compressBlock_lazy_extDict_row,
|
||||
ZSTD_compressBlock_lazy2_extDict_row,
|
||||
},
|
||||
new ZSTD_blockCompressor[3]
|
||||
{
|
||||
ZSTD_compressBlock_greedy_dictMatchState_row,
|
||||
ZSTD_compressBlock_lazy_dictMatchState_row,
|
||||
ZSTD_compressBlock_lazy2_dictMatchState_row,
|
||||
},
|
||||
new ZSTD_blockCompressor[3]
|
||||
{
|
||||
ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
|
||||
ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
|
||||
ZSTD_compressBlock_lazy2_dedicatedDictSearch_row,
|
||||
},
|
||||
};
|
||||
static searchMax_f?[][] searchFuncs = new searchMax_f?[4][]
|
||||
{
|
||||
new searchMax_f[3]
|
||||
{
|
||||
ZSTD_HcFindBestMatch_selectMLS,
|
||||
ZSTD_BtFindBestMatch_selectMLS,
|
||||
ZSTD_RowFindBestMatch_selectRowLog,
|
||||
},
|
||||
new searchMax_f?[3]
|
||||
{
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
},
|
||||
new searchMax_f[3]
|
||||
{
|
||||
ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
|
||||
ZSTD_BtFindBestMatch_dictMatchState_selectMLS,
|
||||
ZSTD_RowFindBestMatch_dictMatchState_selectRowLog,
|
||||
},
|
||||
new searchMax_f?[3]
|
||||
{
|
||||
ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
|
||||
null,
|
||||
ZSTD_RowFindBestMatch_dedicatedDictSearch_selectRowLog,
|
||||
},
|
||||
};
|
||||
static searchMax_f[] searchFuncsExtGeneric = new searchMax_f[3]
|
||||
{
|
||||
ZSTD_HcFindBestMatch_extDict_selectMLS,
|
||||
ZSTD_BtFindBestMatch_extDict_selectMLS,
|
||||
ZSTD_RowFindBestMatch_extDict_selectRowLog,
|
||||
};
|
||||
static decompressionAlgo[] decompress = new decompressionAlgo[2]
|
||||
{
|
||||
HUF_decompress4X1,
|
||||
HUF_decompress4X2,
|
||||
};
|
||||
static uint* dec32table = GetArrayPointer(new uint[8]
|
||||
{
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
1,
|
||||
4,
|
||||
4,
|
||||
4,
|
||||
4,
|
||||
});
|
||||
static int* dec64table = GetArrayPointer(new int[8]
|
||||
{
|
||||
8,
|
||||
8,
|
||||
8,
|
||||
7,
|
||||
8,
|
||||
9,
|
||||
10,
|
||||
11,
|
||||
});
|
||||
|
||||
private static byte* emptyString = GetArrayPointer(new byte[] {0});
|
||||
}
|
||||
}
|
||||
24
src/SharpCompress/Compressors/Zstd/Unsafe/BIT_CStream_t.cs
Normal file
@@ -0,0 +1,24 @@
|
||||
using System;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
/*-******************************************
|
||||
* bitStream encoding API (write forward)
|
||||
********************************************/
|
||||
/* bitStream can mix input from multiple sources.
|
||||
* A critical property of these streams is that they encode and decode in **reverse** direction.
|
||||
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
|
||||
*/
|
||||
public unsafe partial struct BIT_CStream_t
|
||||
{
|
||||
public nuint bitContainer;
|
||||
|
||||
public uint bitPos;
|
||||
|
||||
public sbyte* startPtr;
|
||||
|
||||
public sbyte* ptr;
|
||||
|
||||
public sbyte* endPtr;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,12 @@
|
||||
using System;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public enum BIT_DStream_status
|
||||
{
|
||||
BIT_DStream_unfinished = 0,
|
||||
BIT_DStream_endOfBuffer = 1,
|
||||
BIT_DStream_completed = 2,
|
||||
BIT_DStream_overflow = 3,
|
||||
}
|
||||
}
|
||||
20
src/SharpCompress/Compressors/Zstd/Unsafe/BIT_DStream_t.cs
Normal file
@@ -0,0 +1,20 @@
|
||||
using System;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
/*-********************************************
|
||||
* bitStream decoding API (read backward)
|
||||
**********************************************/
|
||||
public unsafe partial struct BIT_DStream_t
|
||||
{
|
||||
public nuint bitContainer;
|
||||
|
||||
public uint bitsConsumed;
|
||||
|
||||
public sbyte* ptr;
|
||||
|
||||
public sbyte* start;
|
||||
|
||||
public sbyte* limitPtr;
|
||||
}
|
||||
}
|
||||
420
src/SharpCompress/Compressors/Zstd/Unsafe/Bitstream.cs
Normal file
@@ -0,0 +1,420 @@
|
||||
using System;
|
||||
using System.Numerics;
|
||||
using System.Runtime.CompilerServices;
|
||||
using static ZstdSharp.UnsafeHelper;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/*-**************************************************************
|
||||
* Internal functions
|
||||
****************************************************************/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
private static uint BIT_highbit32(uint val)
|
||||
{
|
||||
assert(val != 0);
|
||||
|
||||
{
|
||||
return (uint) BitOperations.Log2(val);
|
||||
}
|
||||
}
|
||||
|
||||
public static uint* BIT_mask = GetArrayPointer(new uint[32]
|
||||
{
|
||||
0,
|
||||
1,
|
||||
3,
|
||||
7,
|
||||
0xF,
|
||||
0x1F,
|
||||
0x3F,
|
||||
0x7F,
|
||||
0xFF,
|
||||
0x1FF,
|
||||
0x3FF,
|
||||
0x7FF,
|
||||
0xFFF,
|
||||
0x1FFF,
|
||||
0x3FFF,
|
||||
0x7FFF,
|
||||
0xFFFF,
|
||||
0x1FFFF,
|
||||
0x3FFFF,
|
||||
0x7FFFF,
|
||||
0xFFFFF,
|
||||
0x1FFFFF,
|
||||
0x3FFFFF,
|
||||
0x7FFFFF,
|
||||
0xFFFFFF,
|
||||
0x1FFFFFF,
|
||||
0x3FFFFFF,
|
||||
0x7FFFFFF,
|
||||
0xFFFFFFF,
|
||||
0x1FFFFFFF,
|
||||
0x3FFFFFFF,
|
||||
0x7FFFFFFF,
|
||||
});
|
||||
|
||||
/*-**************************************************************
|
||||
* bitStream encoding
|
||||
****************************************************************/
|
||||
/*! BIT_initCStream() :
|
||||
* `dstCapacity` must be > sizeof(size_t)
|
||||
* @return : 0 if success,
|
||||
* otherwise an error code (can be tested using ERR_isError()) */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_initCStream(BIT_CStream_t* bitC, void* startPtr, nuint dstCapacity)
|
||||
{
|
||||
bitC->bitContainer = 0;
|
||||
bitC->bitPos = 0;
|
||||
bitC->startPtr = (sbyte*)(startPtr);
|
||||
bitC->ptr = bitC->startPtr;
|
||||
bitC->endPtr = bitC->startPtr + dstCapacity - (nuint)(sizeof(nuint));
|
||||
if (dstCapacity <= (nuint)(sizeof(nuint)))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*! BIT_addBits() :
|
||||
* can add up to 31 bits into `bitC`.
|
||||
* Note : does not check for register overflow ! */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_addBits(BIT_CStream_t* bitC, nuint value, uint nbBits)
|
||||
{
|
||||
assert(nbBits < ((nuint)(sizeof(uint) * 32) / (nuint)(sizeof(uint))));
|
||||
assert(nbBits + bitC->bitPos < (nuint)(sizeof(nuint)) * 8);
|
||||
bitC->bitContainer |= (value & BIT_mask[nbBits]) << (int)bitC->bitPos;
|
||||
bitC->bitPos += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_addBitsFast() :
|
||||
* works only if `value` is _clean_,
|
||||
* meaning all high bits above nbBits are 0 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_addBitsFast(BIT_CStream_t* bitC, nuint value, uint nbBits)
|
||||
{
|
||||
assert((value >> (int)nbBits) == 0);
|
||||
assert(nbBits + bitC->bitPos < (nuint)(sizeof(nuint)) * 8);
|
||||
bitC->bitContainer |= value << (int)bitC->bitPos;
|
||||
bitC->bitPos += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_flushBitsFast() :
|
||||
* assumption : bitContainer has not overflowed
|
||||
* unsafe version; does not check buffer overflow */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_flushBitsFast(BIT_CStream_t* bitC)
|
||||
{
|
||||
nuint nbBytes = bitC->bitPos >> 3;
|
||||
|
||||
assert(bitC->bitPos < (nuint)(sizeof(nuint)) * 8);
|
||||
assert(bitC->ptr <= bitC->endPtr);
|
||||
MEM_writeLEST((void*)bitC->ptr, bitC->bitContainer);
|
||||
bitC->ptr += nbBytes;
|
||||
bitC->bitPos &= 7;
|
||||
bitC->bitContainer >>= (int)(nbBytes * 8);
|
||||
}
|
||||
|
||||
/*! BIT_flushBits() :
|
||||
* assumption : bitContainer has not overflowed
|
||||
* safe version; check for buffer overflow, and prevents it.
|
||||
* note : does not signal buffer overflow.
|
||||
* overflow will be revealed later on using BIT_closeCStream() */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_flushBits(BIT_CStream_t* bitC)
|
||||
{
|
||||
nuint nbBytes = bitC->bitPos >> 3;
|
||||
|
||||
assert(bitC->bitPos < (nuint)(sizeof(nuint)) * 8);
|
||||
assert(bitC->ptr <= bitC->endPtr);
|
||||
MEM_writeLEST((void*)bitC->ptr, bitC->bitContainer);
|
||||
bitC->ptr += nbBytes;
|
||||
if (bitC->ptr > bitC->endPtr)
|
||||
{
|
||||
bitC->ptr = bitC->endPtr;
|
||||
}
|
||||
|
||||
bitC->bitPos &= 7;
|
||||
bitC->bitContainer >>= (int)(nbBytes * 8);
|
||||
}
|
||||
|
||||
/*! BIT_closeCStream() :
|
||||
* @return : size of CStream, in bytes,
|
||||
* or 0 if it could not fit into dstBuffer */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_closeCStream(BIT_CStream_t* bitC)
|
||||
{
|
||||
BIT_addBitsFast(bitC, 1, 1);
|
||||
BIT_flushBits(bitC);
|
||||
if (bitC->ptr >= bitC->endPtr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
return (nuint)((bitC->ptr - bitC->startPtr) + (((bitC->bitPos > 0)) ? 1 : 0));
|
||||
}
|
||||
|
||||
/*-********************************************************
|
||||
* bitStream decoding
|
||||
**********************************************************/
|
||||
/*! BIT_initDStream() :
|
||||
* Initialize a BIT_DStream_t.
|
||||
* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
|
||||
* `srcSize` must be the *exact* size of the bitStream, in bytes.
|
||||
* @return : size of stream (== srcSize), or an errorCode if a problem is detected
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint srcSize)
|
||||
{
|
||||
if (srcSize < 1)
|
||||
{
|
||||
memset((void*)(bitD), (0), ((nuint)(sizeof(BIT_DStream_t))));
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)));
|
||||
}
|
||||
|
||||
bitD->start = (sbyte*)(srcBuffer);
|
||||
bitD->limitPtr = bitD->start + (nuint)(sizeof(nuint));
|
||||
if (srcSize >= (nuint)(sizeof(nuint)))
|
||||
{
|
||||
bitD->ptr = (sbyte*)(srcBuffer) + srcSize - (nuint)(sizeof(nuint));
|
||||
bitD->bitContainer = MEM_readLEST((void*)bitD->ptr);
|
||||
|
||||
{
|
||||
byte lastByte = ((byte*)(srcBuffer))[srcSize - 1];
|
||||
|
||||
bitD->bitsConsumed = lastByte != 0 ? 8 - BIT_highbit32(lastByte) : 0;
|
||||
if (lastByte == 0)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
bitD->ptr = bitD->start;
|
||||
bitD->bitContainer = *(byte*)(bitD->start);
|
||||
switch (srcSize)
|
||||
{
|
||||
case 7:
|
||||
{
|
||||
bitD->bitContainer += (nuint)(((byte*)(srcBuffer))[6]) << (int)((nuint)(sizeof(nuint)) * 8 - 16);
|
||||
}
|
||||
|
||||
|
||||
goto case 6;
|
||||
case 6:
|
||||
{
|
||||
bitD->bitContainer += (nuint)(((byte*)(srcBuffer))[5]) << (int)((nuint)(sizeof(nuint)) * 8 - 24);
|
||||
}
|
||||
|
||||
|
||||
goto case 5;
|
||||
case 5:
|
||||
{
|
||||
bitD->bitContainer += (nuint)(((byte*)(srcBuffer))[4]) << (int)((nuint)(sizeof(nuint)) * 8 - 32);
|
||||
}
|
||||
|
||||
|
||||
goto case 4;
|
||||
case 4:
|
||||
{
|
||||
bitD->bitContainer += (nuint)(((byte*)(srcBuffer))[3]) << 24;
|
||||
}
|
||||
|
||||
|
||||
goto case 3;
|
||||
case 3:
|
||||
{
|
||||
bitD->bitContainer += (nuint)(((byte*)(srcBuffer))[2]) << 16;
|
||||
}
|
||||
|
||||
|
||||
goto case 2;
|
||||
case 2:
|
||||
{
|
||||
bitD->bitContainer += (nuint)(((byte*)(srcBuffer))[1]) << 8;
|
||||
}
|
||||
|
||||
|
||||
goto default;
|
||||
default:
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
byte lastByte = ((byte*)(srcBuffer))[srcSize - 1];
|
||||
|
||||
bitD->bitsConsumed = lastByte != 0 ? 8 - BIT_highbit32(lastByte) : 0;
|
||||
if (lastByte == 0)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)));
|
||||
}
|
||||
}
|
||||
|
||||
bitD->bitsConsumed += (uint)((nuint)(sizeof(nuint)) - srcSize) * 8;
|
||||
}
|
||||
|
||||
return srcSize;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_getUpperBits(nuint bitContainer, uint start)
|
||||
{
|
||||
return bitContainer >> (int)start;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBits)
|
||||
{
|
||||
uint regMask = (uint)((nuint)(sizeof(nuint)) * 8 - 1);
|
||||
|
||||
assert(nbBits < ((nuint)(sizeof(uint) * 32) / (nuint)(sizeof(uint))));
|
||||
return (bitContainer >> (int)(start & regMask)) & BIT_mask[nbBits];
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits)
|
||||
{
|
||||
assert(nbBits < ((nuint)(sizeof(uint) * 32) / (nuint)(sizeof(uint))));
|
||||
return bitContainer & BIT_mask[nbBits];
|
||||
}
|
||||
|
||||
/*! BIT_lookBits() :
|
||||
* Provides next n bits from local register.
|
||||
* local register is not modified.
|
||||
* On 32-bits, maxNbBits==24.
|
||||
* On 64-bits, maxNbBits==56.
|
||||
* @return : value extracted */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_lookBits(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
return BIT_getMiddleBits(bitD->bitContainer, (uint)(((nuint)(sizeof(nuint)) * 8) - bitD->bitsConsumed - nbBits), nbBits);
|
||||
}
|
||||
|
||||
/*! BIT_lookBitsFast() :
|
||||
* unsafe version; only works if nbBits >= 1 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_lookBitsFast(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
uint regMask = (uint)((nuint)(sizeof(nuint)) * 8 - 1);
|
||||
|
||||
assert(nbBits >= 1);
|
||||
return (bitD->bitContainer << (int)(bitD->bitsConsumed & regMask)) >> (int)(((regMask + 1) - nbBits) & regMask);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void BIT_skipBits(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
bitD->bitsConsumed += nbBits;
|
||||
}
|
||||
|
||||
/*! BIT_readBits() :
|
||||
* Read (consume) next n bits from local register and update.
|
||||
* Pay attention to not read more than nbBits contained into local register.
|
||||
* @return : extracted value. */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_readBits(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
nuint value = BIT_lookBits(bitD, nbBits);
|
||||
|
||||
BIT_skipBits(bitD, nbBits);
|
||||
return value;
|
||||
}
|
||||
|
||||
/*! BIT_readBitsFast() :
|
||||
* unsafe version; only works only if nbBits >= 1 */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint BIT_readBitsFast(BIT_DStream_t* bitD, uint nbBits)
|
||||
{
|
||||
nuint value = BIT_lookBitsFast(bitD, nbBits);
|
||||
|
||||
assert(nbBits >= 1);
|
||||
BIT_skipBits(bitD, nbBits);
|
||||
return value;
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStreamFast() :
|
||||
* Similar to BIT_reloadDStream(), but with two differences:
|
||||
* 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
|
||||
* 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
|
||||
* point you must use BIT_reloadDStream() to reload.
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
|
||||
{
|
||||
if ((bitD->ptr < bitD->limitPtr))
|
||||
{
|
||||
return BIT_DStream_status.BIT_DStream_overflow;
|
||||
}
|
||||
|
||||
assert(bitD->bitsConsumed <= (nuint)(sizeof(nuint)) * 8);
|
||||
bitD->ptr -= bitD->bitsConsumed >> 3;
|
||||
bitD->bitsConsumed &= 7;
|
||||
bitD->bitContainer = MEM_readLEST((void*)bitD->ptr);
|
||||
return BIT_DStream_status.BIT_DStream_unfinished;
|
||||
}
|
||||
|
||||
/*! BIT_reloadDStream() :
|
||||
* Refill `bitD` from buffer previously set in BIT_initDStream() .
|
||||
* This function is safe, it guarantees it will not read beyond src buffer.
|
||||
* @return : status of `BIT_DStream_t` internal register.
|
||||
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
|
||||
{
|
||||
if (bitD->bitsConsumed > ((nuint)(sizeof(nuint)) * 8))
|
||||
{
|
||||
return BIT_DStream_status.BIT_DStream_overflow;
|
||||
}
|
||||
|
||||
if (bitD->ptr >= bitD->limitPtr)
|
||||
{
|
||||
return BIT_reloadDStreamFast(bitD);
|
||||
}
|
||||
|
||||
if (bitD->ptr == bitD->start)
|
||||
{
|
||||
if (bitD->bitsConsumed < (nuint)(sizeof(nuint)) * 8)
|
||||
{
|
||||
return BIT_DStream_status.BIT_DStream_endOfBuffer;
|
||||
}
|
||||
|
||||
return BIT_DStream_status.BIT_DStream_completed;
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
uint nbBytes = bitD->bitsConsumed >> 3;
|
||||
BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished;
|
||||
|
||||
if (bitD->ptr - nbBytes < bitD->start)
|
||||
{
|
||||
nbBytes = (uint)(bitD->ptr - bitD->start);
|
||||
result = BIT_DStream_status.BIT_DStream_endOfBuffer;
|
||||
}
|
||||
|
||||
bitD->ptr -= nbBytes;
|
||||
bitD->bitsConsumed -= nbBytes * 8;
|
||||
bitD->bitContainer = MEM_readLEST((void*)bitD->ptr);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
/*! BIT_endOfDStream() :
|
||||
* @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
|
||||
*/
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint BIT_endOfDStream(BIT_DStream_t* DStream)
|
||||
{
|
||||
return ((((DStream->ptr == DStream->start) && (DStream->bitsConsumed == (nuint)(sizeof(nuint)) * 8))) ? 1U : 0U);
|
||||
}
|
||||
}
|
||||
}
|
||||
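BIT_lookBitsFast() above extracts the next nbBits by shifting the already-consumed bits off the top of the register and then shifting what remains down into the low bits. A minimal C# rendering of the same arithmetic (a sketch with invented names, assuming a 64-bit register as the port uses for nuint on 64-bit targets):

// Peek at the next nbBits (nbBits >= 1) of a container whose bits are consumed from the top.
static uint LookBits(ulong bitContainer, int bitsConsumed, int nbBits)
{
    const int regMask = 63; // register width - 1
    return (uint)((bitContainer << (bitsConsumed & regMask)) >> ((64 - nbBits) & regMask));
}

// Example: the top byte of the container is returned when nothing has been consumed yet.
// LookBits(0xAB00_0000_0000_0000UL, 0, 8) == 0xAB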
29
src/SharpCompress/Compressors/Zstd/Unsafe/COVER_best_s.cs
Normal file
29
src/SharpCompress/Compressors/Zstd/Unsafe/COVER_best_s.cs
Normal file
@@ -0,0 +1,29 @@
using System;

namespace ZstdSharp.Unsafe
{
/**
* COVER_best_t is used for two purposes:
* 1. Synchronizing threads.
* 2. Saving the best parameters and dictionary.
*
* All of the methods except COVER_best_init() are thread safe if zstd is
* compiled with multithreaded support.
*/
public unsafe partial struct COVER_best_s
{
public int mutex;

public int cond;

public nuint liveJobs;

public void* dict;

public nuint dictSize;

public ZDICT_cover_params_t parameters;

public nuint compressedSize;
}
}
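COVER_best_s above exists to remember the best dictionary produced so far across training jobs (plus a mutex/condition pair used for synchronisation in the C original). As a rough managed analogue of that "keep the smallest compressed size" role, here is a small sketch with invented names; it is not the port's API:

// Remembers the dictionary that achieved the smallest total compressed size so far.
sealed class BestDictionary
{
    private readonly object _gate = new object();

    public ulong CompressedSize { get; private set; } = ulong.MaxValue;
    public byte[] Dictionary { get; private set; }

    public void Offer(ulong compressedSize, byte[] dictionary)
    {
        lock (_gate)
        {
            if (compressedSize < CompressedSize)
            {
                CompressedSize = compressedSize;
                Dictionary = (byte[])dictionary.Clone();
            }
        }
    }
}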
@@ -0,0 +1,16 @@
using System;

namespace ZstdSharp.Unsafe
{
/**
* Struct used for the dictionary selection function.
*/
public unsafe partial struct COVER_dictSelection
{
public byte* dictContent;

public nuint dictSize;

public nuint totalCompressedSize;
}
}
@@ -0,0 +1,14 @@
using System;

namespace ZstdSharp.Unsafe
{
/**
*Number of epochs and size of each epoch.
*/
public partial struct COVER_epoch_info_t
{
public uint num;

public uint size;
}
}
16
src/SharpCompress/Compressors/Zstd/Unsafe/COVER_segment_t.cs
Normal file
16
src/SharpCompress/Compressors/Zstd/Unsafe/COVER_segment_t.cs
Normal file
@@ -0,0 +1,16 @@
using System;

namespace ZstdSharp.Unsafe
{
/**
* A segment is a range in the source as well as the score of the segment.
*/
public partial struct COVER_segment_t
{
public uint begin;

public uint end;

public uint score;
}
}
411
src/SharpCompress/Compressors/Zstd/Unsafe/Cover.cs
Normal file
411
src/SharpCompress/Compressors/Zstd/Unsafe/Cover.cs
Normal file
@@ -0,0 +1,411 @@
|
||||
using System;
|
||||
using static ZstdSharp.UnsafeHelper;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
public static int g_displayLevel = 2;
|
||||
|
||||
/**
|
||||
* Returns the sum of the sample sizes.
|
||||
*/
|
||||
public static nuint COVER_sum(nuint* samplesSizes, uint nbSamples)
|
||||
{
|
||||
nuint sum = 0;
|
||||
uint i;
|
||||
|
||||
for (i = 0; i < nbSamples; ++i)
|
||||
{
|
||||
sum += samplesSizes[i];
|
||||
}
|
||||
|
||||
return sum;
|
||||
}
|
||||
|
||||
/**
|
||||
* Warns the user when their corpus is too small.
|
||||
*/
|
||||
public static void COVER_warnOnSmallCorpus(nuint maxDictSize, nuint nbDmers, int displayLevel)
|
||||
{
|
||||
double ratio = (double)(nbDmers) / maxDictSize;
|
||||
|
||||
if (ratio >= 10)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if (displayLevel >= 1)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes the number of epochs and the size of each epoch.
|
||||
* We will make sure that each epoch gets at least 10 * k bytes.
|
||||
*
|
||||
* The COVER algorithms divide the data up into epochs of equal size and
|
||||
* select one segment from each epoch.
|
||||
*
|
||||
* @param maxDictSize The maximum allowed dictionary size.
|
||||
* @param nbDmers The number of dmers we are training on.
|
||||
* @param k The parameter k (segment size).
|
||||
* @param passes The target number of passes over the dmer corpus.
|
||||
* More passes means a better dictionary.
|
||||
*/
|
||||
public static COVER_epoch_info_t COVER_computeEpochs(uint maxDictSize, uint nbDmers, uint k, uint passes)
|
||||
{
|
||||
uint minEpochSize = k * 10;
|
||||
COVER_epoch_info_t epochs;
|
||||
|
||||
epochs.num = (uint)((1) > (maxDictSize / k / passes) ? (1) : (maxDictSize / k / passes));
|
||||
epochs.size = nbDmers / epochs.num;
|
||||
if (epochs.size >= minEpochSize)
|
||||
{
|
||||
assert(epochs.size * epochs.num <= nbDmers);
|
||||
return epochs;
|
||||
}
|
||||
|
||||
epochs.size = ((minEpochSize) < (nbDmers) ? (minEpochSize) : (nbDmers));
|
||||
epochs.num = nbDmers / epochs.size;
|
||||
assert(epochs.size * epochs.num <= nbDmers);
|
||||
return epochs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks total compressed size of a dictionary
|
||||
*/
|
||||
public static nuint COVER_checkTotalCompressedSize(ZDICT_cover_params_t parameters, nuint* samplesSizes, byte* samples, nuint* offsets, nuint nbTrainSamples, nuint nbSamples, byte* dict, nuint dictBufferCapacity)
|
||||
{
|
||||
nuint totalCompressedSize = (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
ZSTD_CCtx_s* cctx;
|
||||
ZSTD_CDict_s* cdict;
|
||||
void* dst;
|
||||
nuint dstCapacity;
|
||||
nuint i;
|
||||
|
||||
|
||||
{
|
||||
nuint maxSampleSize = 0;
|
||||
|
||||
i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
|
||||
for (; i < nbSamples; ++i)
|
||||
{
|
||||
maxSampleSize = ((samplesSizes[i]) > (maxSampleSize) ? (samplesSizes[i]) : (maxSampleSize));
|
||||
}
|
||||
|
||||
dstCapacity = ZSTD_compressBound(maxSampleSize);
|
||||
dst = malloc(dstCapacity);
|
||||
}
|
||||
|
||||
cctx = ZSTD_createCCtx();
|
||||
cdict = ZSTD_createCDict((void*)dict, dictBufferCapacity, parameters.zParams.compressionLevel);
|
||||
if (dst == null || cctx == null || cdict == null)
|
||||
{
|
||||
goto _compressCleanup;
|
||||
}
|
||||
|
||||
totalCompressedSize = dictBufferCapacity;
|
||||
i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
|
||||
for (; i < nbSamples; ++i)
|
||||
{
|
||||
nuint size = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, (void*)(samples + offsets[i]), samplesSizes[i], cdict);
|
||||
|
||||
if ((ERR_isError(size)) != 0)
|
||||
{
|
||||
totalCompressedSize = size;
|
||||
goto _compressCleanup;
|
||||
}
|
||||
|
||||
totalCompressedSize += size;
|
||||
}
|
||||
|
||||
_compressCleanup:
|
||||
ZSTD_freeCCtx(cctx);
|
||||
ZSTD_freeCDict(cdict);
|
||||
if (dst != null)
|
||||
{
|
||||
free(dst);
|
||||
}
|
||||
|
||||
return totalCompressedSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the `COVER_best_t`.
|
||||
*/
|
||||
public static void COVER_best_init(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
|
||||
best->liveJobs = 0;
|
||||
best->dict = null;
|
||||
best->dictSize = 0;
|
||||
best->compressedSize = unchecked((nuint)(-1));
|
||||
memset((void*)&best->parameters, 0, (nuint)(sizeof(ZDICT_cover_params_t)));
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait until liveJobs == 0.
|
||||
*/
|
||||
public static void COVER_best_wait(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
while (best->liveJobs != 0)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Call COVER_best_wait() and then destroy the COVER_best_t.
|
||||
*/
|
||||
public static void COVER_best_destroy(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
COVER_best_wait(best);
|
||||
if (best->dict != null)
|
||||
{
|
||||
free(best->dict);
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when a thread is about to be launched.
|
||||
* Increments liveJobs.
|
||||
*/
|
||||
public static void COVER_best_start(COVER_best_s* best)
|
||||
{
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
++best->liveJobs;
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when a thread finishes executing, both on error or success.
|
||||
* Decrements liveJobs and signals any waiting threads if liveJobs == 0.
|
||||
* If this dictionary is the best so far save it and its parameters.
|
||||
*/
|
||||
public static void COVER_best_finish(COVER_best_s* best, ZDICT_cover_params_t parameters, COVER_dictSelection selection)
|
||||
{
|
||||
void* dict = (void*)selection.dictContent;
|
||||
nuint compressedSize = selection.totalCompressedSize;
|
||||
nuint dictSize = selection.dictSize;
|
||||
|
||||
if (best == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
nuint liveJobs;
|
||||
|
||||
|
||||
--best->liveJobs;
|
||||
liveJobs = best->liveJobs;
|
||||
if (compressedSize < best->compressedSize)
|
||||
{
|
||||
if (best->dict == null || best->dictSize < dictSize)
|
||||
{
|
||||
if (best->dict != null)
|
||||
{
|
||||
free(best->dict);
|
||||
}
|
||||
|
||||
best->dict = malloc(dictSize);
|
||||
if (best->dict == null)
|
||||
{
|
||||
best->compressedSize = (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
best->dictSize = 0;
|
||||
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (dict != null)
|
||||
{
|
||||
memcpy(best->dict, dict, dictSize);
|
||||
best->dictSize = dictSize;
|
||||
best->parameters = parameters;
|
||||
best->compressedSize = compressedSize;
|
||||
}
|
||||
}
|
||||
|
||||
if (liveJobs == 0)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Error function for COVER_selectDict function. Returns a struct where
|
||||
* return.totalCompressedSize is a ZSTD error.
|
||||
*/
|
||||
public static COVER_dictSelection COVER_dictSelectionError(nuint error)
|
||||
{
|
||||
COVER_dictSelection selection = new COVER_dictSelection
|
||||
{
|
||||
dictContent = null,
|
||||
dictSize = 0,
|
||||
totalCompressedSize = error,
|
||||
};
|
||||
|
||||
return selection;
|
||||
}
|
||||
|
||||
/**
|
||||
* Error function for COVER_selectDict function. Checks if the return
|
||||
* value is an error.
|
||||
*/
|
||||
public static uint COVER_dictSelectionIsError(COVER_dictSelection selection)
|
||||
{
|
||||
return ((((ERR_isError(selection.totalCompressedSize)) != 0 || selection.dictContent == null)) ? 1U : 0U);
|
||||
}
|
||||
|
||||
/**
|
||||
* Always call after selectDict is called to free up used memory from
|
||||
* newly created dictionary.
|
||||
*/
|
||||
public static void COVER_dictSelectionFree(COVER_dictSelection selection)
|
||||
{
|
||||
free((void*)selection.dictContent);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called to finalize the dictionary and select one based on whether or not
|
||||
* the shrink-dict flag was enabled. If enabled the dictionary used is the
|
||||
* smallest dictionary within a specified regression of the compressed size
|
||||
* from the largest dictionary.
|
||||
*/
|
||||
public static COVER_dictSelection COVER_selectDict(byte* customDictContent, nuint dictBufferCapacity, nuint dictContentSize, byte* samplesBuffer, nuint* samplesSizes, uint nbFinalizeSamples, nuint nbCheckSamples, nuint nbSamples, ZDICT_cover_params_t @params, nuint* offsets, nuint totalCompressedSize)
|
||||
{
|
||||
nuint largestDict = 0;
|
||||
nuint largestCompressed = 0;
|
||||
byte* customDictContentEnd = customDictContent + dictContentSize;
|
||||
byte* largestDictbuffer = (byte*)(malloc(dictBufferCapacity));
|
||||
byte* candidateDictBuffer = (byte*)(malloc(dictBufferCapacity));
|
||||
double regressionTolerance = ((double)(@params.shrinkDictMaxRegression) / 100.0) + 1.00;
|
||||
|
||||
if (largestDictbuffer == null || candidateDictBuffer == null)
|
||||
{
|
||||
free((void*)largestDictbuffer);
|
||||
free((void*)candidateDictBuffer);
|
||||
return COVER_dictSelectionError(dictContentSize);
|
||||
}
|
||||
|
||||
memcpy((void*)largestDictbuffer, (void*)customDictContent, dictContentSize);
|
||||
dictContentSize = ZDICT_finalizeDictionary((void*)largestDictbuffer, dictBufferCapacity, (void*)customDictContent, dictContentSize, (void*)samplesBuffer, samplesSizes, nbFinalizeSamples, @params.zParams);
|
||||
if ((ZDICT_isError(dictContentSize)) != 0)
|
||||
{
|
||||
free((void*)largestDictbuffer);
|
||||
free((void*)candidateDictBuffer);
|
||||
return COVER_dictSelectionError(dictContentSize);
|
||||
}
|
||||
|
||||
totalCompressedSize = COVER_checkTotalCompressedSize(@params, samplesSizes, samplesBuffer, offsets, nbCheckSamples, nbSamples, largestDictbuffer, dictContentSize);
|
||||
if ((ERR_isError(totalCompressedSize)) != 0)
|
||||
{
|
||||
free((void*)largestDictbuffer);
|
||||
free((void*)candidateDictBuffer);
|
||||
return COVER_dictSelectionError(totalCompressedSize);
|
||||
}
|
||||
|
||||
if (@params.shrinkDict == 0)
|
||||
{
|
||||
COVER_dictSelection selection = new COVER_dictSelection
|
||||
{
|
||||
dictContent = largestDictbuffer,
|
||||
dictSize = dictContentSize,
|
||||
totalCompressedSize = totalCompressedSize,
|
||||
};
|
||||
|
||||
free((void*)candidateDictBuffer);
|
||||
return selection;
|
||||
}
|
||||
|
||||
largestDict = dictContentSize;
|
||||
largestCompressed = totalCompressedSize;
|
||||
dictContentSize = 256;
|
||||
while (dictContentSize < largestDict)
|
||||
{
|
||||
memcpy((void*)candidateDictBuffer, (void*)largestDictbuffer, largestDict);
|
||||
dictContentSize = ZDICT_finalizeDictionary((void*)candidateDictBuffer, dictBufferCapacity, (void*)(customDictContentEnd - dictContentSize), dictContentSize, (void*)samplesBuffer, samplesSizes, nbFinalizeSamples, @params.zParams);
|
||||
if ((ZDICT_isError(dictContentSize)) != 0)
|
||||
{
|
||||
free((void*)largestDictbuffer);
|
||||
free((void*)candidateDictBuffer);
|
||||
return COVER_dictSelectionError(dictContentSize);
|
||||
}
|
||||
|
||||
totalCompressedSize = COVER_checkTotalCompressedSize(@params, samplesSizes, samplesBuffer, offsets, nbCheckSamples, nbSamples, candidateDictBuffer, dictContentSize);
|
||||
if ((ERR_isError(totalCompressedSize)) != 0)
|
||||
{
|
||||
free((void*)largestDictbuffer);
|
||||
free((void*)candidateDictBuffer);
|
||||
return COVER_dictSelectionError(totalCompressedSize);
|
||||
}
|
||||
|
||||
if (totalCompressedSize <= largestCompressed * regressionTolerance)
|
||||
{
|
||||
COVER_dictSelection selection = new COVER_dictSelection
|
||||
{
|
||||
dictContent = candidateDictBuffer,
|
||||
dictSize = dictContentSize,
|
||||
totalCompressedSize = totalCompressedSize,
|
||||
};
|
||||
|
||||
free((void*)largestDictbuffer);
|
||||
return selection;
|
||||
}
|
||||
|
||||
dictContentSize *= 2;
|
||||
}
|
||||
|
||||
dictContentSize = largestDict;
|
||||
totalCompressedSize = largestCompressed;
|
||||
|
||||
{
|
||||
COVER_dictSelection selection = new COVER_dictSelection
|
||||
{
|
||||
dictContent = largestDictbuffer,
|
||||
dictSize = dictContentSize,
|
||||
totalCompressedSize = totalCompressedSize,
|
||||
};
|
||||
|
||||
free((void*)candidateDictBuffer);
|
||||
return selection;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
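COVER_computeEpochs() in Cover.cs above derives the number of epochs from the dictionary budget, epochs.num = max(1, maxDictSize / k / passes) and epochs.size = nbDmers / epochs.num, falling back to a minimum epoch size of 10 * k when the data is small. A standalone C# sketch with one worked input (invented helper name, same arithmetic):

// Mirrors the epoch arithmetic shown above for a single concrete input.
static (uint num, uint size) ComputeEpochs(uint maxDictSize, uint nbDmers, uint k, uint passes)
{
    uint minEpochSize = k * 10;
    uint num = Math.Max(1u, maxDictSize / k / passes);
    uint size = nbDmers / num;
    if (size >= minEpochSize)
        return (num, size);

    // Not enough dmers per epoch: clamp the epoch size and recompute the count.
    size = Math.Min(minEpochSize, nbDmers);
    num = nbDmers / size;
    return (num, size);
}

// Example: maxDictSize = 112640, nbDmers = 1000000, k = 1024, passes = 10
// => num = max(1, 112640 / 1024 / 10) = 11, size = 1000000 / 11 = 90909 (>= 10240, so accepted).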
18
src/SharpCompress/Compressors/Zstd/Unsafe/DTableDesc.cs
Normal file
18
src/SharpCompress/Compressors/Zstd/Unsafe/DTableDesc.cs
Normal file
@@ -0,0 +1,18 @@
using System;

namespace ZstdSharp.Unsafe
{
/*-***************************/
/* generic DTableDesc */
/*-***************************/
public partial struct DTableDesc
{
public byte maxTableLog;

public byte tableType;

public byte tableLog;

public byte reserved;
}
}
16
src/SharpCompress/Compressors/Zstd/Unsafe/EStats_ress_t.cs
Normal file
16
src/SharpCompress/Compressors/Zstd/Unsafe/EStats_ress_t.cs
Normal file
@@ -0,0 +1,16 @@
using System;

namespace ZstdSharp.Unsafe
{
public unsafe partial struct EStats_ress_t
{
/* dictionary */
public ZSTD_CDict_s* dict;

/* working context */
public ZSTD_CCtx_s* zc;

/* must be ZSTD_BLOCKSIZE_MAX allocated */
public void* workPlace;
}
}
425
src/SharpCompress/Compressors/Zstd/Unsafe/EntropyCommon.cs
Normal file
425
src/SharpCompress/Compressors/Zstd/Unsafe/EntropyCommon.cs
Normal file
@@ -0,0 +1,425 @@
|
||||
using System;
|
||||
using System.Numerics;
|
||||
using System.Runtime.CompilerServices;
|
||||
using static ZstdSharp.UnsafeHelper;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/*=== Version ===*/
|
||||
public static uint FSE_versionNumber()
|
||||
{
|
||||
return (uint)((0 * 100 * 100 + 9 * 100 + 0));
|
||||
}
|
||||
|
||||
/*=== Error Management ===*/
|
||||
public static uint FSE_isError(nuint code)
|
||||
{
|
||||
return ERR_isError(code);
|
||||
}
|
||||
|
||||
public static string FSE_getErrorName(nuint code)
|
||||
{
|
||||
return ERR_getErrorName(code);
|
||||
}
|
||||
|
||||
/* Error Management */
|
||||
public static uint HUF_isError(nuint code)
|
||||
{
|
||||
return ERR_isError(code);
|
||||
}
|
||||
|
||||
public static string HUF_getErrorName(nuint code)
|
||||
{
|
||||
return ERR_getErrorName(code);
|
||||
}
|
||||
|
||||
/*-**************************************************************
|
||||
* FSE NCount encoding-decoding
|
||||
****************************************************************/
|
||||
[InlineMethod.Inline]
|
||||
private static uint FSE_ctz(uint val)
|
||||
{
|
||||
assert(val != 0);
|
||||
|
||||
{
|
||||
return (uint) BitOperations.TrailingZeroCount(val);
|
||||
}
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint FSE_readNCount_body(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize)
|
||||
{
|
||||
byte* istart = (byte*)(headerBuffer);
|
||||
byte* iend = istart + hbSize;
|
||||
byte* ip = istart;
|
||||
int nbBits;
|
||||
int remaining;
|
||||
int threshold;
|
||||
uint bitStream;
|
||||
int bitCount;
|
||||
uint charnum = 0;
|
||||
uint maxSV1 = *maxSVPtr + 1;
|
||||
int previous0 = 0;
|
||||
|
||||
if (hbSize < 8)
|
||||
{
|
||||
sbyte* buffer = stackalloc sbyte[8];
|
||||
memset(buffer, 0, sizeof(sbyte) * 8);
|
||||
|
||||
memcpy((void*)(buffer), (headerBuffer), (hbSize));
|
||||
|
||||
{
|
||||
nuint countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr, (void*)buffer, (nuint)(8));
|
||||
|
||||
if ((FSE_isError(countSize)) != 0)
|
||||
{
|
||||
return countSize;
|
||||
}
|
||||
|
||||
if (countSize > hbSize)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)));
|
||||
}
|
||||
|
||||
return countSize;
|
||||
}
|
||||
}
|
||||
|
||||
assert(hbSize >= 8);
|
||||
memset((void*)(normalizedCounter), (0), ((*maxSVPtr + 1) * (nuint)(sizeof(short))));
|
||||
bitStream = MEM_readLE32((void*)ip);
|
||||
nbBits = (int)((bitStream & 0xF) + 5);
|
||||
if (nbBits > 15)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
|
||||
}
|
||||
|
||||
bitStream >>= 4;
|
||||
bitCount = 4;
|
||||
*tableLogPtr = (uint)nbBits;
|
||||
remaining = (1 << nbBits) + 1;
|
||||
threshold = 1 << nbBits;
|
||||
nbBits++;
|
||||
for (;;)
|
||||
{
|
||||
if (previous0 != 0)
|
||||
{
|
||||
int repeats = (int)(FSE_ctz(~bitStream | 0x80000000) >> 1);
|
||||
|
||||
while (repeats >= 12)
|
||||
{
|
||||
charnum += (uint)(3 * 12);
|
||||
if ((ip <= iend - 7))
|
||||
{
|
||||
ip += 3;
|
||||
}
|
||||
else
|
||||
{
|
||||
bitCount -= (int)(8 * (iend - 7 - ip));
|
||||
bitCount &= 31;
|
||||
ip = iend - 4;
|
||||
}
|
||||
|
||||
bitStream = MEM_readLE32((void*)ip) >> bitCount;
|
||||
repeats = (int)(FSE_ctz(~bitStream | 0x80000000) >> 1);
|
||||
}
|
||||
|
||||
charnum += (uint)(3 * repeats);
|
||||
bitStream >>= 2 * repeats;
|
||||
bitCount += 2 * repeats;
|
||||
assert((bitStream & 3) < 3);
|
||||
charnum += bitStream & 3;
|
||||
bitCount += 2;
|
||||
if (charnum >= maxSV1)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4))
|
||||
{
|
||||
assert((bitCount >> 3) <= 3);
|
||||
ip += bitCount >> 3;
|
||||
bitCount &= 7;
|
||||
}
|
||||
else
|
||||
{
|
||||
bitCount -= (int)(8 * (iend - 4 - ip));
|
||||
bitCount &= 31;
|
||||
ip = iend - 4;
|
||||
}
|
||||
|
||||
bitStream = MEM_readLE32((void*)ip) >> bitCount;
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
int max = (2 * threshold - 1) - remaining;
|
||||
int count;
|
||||
|
||||
if ((bitStream & (uint)((threshold - 1))) < (uint)(max))
|
||||
{
|
||||
count = (int)(bitStream & (uint)((threshold - 1)));
|
||||
bitCount += nbBits - 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
count = (int)(bitStream & (uint)((2 * threshold - 1)));
|
||||
if (count >= threshold)
|
||||
{
|
||||
count -= max;
|
||||
}
|
||||
|
||||
bitCount += nbBits;
|
||||
}
|
||||
|
||||
count--;
|
||||
if (count >= 0)
|
||||
{
|
||||
remaining -= count;
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(count == -1);
|
||||
remaining += count;
|
||||
}
|
||||
|
||||
normalizedCounter[charnum++] = (short)(count);
|
||||
previous0 = (count == 0 ? 1 : 0);
|
||||
assert(threshold > 1);
|
||||
if (remaining < threshold)
|
||||
{
|
||||
if (remaining <= 1)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
nbBits = (int)(BIT_highbit32((uint)remaining) + 1);
|
||||
threshold = 1 << (nbBits - 1);
|
||||
}
|
||||
|
||||
if (charnum >= maxSV1)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4))
|
||||
{
|
||||
ip += bitCount >> 3;
|
||||
bitCount &= 7;
|
||||
}
|
||||
else
|
||||
{
|
||||
bitCount -= (int)(8 * (iend - 4 - ip));
|
||||
bitCount &= 31;
|
||||
ip = iend - 4;
|
||||
}
|
||||
|
||||
bitStream = MEM_readLE32((void*)ip) >> bitCount;
|
||||
}
|
||||
}
|
||||
|
||||
if (remaining != 1)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)));
|
||||
}
|
||||
|
||||
if (charnum > maxSV1)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall)));
|
||||
}
|
||||
|
||||
if (bitCount > 32)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)));
|
||||
}
|
||||
|
||||
*maxSVPtr = charnum - 1;
|
||||
ip += (bitCount + 7) >> 3;
|
||||
return (nuint)(ip - istart);
|
||||
}
|
||||
|
||||
/* Avoids the FORCE_INLINE of the _body() function. */
|
||||
private static nuint FSE_readNCount_body_default(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize)
|
||||
{
|
||||
return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
|
||||
}
|
||||
|
||||
private static nuint FSE_readNCount_body_bmi2(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize)
|
||||
{
|
||||
return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
|
||||
}
|
||||
|
||||
/*! FSE_readNCount_bmi2():
|
||||
* Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
|
||||
*/
|
||||
public static nuint FSE_readNCount_bmi2(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize, int bmi2)
|
||||
{
|
||||
if (bmi2 != 0)
|
||||
{
|
||||
return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
|
||||
}
|
||||
|
||||
return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
|
||||
}
|
||||
|
||||
/*! FSE_readNCount():
|
||||
Read compactly saved 'normalizedCounter' from 'rBuffer'.
|
||||
@return : size read from 'rBuffer',
|
||||
or an errorCode, which can be tested using FSE_isError().
|
||||
maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
|
||||
public static nuint FSE_readNCount(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize)
|
||||
{
|
||||
return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, 0);
|
||||
}
|
||||
|
||||
/*! HUF_readStats() :
|
||||
Read compact Huffman tree, saved by HUF_writeCTable().
|
||||
`huffWeight` is destination buffer.
|
||||
`rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
|
||||
@return : size read from `src` , or an error Code .
|
||||
Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
|
||||
*/
|
||||
public static nuint HUF_readStats(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize)
|
||||
{
|
||||
uint* wksp = stackalloc uint[218];
|
||||
|
||||
return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, (void*)wksp, (nuint)(sizeof(uint) * 218), 0);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static nuint HUF_readStats_body(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize, void* workSpace, nuint wkspSize, int bmi2)
|
||||
{
|
||||
uint weightTotal;
|
||||
byte* ip = (byte*)(src);
|
||||
nuint iSize;
|
||||
nuint oSize;
|
||||
|
||||
if (srcSize == 0)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)));
|
||||
}
|
||||
|
||||
iSize = ip[0];
|
||||
if (iSize >= 128)
|
||||
{
|
||||
oSize = iSize - 127;
|
||||
iSize = ((oSize + 1) / 2);
|
||||
if (iSize + 1 > srcSize)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)));
|
||||
}
|
||||
|
||||
if (oSize >= hwSize)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)));
|
||||
}
|
||||
|
||||
ip += 1;
|
||||
|
||||
{
|
||||
uint n;
|
||||
|
||||
for (n = 0; n < oSize; n += 2)
|
||||
{
|
||||
huffWeight[n] = (byte)(ip[n / 2] >> 4);
|
||||
huffWeight[n + 1] = (byte)(ip[n / 2] & 15);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (iSize + 1 > srcSize)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)));
|
||||
}
|
||||
|
||||
oSize = FSE_decompress_wksp_bmi2((void*)huffWeight, hwSize - 1, (void*)(ip + 1), iSize, 6, workSpace, wkspSize, bmi2);
|
||||
if ((FSE_isError(oSize)) != 0)
|
||||
{
|
||||
return oSize;
|
||||
}
|
||||
}
|
||||
|
||||
memset((void*)(rankStats), (0), ((uint)((12 + 1)) * (nuint)(sizeof(uint))));
|
||||
weightTotal = 0;
|
||||
|
||||
{
|
||||
uint n;
|
||||
|
||||
for (n = 0; n < oSize; n++)
|
||||
{
|
||||
if (huffWeight[n] >= 12)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)));
|
||||
}
|
||||
|
||||
rankStats[huffWeight[n]]++;
|
||||
weightTotal += (uint)((1 << (int)(huffWeight[n])) >> 1);
|
||||
}
|
||||
}
|
||||
|
||||
if (weightTotal == 0)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)));
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
uint tableLog = BIT_highbit32(weightTotal) + 1;
|
||||
|
||||
if (tableLog > 12)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)));
|
||||
}
|
||||
|
||||
*tableLogPtr = tableLog;
|
||||
|
||||
{
|
||||
uint total = (uint)(1 << (int)tableLog);
|
||||
uint rest = total - weightTotal;
|
||||
uint verif = (uint)(1 << (int)(BIT_highbit32(rest)));
|
||||
uint lastWeight = BIT_highbit32(rest) + 1;
|
||||
|
||||
if (verif != rest)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)));
|
||||
}
|
||||
|
||||
huffWeight[oSize] = (byte)(lastWeight);
|
||||
rankStats[lastWeight]++;
|
||||
}
|
||||
}
|
||||
|
||||
if ((rankStats[1] < 2) || (rankStats[1] & 1) != 0)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)));
|
||||
}
|
||||
|
||||
*nbSymbolsPtr = (uint)(oSize + 1);
|
||||
return iSize + 1;
|
||||
}
|
||||
|
||||
/* Avoids the FORCE_INLINE of the _body() function. */
|
||||
private static nuint HUF_readStats_body_default(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize, void* workSpace, nuint wkspSize)
|
||||
{
|
||||
return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
|
||||
}
|
||||
|
||||
private static nuint HUF_readStats_body_bmi2(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize, void* workSpace, nuint wkspSize)
|
||||
{
|
||||
return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
|
||||
}
|
||||
|
||||
public static nuint HUF_readStats_wksp(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize, void* workSpace, nuint wkspSize, int bmi2)
|
||||
{
|
||||
if (bmi2 != 0)
|
||||
{
|
||||
return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
|
||||
}
|
||||
|
||||
return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
|
||||
}
|
||||
}
|
||||
}
|
||||
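In HUF_readStats_body() above, the direct-representation branch (iSize >= 128) stores two 4-bit Huffman weights per input byte and unpacks them as ip[n/2] >> 4 and ip[n/2] & 15. A tiny standalone C# sketch of that nibble unpacking (invented helper name; a bounds check is added for an odd weight count):

// Unpack 4-bit weights stored two per byte: high nibble first, then low nibble.
static byte[] UnpackNibbleWeights(byte[] packed, int weightCount)
{
    var weights = new byte[weightCount];
    for (int n = 0; n < weightCount; n += 2)
    {
        weights[n] = (byte)(packed[n / 2] >> 4);
        if (n + 1 < weightCount)
            weights[n + 1] = (byte)(packed[n / 2] & 15);
    }
    return weights;
}

// Example: packed = { 0x53, 0x20 }, weightCount = 3 => weights = { 5, 3, 2 }.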
184
src/SharpCompress/Compressors/Zstd/Unsafe/ErrorPrivate.cs
Normal file
184
src/SharpCompress/Compressors/Zstd/Unsafe/ErrorPrivate.cs
Normal file
@@ -0,0 +1,184 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using static ZstdSharp.UnsafeHelper;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint ERR_isError(nuint code)
|
||||
{
|
||||
return (((code > (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode))))) ? 1U : 0U);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static ZSTD_ErrorCode ERR_getErrorCode(nuint code)
|
||||
{
|
||||
if ((ERR_isError(code)) == 0)
|
||||
{
|
||||
return (ZSTD_ErrorCode)(0);
|
||||
}
|
||||
|
||||
return (ZSTD_ErrorCode)(0 - code);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static string ERR_getErrorName(nuint code)
|
||||
{
|
||||
return ERR_getErrorString(ERR_getErrorCode(code));
|
||||
}
|
||||
|
||||
/*-****************************************
|
||||
* Error Strings
|
||||
******************************************/
|
||||
public static string ERR_getErrorString(ZSTD_ErrorCode code)
|
||||
{
|
||||
var notErrorCode = "Unspecified error code";
|
||||
|
||||
switch (code)
|
||||
{
|
||||
case ZSTD_ErrorCode.ZSTD_error_no_error:
|
||||
{
|
||||
return "No error detected";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_GENERIC:
|
||||
{
|
||||
return "Error (generic)";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_prefix_unknown:
|
||||
{
|
||||
return "Unknown frame descriptor";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_version_unsupported:
|
||||
{
|
||||
return "Version not supported";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported:
|
||||
{
|
||||
return "Unsupported frame parameter";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge:
|
||||
{
|
||||
return "Frame requires too much memory for decoding";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_corruption_detected:
|
||||
{
|
||||
return "Corrupted block detected";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_checksum_wrong:
|
||||
{
|
||||
return "Restored data doesn't match checksum";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_parameter_unsupported:
|
||||
{
|
||||
return "Unsupported parameter";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound:
|
||||
{
|
||||
return "Parameter is out of bound";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_init_missing:
|
||||
{
|
||||
return "Context should be init first";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_memory_allocation:
|
||||
{
|
||||
return "Allocation error : not enough memory";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall:
|
||||
{
|
||||
return "workSpace buffer is not large enough";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_stage_wrong:
|
||||
{
|
||||
return "Operation not authorized at current processing stage";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge:
|
||||
{
|
||||
return "tableLog requires too much memory : unsupported";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge:
|
||||
{
|
||||
return "Unsupported max Symbol Value : too large";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall:
|
||||
{
|
||||
return "Specified maxSymbolValue is too small";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted:
|
||||
{
|
||||
return "Dictionary is corrupted";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_dictionary_wrong:
|
||||
{
|
||||
return "Dictionary mismatch";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed:
|
||||
{
|
||||
return "Cannot create Dictionary from provided samples";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall:
|
||||
{
|
||||
return "Destination buffer is too small";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_srcSize_wrong:
|
||||
{
|
||||
return "Src size is incorrect";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_dstBuffer_null:
|
||||
{
|
||||
return "Operation on NULL destination buffer";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_frameIndex_tooLarge:
|
||||
{
|
||||
return "Frame index is too large";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_seekableIO:
|
||||
{
|
||||
return "An I/O error occurred when reading/seeking";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong:
|
||||
{
|
||||
return "Destination buffer is wrong";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_srcBuffer_wrong:
|
||||
{
|
||||
return "Source buffer is wrong";
|
||||
}
|
||||
|
||||
case ZSTD_ErrorCode.ZSTD_error_maxCode:
|
||||
default:
|
||||
{
|
||||
return notErrorCode;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
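ErrorPrivate.cs above shows the error-code convention used throughout the port: a failing function returns unchecked((nuint)(-(int)code)), and ERR_isError() detects it by comparing against the negation of ZSTD_error_maxCode, so ordinary sizes and error codes share one nuint return value. A compact C# sketch of that convention (standalone; the maxCode value below is only an example):

// Encode an error as the unsigned wrap-around of its negated code, like the port does.
static nuint MakeError(int code) => unchecked((nuint)(-code));

// Every value above unchecked((nuint)(-maxCode)) sits in the "error band".
static bool IsError(nuint value, int maxCode) => value > unchecked((nuint)(-maxCode));

// With maxCode = 120: IsError(MakeError(72), 120) is true,
// while an ordinary size such as (nuint)4096 is not.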
@@ -0,0 +1,16 @@
using System;

namespace ZstdSharp.Unsafe
{
/*-*************************************
* Acceleration
***************************************/
public partial struct FASTCOVER_accel_t
{
/* Percentage of training samples used for ZDICT_finalizeDictionary */
public uint finalize;

/* Number of dmer skipped between each dmer counted in computeFrequency */
public uint skip;
}
}
32
src/SharpCompress/Compressors/Zstd/Unsafe/FASTCOVER_ctx_t.cs
Normal file
32
src/SharpCompress/Compressors/Zstd/Unsafe/FASTCOVER_ctx_t.cs
Normal file
@@ -0,0 +1,32 @@
using System;

namespace ZstdSharp.Unsafe
{
/*-*************************************
* Context
***************************************/
public unsafe partial struct FASTCOVER_ctx_t
{
public byte* samples;

public nuint* offsets;

public nuint* samplesSizes;

public nuint nbSamples;

public nuint nbTrainSamples;

public nuint nbTestSamples;

public nuint nbDmers;

public uint* freqs;

public uint d;

public uint f;

public FASTCOVER_accel_t accelParams;
}
}
@@ -0,0 +1,18 @@
using System;

namespace ZstdSharp.Unsafe
{
/**
* Parameters for FASTCOVER_tryParameters().
*/
public unsafe partial struct FASTCOVER_tryParameters_data_s
{
public FASTCOVER_ctx_t* ctx;

public COVER_best_s* best;

public nuint dictBufferCapacity;

public ZDICT_cover_params_t parameters;
}
}
22
src/SharpCompress/Compressors/Zstd/Unsafe/FSE_CState_t.cs
Normal file
22
src/SharpCompress/Compressors/Zstd/Unsafe/FSE_CState_t.cs
Normal file
@@ -0,0 +1,22 @@
using System;

namespace ZstdSharp.Unsafe
{
/* *****************************************
* FSE symbol compression API
*******************************************/
/*!
This API consists of small unitary functions, which highly benefit from being inlined.
Hence their body are included in next section.
*/
public unsafe partial struct FSE_CState_t
{
public nint value;

public void* stateTable;

public void* symbolTT;

public uint stateLog;
}
}
15
src/SharpCompress/Compressors/Zstd/Unsafe/FSE_DState_t.cs
Normal file
15
src/SharpCompress/Compressors/Zstd/Unsafe/FSE_DState_t.cs
Normal file
@@ -0,0 +1,15 @@
using System;

namespace ZstdSharp.Unsafe
{
/* *****************************************
* FSE symbol decompression API
*******************************************/
public unsafe partial struct FSE_DState_t
{
public nuint state;

/* precise table may vary, depending on U16 */
public void* table;
}
}
@@ -0,0 +1,12 @@
using System;

namespace ZstdSharp.Unsafe
{
/* ====== Decompression ====== */
public partial struct FSE_DTableHeader
{
public ushort tableLog;

public ushort fastMode;
}
}
@@ -0,0 +1,12 @@
using System;

namespace ZstdSharp.Unsafe
{
public unsafe partial struct FSE_DecompressWksp
{
public fixed short ncount[256];

/* Dynamically sized */
public fixed uint dtable[1];
}
}
13
src/SharpCompress/Compressors/Zstd/Unsafe/FSE_decode_t.cs
Normal file
13
src/SharpCompress/Compressors/Zstd/Unsafe/FSE_decode_t.cs
Normal file
@@ -0,0 +1,13 @@
using System;

namespace ZstdSharp.Unsafe
{
public partial struct FSE_decode_t
{
public ushort newState;

public byte symbol;

public byte nbBits;
}
}
11
src/SharpCompress/Compressors/Zstd/Unsafe/FSE_repeat.cs
Normal file
11
src/SharpCompress/Compressors/Zstd/Unsafe/FSE_repeat.cs
Normal file
@@ -0,0 +1,11 @@
using System;

namespace ZstdSharp.Unsafe
{
public enum FSE_repeat
{
FSE_repeat_none,
FSE_repeat_check,
FSE_repeat_valid,
}
}
@@ -0,0 +1,14 @@
using System;

namespace ZstdSharp.Unsafe
{
/* *****************************************
* Implementation of inlined functions
*******************************************/
public partial struct FSE_symbolCompressionTransform
{
public int deltaFindState;

public uint deltaNbBits;
}
}
715
src/SharpCompress/Compressors/Zstd/Unsafe/Fastcover.cs
Normal file
715
src/SharpCompress/Compressors/Zstd/Unsafe/Fastcover.cs
Normal file
@@ -0,0 +1,715 @@
|
||||
using System;
|
||||
using static ZstdSharp.UnsafeHelper;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/*-*************************************
|
||||
* Hash Functions
|
||||
***************************************/
|
||||
/**
|
||||
* Hash the d-byte value pointed to by p and mod 2^f into the frequency vector
|
||||
*/
|
||||
private static nuint FASTCOVER_hashPtrToIndex(void* p, uint f, uint d)
|
||||
{
|
||||
if (d == 6)
|
||||
{
|
||||
return ZSTD_hash6Ptr(p, f);
|
||||
}
|
||||
|
||||
return ZSTD_hash8Ptr(p, f);
|
||||
}
|
||||
|
||||
public static FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = GetArrayPointer(new FASTCOVER_accel_t[11]
|
||||
{
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 100,
|
||||
skip = 0,
|
||||
},
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 100,
|
||||
skip = 0,
|
||||
},
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 50,
|
||||
skip = 1,
|
||||
},
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 34,
|
||||
skip = 2,
|
||||
},
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 25,
|
||||
skip = 3,
|
||||
},
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 20,
|
||||
skip = 4,
|
||||
},
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 17,
|
||||
skip = 5,
|
||||
},
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 14,
|
||||
skip = 6,
|
||||
},
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 13,
|
||||
skip = 7,
|
||||
},
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 11,
|
||||
skip = 8,
|
||||
},
|
||||
new FASTCOVER_accel_t
|
||||
{
|
||||
finalize = 10,
|
||||
skip = 9,
|
||||
},
|
||||
});
|
||||
|
||||
/*-*************************************
|
||||
* Helper functions
|
||||
***************************************/
|
||||
/**
|
||||
* Selects the best segment in an epoch.
|
||||
* Segments of are scored according to the function:
|
||||
*
|
||||
* Let F(d) be the frequency of all dmers with hash value d.
|
||||
* Let S_i be hash value of the dmer at position i of segment S which has length k.
|
||||
*
|
||||
* Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
|
||||
*
|
||||
* Once the dmer with hash value d is in the dictionary we set F(d) = 0.
|
||||
*/
|
||||
private static COVER_segment_t FASTCOVER_selectSegment(FASTCOVER_ctx_t* ctx, uint* freqs, uint begin, uint end, ZDICT_cover_params_t parameters, ushort* segmentFreqs)
|
||||
{
|
||||
uint k = parameters.k;
|
||||
uint d = parameters.d;
|
||||
uint f = ctx->f;
|
||||
uint dmersInK = k - d + 1;
|
||||
COVER_segment_t bestSegment = new COVER_segment_t
|
||||
{
|
||||
begin = 0,
|
||||
end = 0,
|
||||
score = 0,
|
||||
};
|
||||
COVER_segment_t activeSegment;
|
||||
|
||||
activeSegment.begin = begin;
|
||||
activeSegment.end = begin;
|
||||
activeSegment.score = 0;
|
||||
while (activeSegment.end < end)
|
||||
{
|
||||
nuint idx = FASTCOVER_hashPtrToIndex((void*)(ctx->samples + activeSegment.end), f, d);
|
||||
|
||||
if (segmentFreqs[idx] == 0)
|
||||
{
|
||||
activeSegment.score += freqs[idx];
|
||||
}
|
||||
|
||||
activeSegment.end += 1;
|
||||
segmentFreqs[idx] += 1;
|
||||
if (activeSegment.end - activeSegment.begin == dmersInK + 1)
|
||||
{
|
||||
nuint delIndex = FASTCOVER_hashPtrToIndex((void*)(ctx->samples + activeSegment.begin), f, d);
|
||||
|
||||
segmentFreqs[delIndex] -= 1;
|
||||
if (segmentFreqs[delIndex] == 0)
|
||||
{
|
||||
activeSegment.score -= freqs[delIndex];
|
||||
}
|
||||
|
||||
activeSegment.begin += 1;
|
||||
}
|
||||
|
||||
if (activeSegment.score > bestSegment.score)
|
||||
{
|
||||
bestSegment = activeSegment;
|
||||
}
|
||||
}
|
||||
|
||||
while (activeSegment.begin < end)
|
||||
{
|
||||
nuint delIndex = FASTCOVER_hashPtrToIndex((void*)(ctx->samples + activeSegment.begin), f, d);
|
||||
|
||||
segmentFreqs[delIndex] -= 1;
|
||||
activeSegment.begin += 1;
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
uint pos;
|
||||
|
||||
for (pos = bestSegment.begin; pos != bestSegment.end; ++pos)
|
||||
{
|
||||
nuint i = FASTCOVER_hashPtrToIndex((void*)(ctx->samples + pos), f, d);
|
||||
|
||||
freqs[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return bestSegment;
|
||||
}
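// Illustrative sketch (not part of this diff): the selection loop above is a sliding window that
// adds freqs[hash] the first time a dmer hash enters the window and subtracts it when its last
// occurrence leaves, so each distinct hash is counted once per candidate segment. A safe managed
// analogue over precomputed per-position hashes (hypothetical helper; assumes using System.Collections.Generic):
static (int begin, long score) SelectBestWindow(int[] hashAtPos, uint[] freqs, int window)
{
    var occurrences = new Dictionary<int, int>();   // hash -> occurrences currently inside the window
    long score = 0, best = 0;
    int begin = 0, bestBegin = 0;
    for (int end = 0; end < hashAtPos.Length; end++)
    {
        int h = hashAtPos[end];
        occurrences.TryGetValue(h, out int c);
        if (c == 0) score += freqs[h];              // a hash contributes its frequency only once
        occurrences[h] = c + 1;
        if (end - begin + 1 > window)               // shrink from the left once the window is full
        {
            int old = hashAtPos[begin++];
            if (--occurrences[old] == 0) score -= freqs[old];
        }
        if (score > best) { best = score; bestBegin = begin; }
    }
    return (bestBegin, best);
}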
|
||||
|
||||
private static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters, nuint maxDictSize, uint f, uint accel)
|
||||
{
|
||||
if (parameters.d == 0 || parameters.k == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (parameters.d != 6 && parameters.d != 8)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (parameters.k > maxDictSize)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (parameters.d > parameters.k)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (f > 31 || f == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (parameters.splitPoint <= 0 || parameters.splitPoint > 1)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (accel > 10 || accel == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up a context initialized with `FASTCOVER_ctx_init()`.
|
||||
*/
|
||||
private static void FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
|
||||
{
|
||||
if (ctx == null)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
free((void*)ctx->freqs);
|
||||
ctx->freqs = null;
|
||||
free((void*)ctx->offsets);
|
||||
ctx->offsets = null;
|
||||
}
|
||||
|
||||
/**
* Calculate the frequency of the hash value of each dmer in ctx->samples.
*/
|
||||
private static void FASTCOVER_computeFrequency(uint* freqs, FASTCOVER_ctx_t* ctx)
|
||||
{
|
||||
uint f = ctx->f;
|
||||
uint d = ctx->d;
|
||||
uint skip = ctx->accelParams.skip;
|
||||
uint readLength = ((d) > (8) ? (d) : (8));
|
||||
nuint i;
|
||||
|
||||
assert(ctx->nbTrainSamples >= 5);
|
||||
assert(ctx->nbTrainSamples <= ctx->nbSamples);
|
||||
for (i = 0; i < ctx->nbTrainSamples; i++)
|
||||
{
|
||||
nuint start = ctx->offsets[i];
|
||||
nuint currSampleEnd = ctx->offsets[i + 1];
|
||||
|
||||
while (start + readLength <= currSampleEnd)
|
||||
{
|
||||
nuint dmerIndex = FASTCOVER_hashPtrToIndex((void*)(ctx->samples + start), f, d);
|
||||
|
||||
freqs[dmerIndex]++;
|
||||
start = start + skip + 1;
|
||||
}
|
||||
}
|
||||
}
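// Illustrative sketch (not part of this diff): the loop above walks each training sample and, at every
// (skip + 1)-th position, hashes the d bytes starting there into one of 2^f buckets and increments it.
// HashDmer is a hypothetical stand-in for FASTCOVER_hashPtrToIndex, shown only to make the traversal concrete.
static uint[] CountDmerFrequencies(byte[] sample, int d, int f, int skip)
{
    var freqs = new uint[1u << f];
    int readLength = Math.Max(d, 8);                          // mirrors ((d) > (8) ? (d) : (8)) above
    for (int start = 0; start + readLength <= sample.Length; start += skip + 1)
    {
        freqs[HashDmer(sample, start, d, f)]++;
    }
    return freqs;
}

static int HashDmer(byte[] sample, int start, int d, int f)
{
    uint h = 2166136261;                                       // simple FNV-1a fold to f bits, illustration only
    for (int i = 0; i < d; i++) h = (h ^ sample[start + i]) * 16777619;
    return (int)(h >> (32 - f));
}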
|
||||
|
||||
/**
* Prepare a context for dictionary building.
* The context is only dependent on the parameter `d` and can be used multiple
* times.
* Returns 0 on success or an error code on error.
* The context must be destroyed with `FASTCOVER_ctx_destroy()`.
*/
|
||||
private static nuint FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, uint d, double splitPoint, uint f, FASTCOVER_accel_t accelParams)
|
||||
{
|
||||
byte* samples = (byte*)(samplesBuffer);
|
||||
nuint totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
|
||||
uint nbTrainSamples = splitPoint < 1.0 ? (uint)((double)(nbSamples) * splitPoint) : nbSamples;
|
||||
uint nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
|
||||
nuint trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
|
||||
nuint testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
|
||||
|
||||
if (totalSamplesSize < ((d) > ((nuint)(sizeof(ulong))) ? (d) : ((nuint)(sizeof(ulong)))) || totalSamplesSize >= (nuint)((nuint)(sizeof(nuint)) == 8 ? (unchecked((uint)(-1))) : ((uint)(1) * (1U << 30))))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)));
|
||||
}
|
||||
|
||||
if (nbTrainSamples < 5)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)));
|
||||
}
|
||||
|
||||
if (nbTestSamples < 1)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)));
|
||||
}
|
||||
|
||||
memset((void*)ctx, 0, (nuint)(sizeof(FASTCOVER_ctx_t)));
|
||||
ctx->samples = samples;
|
||||
ctx->samplesSizes = samplesSizes;
|
||||
ctx->nbSamples = nbSamples;
|
||||
ctx->nbTrainSamples = nbTrainSamples;
|
||||
ctx->nbTestSamples = nbTestSamples;
|
||||
ctx->nbDmers = trainingSamplesSize - ((d) > ((nuint)(sizeof(ulong))) ? (d) : ((nuint)(sizeof(ulong)))) + 1;
|
||||
ctx->d = d;
|
||||
ctx->f = f;
|
||||
ctx->accelParams = accelParams;
|
||||
ctx->offsets = (nuint*)(calloc((nbSamples + 1), (nuint)(sizeof(nuint))));
|
||||
if (ctx->offsets == null)
|
||||
{
|
||||
FASTCOVER_ctx_destroy(ctx);
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)));
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
uint i;
|
||||
|
||||
ctx->offsets[0] = 0;
|
||||
assert(nbSamples >= 5);
|
||||
for (i = 1; i <= nbSamples; ++i)
|
||||
{
|
||||
ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
|
||||
}
|
||||
}
|
||||
|
||||
ctx->freqs = (uint*)(calloc((nuint)((ulong)(1) << (int)f), (nuint)(sizeof(uint))));
|
||||
if (ctx->freqs == null)
|
||||
{
|
||||
FASTCOVER_ctx_destroy(ctx);
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)));
|
||||
}
|
||||
|
||||
FASTCOVER_computeFrequency(ctx->freqs, ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Given the prepared context build the dictionary.
|
||||
*/
|
||||
private static nuint FASTCOVER_buildDictionary(FASTCOVER_ctx_t* ctx, uint* freqs, void* dictBuffer, nuint dictBufferCapacity, ZDICT_cover_params_t parameters, ushort* segmentFreqs)
|
||||
{
|
||||
byte* dict = (byte*)(dictBuffer);
|
||||
nuint tail = dictBufferCapacity;
|
||||
COVER_epoch_info_t epochs = COVER_computeEpochs((uint)(dictBufferCapacity), (uint)(ctx->nbDmers), parameters.k, 1);
|
||||
nuint maxZeroScoreRun = 10;
|
||||
nuint zeroScoreRun = 0;
|
||||
nuint epoch;
|
||||
|
||||
for (epoch = 0; tail > 0; epoch = (nuint)((epoch + 1) % epochs.num))
|
||||
{
|
||||
uint epochBegin = (uint)(epoch * epochs.size);
|
||||
uint epochEnd = epochBegin + epochs.size;
|
||||
nuint segmentSize;
|
||||
COVER_segment_t segment = FASTCOVER_selectSegment(ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);
|
||||
|
||||
if (segment.score == 0)
|
||||
{
|
||||
if (++zeroScoreRun >= maxZeroScoreRun)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
zeroScoreRun = 0;
|
||||
segmentSize = ((segment.end - segment.begin + parameters.d - 1) < (tail) ? (segment.end - segment.begin + parameters.d - 1) : (tail));
|
||||
if (segmentSize < parameters.d)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
tail -= segmentSize;
|
||||
memcpy((void*)(dict + tail), (void*)(ctx->samples + segment.begin), segmentSize);
|
||||
}
|
||||
|
||||
return tail;
|
||||
}
|
||||
|
||||
/**
|
||||
* Tries a set of parameters and updates the COVER_best_t with the results.
|
||||
* This function is thread safe if zstd is compiled with multithreaded support.
|
||||
* It takes its parameters as an *OWNING* opaque pointer to support threading.
|
||||
*/
|
||||
private static void FASTCOVER_tryParameters(void* opaque)
|
||||
{
|
||||
FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)(opaque);
|
||||
FASTCOVER_ctx_t* ctx = data->ctx;
|
||||
ZDICT_cover_params_t parameters = data->parameters;
|
||||
nuint dictBufferCapacity = data->dictBufferCapacity;
|
||||
nuint totalCompressedSize = (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
ushort* segmentFreqs = (ushort*)(calloc((nuint)((ulong)(1) << (int)ctx->f), (nuint)(2)));
|
||||
byte* dict = (byte*)(malloc(dictBufferCapacity));
|
||||
COVER_dictSelection selection = COVER_dictSelectionError((unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC))));
|
||||
uint* freqs = (uint*)(malloc((nuint)(((ulong)(1) << (int)ctx->f) * (nuint)(4))));
|
||||
|
||||
if (segmentFreqs == null || dict == null || freqs == null)
|
||||
{
|
||||
goto _cleanup;
|
||||
}
|
||||
|
||||
memcpy((void*)freqs, (void*)ctx->freqs, (nuint)(((ulong)(1) << (int)ctx->f) * (nuint)(sizeof(uint))));
|
||||
|
||||
{
|
||||
nuint tail = FASTCOVER_buildDictionary(ctx, freqs, (void*)dict, dictBufferCapacity, parameters, segmentFreqs);
|
||||
uint nbFinalizeSamples = (uint)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
|
||||
|
||||
selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail, ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, totalCompressedSize);
|
||||
if ((COVER_dictSelectionIsError(selection)) != 0)
|
||||
{
|
||||
goto _cleanup;
|
||||
}
|
||||
}
|
||||
|
||||
_cleanup:
|
||||
free((void*)dict);
|
||||
COVER_best_finish(data->best, parameters, selection);
|
||||
free((void*)data);
|
||||
free((void*)segmentFreqs);
|
||||
COVER_dictSelectionFree(selection);
|
||||
free((void*)freqs);
|
||||
}
|
||||
|
||||
private static void FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams, ZDICT_cover_params_t* coverParams)
|
||||
{
|
||||
coverParams->k = fastCoverParams.k;
|
||||
coverParams->d = fastCoverParams.d;
|
||||
coverParams->steps = fastCoverParams.steps;
|
||||
coverParams->nbThreads = fastCoverParams.nbThreads;
|
||||
coverParams->splitPoint = fastCoverParams.splitPoint;
|
||||
coverParams->zParams = fastCoverParams.zParams;
|
||||
coverParams->shrinkDict = fastCoverParams.shrinkDict;
|
||||
}
|
||||
|
||||
private static void FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams, ZDICT_fastCover_params_t* fastCoverParams, uint f, uint accel)
|
||||
{
|
||||
fastCoverParams->k = coverParams.k;
|
||||
fastCoverParams->d = coverParams.d;
|
||||
fastCoverParams->steps = coverParams.steps;
|
||||
fastCoverParams->nbThreads = coverParams.nbThreads;
|
||||
fastCoverParams->splitPoint = coverParams.splitPoint;
|
||||
fastCoverParams->f = f;
|
||||
fastCoverParams->accel = accel;
|
||||
fastCoverParams->zParams = coverParams.zParams;
|
||||
fastCoverParams->shrinkDict = coverParams.shrinkDict;
|
||||
}
|
||||
|
||||
/*! ZDICT_trainFromBuffer_fastCover():
* Train a dictionary from an array of samples using a modified version of the COVER algorithm.
* Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
* supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
* d and k are required.
* All other parameters are optional and will use default values if not provided.
* The resulting dictionary will be saved into `dictBuffer`.
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
* or an error code, which can be tested with ZDICT_isError().
* See ZDICT_trainFromBuffer() for details on failure modes.
* Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.
* Tips: In general, a reasonable dictionary has a size of ~100 KB.
* It's possible to select a smaller or larger size, just by specifying `dictBufferCapacity`.
* In general, it's recommended to provide a few thousand samples, though this can vary a lot.
* It's recommended that the total size of all samples be about ~100x the target size of the dictionary.
*/
|
||||
public static nuint ZDICT_trainFromBuffer_fastCover(void* dictBuffer, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, ZDICT_fastCover_params_t parameters)
|
||||
{
|
||||
byte* dict = (byte*)(dictBuffer);
|
||||
FASTCOVER_ctx_t ctx;
|
||||
ZDICT_cover_params_t coverParams;
|
||||
FASTCOVER_accel_t accelParams;
|
||||
|
||||
g_displayLevel = (int)parameters.zParams.notificationLevel;
|
||||
parameters.splitPoint = 1.0;
|
||||
parameters.f = (uint)(parameters.f == 0 ? 20 : parameters.f);
|
||||
parameters.accel = (uint)(parameters.accel == 0 ? 1 : parameters.accel);
|
||||
memset((void*)&coverParams, 0, (nuint)(sizeof(ZDICT_cover_params_t)));
|
||||
FASTCOVER_convertToCoverParams(parameters, &coverParams);
|
||||
if ((FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f, parameters.accel)) == 0)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)));
|
||||
}
|
||||
|
||||
if (nbSamples == 0)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)));
|
||||
}
|
||||
|
||||
if (dictBufferCapacity < 256)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
|
||||
}
|
||||
|
||||
accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
|
||||
|
||||
{
|
||||
nuint initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, coverParams.d, parameters.splitPoint, parameters.f, accelParams);
|
||||
|
||||
if ((ERR_isError(initVal)) != 0)
|
||||
{
|
||||
return initVal;
|
||||
}
|
||||
}
|
||||
|
||||
COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel);
|
||||
|
||||
{
|
||||
ushort* segmentFreqs = (ushort*)(calloc((nuint)((ulong)(1) << (int)parameters.f), (nuint)(2)));
|
||||
nuint tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer, dictBufferCapacity, coverParams, segmentFreqs);
|
||||
uint nbFinalizeSamples = (uint)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
|
||||
nuint dictionarySize = ZDICT_finalizeDictionary((void*)dict, dictBufferCapacity, (void*)(dict + tail), dictBufferCapacity - tail, samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);
|
||||
|
||||
if ((ERR_isError(dictionarySize)) == 0)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
FASTCOVER_ctx_destroy(&ctx);
|
||||
free((void*)segmentFreqs);
|
||||
return dictionarySize;
|
||||
}
|
||||
}
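// Illustrative usage sketch (not part of this diff): samples are concatenated into one flat buffer with a
// parallel array of sizes, exactly as the comment above describes. The call below assumes the surface shown
// in this file (Methods.ZDICT_trainFromBuffer_fastCover, ERR_isError, ZDICT_fastCover_params_t with public
// d/k fields); adjust names if the port exposes them differently. Needs using System, System.Linq,
// System.Collections.Generic and /unsafe; training expects at least a handful (>= 5) of samples.
public static unsafe byte[] TrainDictionary(List<byte[]> samples, int dictCapacity = 112_640)
{
    byte[] flat = samples.SelectMany(s => s).ToArray();
    nuint[] sizes = samples.Select(s => (nuint)s.Length).ToArray();
    byte[] dict = new byte[dictCapacity];
    var prms = new ZDICT_fastCover_params_t { d = 8, k = 200 };   // d and k are required; d must be 6 or 8
    nuint written;
    fixed (byte* pFlat = flat)
    fixed (byte* pDict = dict)
    fixed (nuint* pSizes = sizes)
    {
        written = Methods.ZDICT_trainFromBuffer_fastCover(
            pDict, (nuint)dict.Length, pFlat, pSizes, (uint)samples.Count, prms);
    }
    if (Methods.ERR_isError(written) != 0)
        throw new InvalidOperationException("dictionary training failed");
    Array.Resize(ref dict, (int)written);
    return dict;
}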
|
||||
|
||||
/*! ZDICT_optimizeTrainFromBuffer_fastCover():
* The same requirements as above hold for all the parameters except `parameters`.
* This function tries many parameter combinations (specifically, k and d combinations)
* and picks the best parameters. `*parameters` is filled with the best parameters found,
* and the dictionary constructed with those parameters is stored in `dictBuffer`.
* All of the parameters d, k, steps, f, and accel are optional.
* If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
* If steps is zero it defaults to 40.
* If k is non-zero then we don't check multiple values of k, otherwise we check `steps` values of k in [50, 2000].
* If f is zero, the default value of 20 is used.
* If accel is zero, the default value of 1 is used.
*
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
* or an error code, which can be tested with ZDICT_isError().
* On success `*parameters` contains the parameters selected.
* See ZDICT_trainFromBuffer() for details on failure modes.
* Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
*/
|
||||
public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, ZDICT_fastCover_params_t* parameters)
|
||||
{
|
||||
ZDICT_cover_params_t coverParams;
|
||||
FASTCOVER_accel_t accelParams;
|
||||
uint nbThreads = parameters->nbThreads;
|
||||
double splitPoint = parameters->splitPoint <= 0.0 ? 0.75 : parameters->splitPoint;
|
||||
uint kMinD = (uint)(parameters->d == 0 ? 6 : parameters->d);
|
||||
uint kMaxD = (uint)(parameters->d == 0 ? 8 : parameters->d);
|
||||
uint kMinK = (uint)(parameters->k == 0 ? 50 : parameters->k);
|
||||
uint kMaxK = (uint)(parameters->k == 0 ? 2000 : parameters->k);
|
||||
uint kSteps = (uint)(parameters->steps == 0 ? 40 : parameters->steps);
|
||||
uint kStepSize = (((kMaxK - kMinK) / kSteps) > (1) ? ((kMaxK - kMinK) / kSteps) : (1));
|
||||
uint kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
|
||||
uint f = (uint)(parameters->f == 0 ? 20 : parameters->f);
|
||||
uint accel = (uint)(parameters->accel == 0 ? 1 : parameters->accel);
|
||||
uint shrinkDict = 0;
|
||||
int displayLevel = (int)parameters->zParams.notificationLevel;
|
||||
uint iteration = 1;
|
||||
uint d;
|
||||
uint k;
|
||||
COVER_best_s best;
|
||||
int warned = 0;
|
||||
|
||||
if (splitPoint <= 0 || splitPoint > 1)
|
||||
{
|
||||
if (displayLevel >= 1)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)));
|
||||
}
|
||||
|
||||
if (accel == 0 || accel > 10)
|
||||
{
|
||||
if (displayLevel >= 1)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)));
|
||||
}
|
||||
|
||||
if (kMinK < kMaxD || kMaxK < kMinK)
|
||||
{
|
||||
if (displayLevel >= 1)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)));
|
||||
}
|
||||
|
||||
if (nbSamples == 0)
|
||||
{
|
||||
if (displayLevel >= 1)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)));
|
||||
}
|
||||
|
||||
if (dictBufferCapacity < 256)
|
||||
{
|
||||
if (displayLevel >= 1)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
|
||||
}
|
||||
|
||||
if (nbThreads > 1)
|
||||
{
|
||||
throw new NotImplementedException("Multiple threads are not supported");
|
||||
}
|
||||
|
||||
COVER_best_init(&best);
|
||||
memset((void*)&coverParams, 0, (nuint)(sizeof(ZDICT_cover_params_t)));
|
||||
FASTCOVER_convertToCoverParams(*parameters, &coverParams);
|
||||
accelParams = FASTCOVER_defaultAccelParameters[accel];
|
||||
g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
|
||||
if (displayLevel >= 2)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
for (d = kMinD; d <= kMaxD; d += 2)
|
||||
{
|
||||
FASTCOVER_ctx_t ctx;
|
||||
|
||||
if (displayLevel >= 3)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
nuint initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams);
|
||||
|
||||
if ((ERR_isError(initVal)) != 0)
|
||||
{
|
||||
if (displayLevel >= 1)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
COVER_best_destroy(&best);
|
||||
return initVal;
|
||||
}
|
||||
}
|
||||
|
||||
if (warned == 0)
|
||||
{
|
||||
COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);
|
||||
warned = 1;
|
||||
}
|
||||
|
||||
for (k = kMinK; k <= kMaxK; k += kStepSize)
|
||||
{
|
||||
FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)(malloc((nuint)(sizeof(FASTCOVER_tryParameters_data_s))));
|
||||
|
||||
if (displayLevel >= 3)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
if (data == null)
|
||||
{
|
||||
if (displayLevel >= 1)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
COVER_best_destroy(&best);
|
||||
FASTCOVER_ctx_destroy(&ctx);
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)));
|
||||
}
|
||||
|
||||
data->ctx = &ctx;
|
||||
data->best = &best;
|
||||
data->dictBufferCapacity = dictBufferCapacity;
|
||||
data->parameters = coverParams;
|
||||
data->parameters.k = k;
|
||||
data->parameters.d = d;
|
||||
data->parameters.splitPoint = splitPoint;
|
||||
data->parameters.steps = kSteps;
|
||||
data->parameters.shrinkDict = shrinkDict;
|
||||
data->parameters.zParams.notificationLevel = (uint)g_displayLevel;
|
||||
if ((FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, data->ctx->f, accel)) == 0)
|
||||
{
|
||||
free((void*)data);
|
||||
continue;
|
||||
}
|
||||
|
||||
COVER_best_start(&best);
|
||||
FASTCOVER_tryParameters((void*)data);
|
||||
|
||||
++iteration;
|
||||
}
|
||||
|
||||
COVER_best_wait(&best);
|
||||
FASTCOVER_ctx_destroy(&ctx);
|
||||
}
|
||||
|
||||
if (displayLevel >= 2)
|
||||
{
|
||||
;
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
nuint dictSize = best.dictSize;
|
||||
|
||||
if ((ERR_isError(best.compressedSize)) != 0)
|
||||
{
|
||||
nuint compressedSize = best.compressedSize;
|
||||
|
||||
COVER_best_destroy(&best);
|
||||
return compressedSize;
|
||||
}
|
||||
|
||||
FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
|
||||
memcpy(dictBuffer, best.dict, dictSize);
|
||||
COVER_best_destroy(&best);
|
||||
return dictSize;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
158 src/SharpCompress/Compressors/Zstd/Unsafe/Fse.cs Normal file
@@ -0,0 +1,158 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using static ZstdSharp.UnsafeHelper;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_initCState(FSE_CState_t* statePtr, uint* ct)
|
||||
{
|
||||
void* ptr = (void*)ct;
|
||||
ushort* u16ptr = (ushort*)(ptr);
|
||||
uint tableLog = MEM_read16(ptr);
|
||||
|
||||
statePtr->value = (nint)(1) << (int)tableLog;
|
||||
statePtr->stateTable = u16ptr + 2;
|
||||
statePtr->symbolTT = ct + 1 + (tableLog != 0 ? (1 << (int)(tableLog - 1)) : 1);
|
||||
statePtr->stateLog = tableLog;
|
||||
}
|
||||
|
||||
/*! FSE_initCState2() :
|
||||
* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
|
||||
* uses the smallest state value possible, saving the cost of this symbol */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_initCState2(FSE_CState_t* statePtr, uint* ct, uint symbol)
|
||||
{
|
||||
FSE_initCState(statePtr, ct);
|
||||
|
||||
{
|
||||
FSE_symbolCompressionTransform symbolTT = ((FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
|
||||
ushort* stateTable = (ushort*)(statePtr->stateTable);
|
||||
uint nbBitsOut = (uint)((symbolTT.deltaNbBits + (uint)((1 << 15))) >> 16);
|
||||
|
||||
statePtr->value = (nint)((nbBitsOut << 16) - symbolTT.deltaNbBits);
|
||||
statePtr->value = (nint)(stateTable[(statePtr->value >> (int)nbBitsOut) + symbolTT.deltaFindState]);
|
||||
}
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, uint symbol)
|
||||
{
|
||||
FSE_symbolCompressionTransform symbolTT = ((FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
|
||||
ushort* stateTable = (ushort*)(statePtr->stateTable);
|
||||
uint nbBitsOut = (uint)(((nuint)statePtr->value + symbolTT.deltaNbBits) >> 16);
|
||||
|
||||
BIT_addBits(bitC, (nuint)statePtr->value, nbBitsOut);
|
||||
statePtr->value = (nint)(stateTable[(statePtr->value >> (int)nbBitsOut) + symbolTT.deltaFindState]);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_flushCState(BIT_CStream_t* bitC, FSE_CState_t* statePtr)
|
||||
{
|
||||
BIT_addBits(bitC, (nuint)statePtr->value, statePtr->stateLog);
|
||||
BIT_flushBits(bitC);
|
||||
}
|
||||
|
||||
/* FSE_getMaxNbBits() :
* Approximate maximum cost of a symbol, in bits.
* Fractional values get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2).
* note 1 : assume symbolValue is valid (<= maxSymbolValue)
* note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint FSE_getMaxNbBits(void* symbolTTPtr, uint symbolValue)
|
||||
{
|
||||
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)(symbolTTPtr);
|
||||
|
||||
return (symbolTT[symbolValue].deltaNbBits + (uint)(((1 << 16) - 1))) >> 16;
|
||||
}
|
||||
|
||||
/* FSE_bitCost() :
|
||||
* Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
|
||||
* note 1 : assume symbolValue is valid (<= maxSymbolValue)
|
||||
* note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint FSE_bitCost(void* symbolTTPtr, uint tableLog, uint symbolValue, uint accuracyLog)
|
||||
{
|
||||
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)(symbolTTPtr);
|
||||
uint minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
|
||||
uint threshold = (minNbBits + 1) << 16;
|
||||
|
||||
assert(tableLog < 16);
|
||||
assert(accuracyLog < 31 - tableLog);
|
||||
|
||||
{
|
||||
uint tableSize = (uint)(1 << (int)tableLog);
|
||||
uint deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
|
||||
uint normalizedDeltaFromThreshold = (deltaFromThreshold << (int)accuracyLog) >> (int)tableLog;
|
||||
uint bitMultiplier = (uint)(1 << (int)accuracyLog);
|
||||
|
||||
assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
|
||||
assert(normalizedDeltaFromThreshold <= bitMultiplier);
|
||||
return (minNbBits + 1) * bitMultiplier - normalizedDeltaFromThreshold;
|
||||
}
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, uint* dt)
|
||||
{
|
||||
void* ptr = (void*)dt;
|
||||
FSE_DTableHeader* DTableH = (FSE_DTableHeader*)(ptr);
|
||||
|
||||
DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
|
||||
BIT_reloadDStream(bitD);
|
||||
DStatePtr->table = dt + 1;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte FSE_peekSymbol(FSE_DState_t* DStatePtr)
|
||||
{
|
||||
FSE_decode_t DInfo = ((FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
|
||||
|
||||
return DInfo.symbol;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
|
||||
{
|
||||
FSE_decode_t DInfo = ((FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
|
||||
uint nbBits = DInfo.nbBits;
|
||||
nuint lowBits = BIT_readBits(bitD, nbBits);
|
||||
|
||||
DStatePtr->state = DInfo.newState + lowBits;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
|
||||
{
|
||||
FSE_decode_t DInfo = ((FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
|
||||
uint nbBits = DInfo.nbBits;
|
||||
byte symbol = DInfo.symbol;
|
||||
nuint lowBits = BIT_readBits(bitD, nbBits);
|
||||
|
||||
DStatePtr->state = DInfo.newState + lowBits;
|
||||
return symbol;
|
||||
}
|
||||
|
||||
/*! FSE_decodeSymbolFast() :
|
||||
unsafe, only works if no symbol has a probability > 50% */
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static byte FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
|
||||
{
|
||||
FSE_decode_t DInfo = ((FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
|
||||
uint nbBits = DInfo.nbBits;
|
||||
byte symbol = DInfo.symbol;
|
||||
nuint lowBits = BIT_readBitsFast(bitD, nbBits);
|
||||
|
||||
DStatePtr->state = DInfo.newState + lowBits;
|
||||
return symbol;
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private static uint FSE_endOfDState(FSE_DState_t* DStatePtr)
|
||||
{
|
||||
return ((DStatePtr->state == 0) ? 1U : 0U);
|
||||
}
|
||||
}
|
||||
}
|
||||
960 src/SharpCompress/Compressors/Zstd/Unsafe/FseCompress.cs Normal file
@@ -0,0 +1,960 @@
|
||||
using System;
|
||||
using static ZstdSharp.UnsafeHelper;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/* FSE_buildCTable_wksp() :
|
||||
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
|
||||
* wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
|
||||
* workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
|
||||
*/
|
||||
public static nuint FSE_buildCTable_wksp(uint* ct, short* normalizedCounter, uint maxSymbolValue, uint tableLog, void* workSpace, nuint wkspSize)
|
||||
{
|
||||
uint tableSize = (uint)(1 << (int)tableLog);
|
||||
uint tableMask = tableSize - 1;
|
||||
void* ptr = (void*)ct;
|
||||
ushort* tableU16 = ((ushort*)(ptr)) + 2;
|
||||
void* FSCT = (void*)(((uint*)(ptr)) + 1 + (tableLog != 0 ? tableSize >> 1 : 1));
|
||||
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)(FSCT);
|
||||
uint step = (((tableSize) >> 1) + ((tableSize) >> 3) + 3);
|
||||
uint* cumul = (uint*)(workSpace);
|
||||
byte* tableSymbol = (byte*)(cumul + (maxSymbolValue + 2));
|
||||
uint highThreshold = tableSize - 1;
|
||||
|
||||
if (((nuint)(workSpace) & 3) != 0)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
}
|
||||
|
||||
if (((nuint)(sizeof(uint)) * (maxSymbolValue + 2 + (1UL << (int)(tableLog - 2)))) > wkspSize)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
|
||||
}
|
||||
|
||||
tableU16[-2] = (ushort)(tableLog);
|
||||
tableU16[-1] = (ushort)(maxSymbolValue);
|
||||
assert(tableLog < 16);
|
||||
|
||||
{
|
||||
uint u;
|
||||
|
||||
cumul[0] = 0;
|
||||
for (u = 1; u <= maxSymbolValue + 1; u++)
|
||||
{
|
||||
if (normalizedCounter[u - 1] == -1)
|
||||
{
|
||||
cumul[u] = cumul[u - 1] + 1;
|
||||
tableSymbol[highThreshold--] = (byte)(u - 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
cumul[u] = cumul[u - 1] + (ushort)(normalizedCounter[u - 1]);
|
||||
}
|
||||
}
|
||||
|
||||
cumul[maxSymbolValue + 1] = tableSize + 1;
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
uint position = 0;
|
||||
uint symbol;
|
||||
|
||||
for (symbol = 0; symbol <= maxSymbolValue; symbol++)
|
||||
{
|
||||
int nbOccurrences;
|
||||
int freq = normalizedCounter[symbol];
|
||||
|
||||
for (nbOccurrences = 0; nbOccurrences < freq; nbOccurrences++)
|
||||
{
|
||||
tableSymbol[position] = (byte)(symbol);
|
||||
position = (position + step) & tableMask;
|
||||
while (position > highThreshold)
|
||||
{
|
||||
position = (position + step) & tableMask;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assert(position == 0);
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
uint u;
|
||||
|
||||
for (u = 0; u < tableSize; u++)
|
||||
{
|
||||
byte s = tableSymbol[u];
|
||||
|
||||
tableU16[cumul[s]++] = (ushort)(tableSize + u);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
uint total = 0;
|
||||
uint s;
|
||||
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
switch (normalizedCounter[s])
|
||||
{
|
||||
case 0:
|
||||
{
|
||||
symbolTT[s].deltaNbBits = ((tableLog + 1) << 16) - (uint)((1 << (int)tableLog));
|
||||
}
|
||||
|
||||
break;
|
||||
case -1:
|
||||
case 1:
|
||||
{
|
||||
symbolTT[s].deltaNbBits = (tableLog << 16) - (uint)((1 << (int)tableLog));
|
||||
}
|
||||
|
||||
symbolTT[s].deltaFindState = (int)(total - 1);
|
||||
total++;
|
||||
break;
|
||||
default:
|
||||
{
|
||||
uint maxBitsOut = tableLog - BIT_highbit32((uint)(normalizedCounter[s] - 1));
|
||||
uint minStatePlus = (uint)(normalizedCounter[s] << (int)maxBitsOut);
|
||||
|
||||
symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
|
||||
symbolTT[s].deltaFindState = (int)(total - (ushort)(normalizedCounter[s]));
|
||||
total += (uint)(normalizedCounter[s]);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*! FSE_buildCTable():
|
||||
Builds `ct`, which must be already allocated, using FSE_createCTable().
|
||||
@return : 0, or an errorCode, which can be tested using FSE_isError() */
|
||||
public static nuint FSE_buildCTable(uint* ct, short* normalizedCounter, uint maxSymbolValue, uint tableLog)
|
||||
{
|
||||
byte* tableSymbol = stackalloc byte[4096];
|
||||
|
||||
return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, (void*)tableSymbol, (nuint)(sizeof(byte) * 4096));
|
||||
}
|
||||
|
||||
/*-**************************************************************
|
||||
* FSE NCount encoding
|
||||
****************************************************************/
|
||||
public static nuint FSE_NCountWriteBound(uint maxSymbolValue, uint tableLog)
|
||||
{
|
||||
nuint maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3;
|
||||
|
||||
return maxSymbolValue != 0 ? maxHeaderSize : 512;
|
||||
}
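// Worked example of the bound above: for maxSymbolValue = 255 and tableLog = 11,
// maxHeaderSize = (((255 + 1) * 11) >> 3) + 3 = (2816 >> 3) + 3 = 352 + 3 = 355 bytes;
// when maxSymbolValue == 0 the fixed fallback of 512 bytes is returned instead.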
|
||||
|
||||
private static nuint FSE_writeNCount_generic(void* header, nuint headerBufferSize, short* normalizedCounter, uint maxSymbolValue, uint tableLog, uint writeIsSafe)
|
||||
{
|
||||
byte* ostart = (byte*)(header);
|
||||
byte* @out = ostart;
|
||||
byte* oend = ostart + headerBufferSize;
|
||||
int nbBits;
|
||||
int tableSize = 1 << (int)tableLog;
|
||||
int remaining;
|
||||
int threshold;
|
||||
uint bitStream = 0;
|
||||
int bitCount = 0;
|
||||
uint symbol = 0;
|
||||
uint alphabetSize = maxSymbolValue + 1;
|
||||
int previousIs0 = 0;
|
||||
|
||||
bitStream += (tableLog - 5) << bitCount;
|
||||
bitCount += 4;
|
||||
remaining = tableSize + 1;
|
||||
threshold = tableSize;
|
||||
nbBits = (int)(tableLog + 1);
|
||||
while ((symbol < alphabetSize) && (remaining > 1))
|
||||
{
|
||||
if (previousIs0 != 0)
|
||||
{
|
||||
uint start = symbol;
|
||||
|
||||
while ((symbol < alphabetSize) && (normalizedCounter[symbol]) == 0)
|
||||
{
|
||||
symbol++;
|
||||
}
|
||||
|
||||
if (symbol == alphabetSize)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
while (symbol >= start + 24)
|
||||
{
|
||||
start += 24;
|
||||
bitStream += 0xFFFFU << bitCount;
|
||||
if (writeIsSafe == 0 && (@out > oend - 2))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
|
||||
}
|
||||
|
||||
@out[0] = (byte)(bitStream);
|
||||
@out[1] = (byte)(bitStream >> 8);
|
||||
@out += 2;
|
||||
bitStream >>= 16;
|
||||
}
|
||||
|
||||
while (symbol >= start + 3)
|
||||
{
|
||||
start += 3;
|
||||
bitStream += (uint)(3 << bitCount);
|
||||
bitCount += 2;
|
||||
}
|
||||
|
||||
bitStream += (symbol - start) << bitCount;
|
||||
bitCount += 2;
|
||||
if (bitCount > 16)
|
||||
{
|
||||
if (writeIsSafe == 0 && (@out > oend - 2))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
|
||||
}
|
||||
|
||||
@out[0] = (byte)(bitStream);
|
||||
@out[1] = (byte)(bitStream >> 8);
|
||||
@out += 2;
|
||||
bitStream >>= 16;
|
||||
bitCount -= 16;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
int count = normalizedCounter[symbol++];
|
||||
int max = (2 * threshold - 1) - remaining;
|
||||
|
||||
remaining -= count < 0 ? -count : count;
|
||||
count++;
|
||||
if (count >= threshold)
|
||||
{
|
||||
count += max;
|
||||
}
|
||||
|
||||
bitStream += (uint)(count << bitCount);
|
||||
bitCount += nbBits;
|
||||
bitCount -= ((count < max) ? 1 : 0);
|
||||
previousIs0 = ((count == 1) ? 1 : 0);
|
||||
if (remaining < 1)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
}
|
||||
|
||||
while (remaining < threshold)
|
||||
{
|
||||
nbBits--;
|
||||
threshold >>= 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (bitCount > 16)
|
||||
{
|
||||
if (writeIsSafe == 0 && (@out > oend - 2))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
|
||||
}
|
||||
|
||||
@out[0] = (byte)(bitStream);
|
||||
@out[1] = (byte)(bitStream >> 8);
|
||||
@out += 2;
|
||||
bitStream >>= 16;
|
||||
bitCount -= 16;
|
||||
}
|
||||
}
|
||||
|
||||
if (remaining != 1)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
}
|
||||
|
||||
assert(symbol <= alphabetSize);
|
||||
if (writeIsSafe == 0 && (@out > oend - 2))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
|
||||
}
|
||||
|
||||
@out[0] = (byte)(bitStream);
|
||||
@out[1] = (byte)(bitStream >> 8);
|
||||
@out += (bitCount + 7) / 8;
|
||||
return (nuint)((@out - ostart));
|
||||
}
|
||||
|
||||
/*! FSE_writeNCount():
|
||||
Compactly save 'normalizedCounter' into 'buffer'.
|
||||
@return : size of the compressed table,
|
||||
or an errorCode, which can be tested using FSE_isError(). */
|
||||
public static nuint FSE_writeNCount(void* buffer, nuint bufferSize, short* normalizedCounter, uint maxSymbolValue, uint tableLog)
|
||||
{
|
||||
if (tableLog > (uint)((14 - 2)))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
|
||||
}
|
||||
|
||||
if (tableLog < 5)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
}
|
||||
|
||||
if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
|
||||
{
|
||||
return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
|
||||
}
|
||||
|
||||
return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
|
||||
}
|
||||
|
||||
/*-**************************************************************
|
||||
* FSE Compression Code
|
||||
****************************************************************/
|
||||
public static uint* FSE_createCTable(uint maxSymbolValue, uint tableLog)
|
||||
{
|
||||
nuint size;
|
||||
|
||||
if (tableLog > 15)
|
||||
{
|
||||
tableLog = 15;
|
||||
}
|
||||
|
||||
size = ((uint)(1 + (1 << (int)((tableLog) - 1))) + (((maxSymbolValue) + 1) * 2)) * (nuint)(sizeof(uint));
|
||||
return (uint*)(malloc(size));
|
||||
}
|
||||
|
||||
public static void FSE_freeCTable(uint* ct)
|
||||
{
|
||||
free((void*)(ct));
|
||||
}
|
||||
|
||||
/* provides the minimum logSize to safely represent a distribution */
|
||||
[InlineMethod.Inline]
|
||||
private static uint FSE_minTableLog(nuint srcSize, uint maxSymbolValue)
|
||||
{
|
||||
uint minBitsSrc = BIT_highbit32((uint)(srcSize)) + 1;
|
||||
uint minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
|
||||
uint minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
|
||||
|
||||
assert(srcSize > 1);
|
||||
return minBits;
|
||||
}
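// Worked example of the minimum above: for srcSize = 1000 and maxSymbolValue = 255,
// minBitsSrc = BIT_highbit32(1000) + 1 = 9 + 1 = 10 (since 2^9 <= 1000 < 2^10) and
// minBitsSymbols = BIT_highbit32(255) + 2 = 7 + 2 = 9, so FSE_minTableLog returns min(10, 9) = 9.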
|
||||
|
||||
/* *****************************************
|
||||
* FSE advanced API
|
||||
***************************************** */
|
||||
public static uint FSE_optimalTableLog_internal(uint maxTableLog, nuint srcSize, uint maxSymbolValue, uint minus)
|
||||
{
|
||||
uint maxBitsSrc = BIT_highbit32((uint)(srcSize - 1)) - minus;
|
||||
uint tableLog = maxTableLog;
|
||||
uint minBits = FSE_minTableLog(srcSize, maxSymbolValue);
|
||||
|
||||
assert(srcSize > 1);
|
||||
if (tableLog == 0)
|
||||
{
|
||||
tableLog = (uint)((13 - 2));
|
||||
}
|
||||
|
||||
if (maxBitsSrc < tableLog)
|
||||
{
|
||||
tableLog = maxBitsSrc;
|
||||
}
|
||||
|
||||
if (minBits > tableLog)
|
||||
{
|
||||
tableLog = minBits;
|
||||
}
|
||||
|
||||
if (tableLog < 5)
|
||||
{
|
||||
tableLog = 5;
|
||||
}
|
||||
|
||||
if (tableLog > (uint)((14 - 2)))
|
||||
{
|
||||
tableLog = (uint)((14 - 2));
|
||||
}
|
||||
|
||||
return tableLog;
|
||||
}
|
||||
|
||||
/*! FSE_optimalTableLog():
dynamically downsize 'tableLog' when conditions are met.
It saves CPU time by using smaller tables, while preserving or even improving compression ratio.
@return : recommended tableLog (necessarily <= 'maxTableLog') */
|
||||
public static uint FSE_optimalTableLog(uint maxTableLog, nuint srcSize, uint maxSymbolValue)
|
||||
{
|
||||
return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
|
||||
}
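// Worked example (FSE_optimalTableLog uses minus = 2): for maxTableLog = 12, srcSize = 10000 and
// maxSymbolValue = 255, maxBitsSrc = BIT_highbit32(9999) - 2 = 13 - 2 = 11, which is below the requested 12,
// so tableLog drops to 11; FSE_minTableLog(10000, 255) = 9 does not raise it, and 11 already lies within
// [5, 12], so the recommended tableLog is 11.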
|
||||
|
||||
/* Secondary normalization method.
|
||||
To be used when primary method fails. */
|
||||
private static nuint FSE_normalizeM2(short* norm, uint tableLog, uint* count, nuint total, uint maxSymbolValue, short lowProbCount)
|
||||
{
|
||||
short NOT_YET_ASSIGNED = (short)-2;
|
||||
uint s;
|
||||
uint distributed = 0;
|
||||
uint ToDistribute;
|
||||
uint lowThreshold = (uint)(total >> (int)tableLog);
|
||||
uint lowOne = (uint)((total * 3) >> (int)(tableLog + 1));
|
||||
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
if (count[s] == 0)
|
||||
{
|
||||
norm[s] = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (count[s] <= lowThreshold)
|
||||
{
|
||||
norm[s] = lowProbCount;
|
||||
distributed++;
|
||||
total -= count[s];
|
||||
continue;
|
||||
}
|
||||
|
||||
if (count[s] <= lowOne)
|
||||
{
|
||||
norm[s] = 1;
|
||||
distributed++;
|
||||
total -= count[s];
|
||||
continue;
|
||||
}
|
||||
|
||||
norm[s] = NOT_YET_ASSIGNED;
|
||||
}
|
||||
|
||||
ToDistribute = (uint)((1 << (int)tableLog)) - distributed;
|
||||
if (ToDistribute == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if ((total / ToDistribute) > lowOne)
|
||||
{
|
||||
lowOne = (uint)((total * 3) / (ToDistribute * 2));
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne))
|
||||
{
|
||||
norm[s] = 1;
|
||||
distributed++;
|
||||
total -= count[s];
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
ToDistribute = (uint)((1 << (int)tableLog)) - distributed;
|
||||
}
|
||||
|
||||
if (distributed == maxSymbolValue + 1)
|
||||
{
|
||||
uint maxV = 0, maxC = 0;
|
||||
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
if (count[s] > maxC)
|
||||
{
|
||||
maxV = s;
|
||||
maxC = count[s];
|
||||
}
|
||||
}
|
||||
|
||||
norm[maxV] += (short)(short)(ToDistribute);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (total == 0)
|
||||
{
|
||||
for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
|
||||
{
|
||||
if (norm[s] > 0)
|
||||
{
|
||||
ToDistribute--;
|
||||
norm[s]++;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
ulong vStepLog = 62 - tableLog;
|
||||
ulong mid = (1UL << (int)(vStepLog - 1)) - 1;
|
||||
ulong rStep = (((((ulong)(1) << (int)vStepLog) * ToDistribute) + mid) / ((uint)(total)));
|
||||
ulong tmpTotal = mid;
|
||||
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
if (norm[s] == NOT_YET_ASSIGNED)
|
||||
{
|
||||
ulong end = tmpTotal + (count[s] * rStep);
|
||||
uint sStart = (uint)(tmpTotal >> (int)vStepLog);
|
||||
uint sEnd = (uint)(end >> (int)vStepLog);
|
||||
uint weight = sEnd - sStart;
|
||||
|
||||
if (weight < 1)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
}
|
||||
|
||||
norm[s] = (short)(weight);
|
||||
tmpTotal = end;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*! FSE_normalizeCount():
|
||||
normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
|
||||
'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
|
||||
useLowProbCount is a boolean parameter which trades off compressed size for
|
||||
faster header decoding. When it is set to 1, the compressed data will be slightly
|
||||
smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
|
||||
faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
|
||||
is a good default, since header deserialization makes a big speed difference.
|
||||
Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
|
||||
@return : tableLog,
|
||||
or an errorCode, which can be tested using FSE_isError() */
|
||||
public static nuint FSE_normalizeCount(short* normalizedCounter, uint tableLog, uint* count, nuint total, uint maxSymbolValue, uint useLowProbCount)
|
||||
{
|
||||
if (tableLog == 0)
|
||||
{
|
||||
tableLog = (uint)((13 - 2));
|
||||
}
|
||||
|
||||
if (tableLog < 5)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
}
|
||||
|
||||
if (tableLog > (uint)((14 - 2)))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
|
||||
}
|
||||
|
||||
if (tableLog < FSE_minTableLog(total, maxSymbolValue))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
|
||||
short lowProbCount = (short)(useLowProbCount != 0 ? -1 : 1);
|
||||
ulong scale = 62 - tableLog;
|
||||
ulong step = (((ulong)(1) << 62) / ((uint)(total)));
|
||||
ulong vStep = 1UL << (int)(scale - 20);
|
||||
int stillToDistribute = 1 << (int)tableLog;
|
||||
uint s;
|
||||
uint largest = 0;
|
||||
short largestP = 0;
|
||||
uint lowThreshold = (uint)(total >> (int)tableLog);
|
||||
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
if (count[s] == total)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (count[s] == 0)
|
||||
{
|
||||
normalizedCounter[s] = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (count[s] <= lowThreshold)
|
||||
{
|
||||
normalizedCounter[s] = lowProbCount;
|
||||
stillToDistribute--;
|
||||
}
|
||||
else
|
||||
{
|
||||
short proba = (short)((count[s] * step) >> (int)scale);
|
||||
|
||||
if (proba < 8)
|
||||
{
|
||||
ulong restToBeat = vStep * rtbTable[proba];
|
||||
|
||||
proba += (short)((((count[s] * step) - ((ulong)(proba) << (int)scale) > restToBeat) ? 1 : 0));
|
||||
}
|
||||
|
||||
if (proba > largestP)
|
||||
{
|
||||
largestP = proba;
|
||||
largest = s;
|
||||
}
|
||||
|
||||
normalizedCounter[s] = proba;
|
||||
stillToDistribute -= proba;
|
||||
}
|
||||
}
|
||||
|
||||
if (-stillToDistribute >= (normalizedCounter[largest] >> 1))
|
||||
{
|
||||
nuint errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
|
||||
|
||||
if ((ERR_isError(errorCode)) != 0)
|
||||
{
|
||||
return errorCode;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
normalizedCounter[largest] += (short)(short)(stillToDistribute);
|
||||
}
|
||||
}
|
||||
|
||||
return tableLog;
|
||||
}
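// Illustrative sketch (not part of this diff): normalization rescales raw counts so they sum to exactly
// 1 << tableLog while every symbol that occurs keeps a non-zero slot. This managed simplification skips the
// low-probability (-1) encoding and the FSE_normalizeM2 fallback; it only shows the proportional rounding
// and the final correction folded into the dominant symbol. Assumes using System and System.Linq.
static short[] NormalizeCounts(uint[] count, int tableLog)
{
    long total = count.Sum(c => (long)c);
    if (total == 0) throw new ArgumentException("empty histogram", nameof(count));
    int target = 1 << tableLog;
    var norm = new short[count.Length];
    int distributed = 0, largest = 0;
    for (int s = 0; s < count.Length; s++)
    {
        if (count[s] == 0) continue;                                // absent symbols stay at 0
        int p = (int)Math.Max(1, (long)count[s] * target / total);  // proportional share, at least 1
        norm[s] = (short)p;
        distributed += p;
        if (count[s] > count[largest]) largest = s;
    }
    norm[largest] += (short)(target - distributed);                 // absorb rounding error, as the real code does
    return norm;
}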
|
||||
|
||||
/* fake FSE_CTable, for raw (uncompressed) input */
|
||||
public static nuint FSE_buildCTable_raw(uint* ct, uint nbBits)
|
||||
{
|
||||
uint tableSize = (uint)(1 << (int)nbBits);
|
||||
uint tableMask = tableSize - 1;
|
||||
uint maxSymbolValue = tableMask;
|
||||
void* ptr = (void*)ct;
|
||||
ushort* tableU16 = ((ushort*)(ptr)) + 2;
|
||||
void* FSCT = (void*)(((uint*)(ptr)) + 1 + (tableSize >> 1));
|
||||
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)(FSCT);
|
||||
uint s;
|
||||
|
||||
if (nbBits < 1)
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
|
||||
}
|
||||
|
||||
tableU16[-2] = (ushort)(nbBits);
|
||||
tableU16[-1] = (ushort)(maxSymbolValue);
|
||||
for (s = 0; s < tableSize; s++)
|
||||
{
|
||||
tableU16[s] = (ushort)(tableSize + s);
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
uint deltaNbBits = (nbBits << 16) - (uint)((1 << (int)nbBits));
|
||||
|
||||
for (s = 0; s <= maxSymbolValue; s++)
|
||||
{
|
||||
symbolTT[s].deltaNbBits = deltaNbBits;
|
||||
symbolTT[s].deltaFindState = (int)(s - 1);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* fake FSE_CTable, for rle input (always same symbol) */
|
||||
public static nuint FSE_buildCTable_rle(uint* ct, byte symbolValue)
|
||||
{
|
||||
void* ptr = (void*)ct;
|
||||
ushort* tableU16 = ((ushort*)(ptr)) + 2;
|
||||
void* FSCTptr = (void*)((uint*)(ptr) + 2);
|
||||
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)(FSCTptr);
|
||||
|
||||
tableU16[-2] = (ushort)(0);
|
||||
tableU16[-1] = (ushort)(symbolValue);
|
||||
tableU16[0] = 0;
|
||||
tableU16[1] = 0;
|
||||
symbolTT[symbolValue].deltaNbBits = 0;
|
||||
symbolTT[symbolValue].deltaFindState = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
private static nuint FSE_compress_usingCTable_generic(void* dst, nuint dstSize, void* src, nuint srcSize, uint* ct, uint fast)
|
||||
{
|
||||
byte* istart = (byte*)(src);
|
||||
byte* iend = istart + srcSize;
|
||||
byte* ip = iend;
|
||||
BIT_CStream_t bitC;
|
||||
FSE_CState_t CState1, CState2;
|
||||
|
||||
if (srcSize <= 2)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
nuint initError = BIT_initCStream(&bitC, dst, dstSize);
|
||||
|
||||
if ((ERR_isError(initError)) != 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if ((srcSize & 1) != 0)
|
||||
{
|
||||
FSE_initCState2(&CState1, ct, *--ip);
|
||||
FSE_initCState2(&CState2, ct, *--ip);
|
||||
FSE_encodeSymbol(&bitC, &CState1, *--ip);
|
||||
if (fast != 0)
|
||||
{
|
||||
BIT_flushBitsFast(&bitC);
|
||||
}
|
||||
else
|
||||
{
|
||||
BIT_flushBits(&bitC);
|
||||
}
|
||||
|
||||
}
|
||||
else
|
||||
{
|
||||
FSE_initCState2(&CState2, ct, *--ip);
|
||||
FSE_initCState2(&CState1, ct, *--ip);
|
||||
}
|
||||
|
||||
srcSize -= 2;
|
||||
if (((nuint)(sizeof(nuint)) * 8 > (uint)((14 - 2) * 4 + 7)) && (srcSize & 2) != 0)
|
||||
{
|
||||
FSE_encodeSymbol(&bitC, &CState2, *--ip);
|
||||
FSE_encodeSymbol(&bitC, &CState1, *--ip);
|
||||
if (fast != 0)
|
||||
{
|
||||
BIT_flushBitsFast(&bitC);
|
||||
}
|
||||
else
|
||||
{
|
||||
BIT_flushBits(&bitC);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
while (ip > istart)
|
||||
{
|
||||
FSE_encodeSymbol(&bitC, &CState2, *--ip);
|
||||
if ((nuint)(sizeof(nuint)) * 8 < (uint)((14 - 2) * 2 + 7))
|
||||
{
|
||||
if (fast != 0)
|
||||
{
|
||||
BIT_flushBitsFast(&bitC);
|
||||
}
|
||||
else
|
||||
{
|
||||
BIT_flushBits(&bitC);
|
||||
}
|
||||
}
|
||||
|
||||
FSE_encodeSymbol(&bitC, &CState1, *--ip);
|
||||
if ((nuint)(sizeof(nuint)) * 8 > (uint)((14 - 2) * 4 + 7))
|
||||
{
|
||||
FSE_encodeSymbol(&bitC, &CState2, *--ip);
|
||||
FSE_encodeSymbol(&bitC, &CState1, *--ip);
|
||||
}
|
||||
|
||||
if (fast != 0)
|
||||
{
|
||||
BIT_flushBitsFast(&bitC);
|
||||
}
|
||||
else
|
||||
{
|
||||
BIT_flushBits(&bitC);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
FSE_flushCState(&bitC, &CState2);
|
||||
FSE_flushCState(&bitC, &CState1);
|
||||
return BIT_closeCStream(&bitC);
|
||||
}
|
||||
|
||||
/*! FSE_compress_usingCTable():
|
||||
Compress `src` using `ct` into `dst` which must be already allocated.
|
||||
@return : size of compressed data (<= `dstCapacity`),
|
||||
or 0 if compressed data could not fit into `dst`,
|
||||
or an errorCode, which can be tested using FSE_isError() */
|
||||
public static nuint FSE_compress_usingCTable(void* dst, nuint dstSize, void* src, nuint srcSize, uint* ct)
|
||||
{
|
||||
uint fast = (((dstSize >= ((srcSize) + ((srcSize) >> 7) + 4 + (nuint)(sizeof(nuint))))) ? 1U : 0U);
|
||||
|
||||
if (fast != 0)
|
||||
{
|
||||
return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/*-*****************************************
|
||||
* Tool functions
|
||||
******************************************/
|
||||
public static nuint FSE_compressBound(nuint size)
|
||||
{
|
||||
return (512 + ((size) + ((size) >> 7) + 4 + (nuint)(sizeof(nuint))));
|
||||
}
|
||||
|
||||
/* FSE_compress_wksp() :
* Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
* `wkspSize` must be `(1<<tableLog)`.
*/
|
||||
public static nuint FSE_compress_wksp(void* dst, nuint dstSize, void* src, nuint srcSize, uint maxSymbolValue, uint tableLog, void* workSpace, nuint wkspSize)
|
||||
{
|
||||
byte* ostart = (byte*)(dst);
|
||||
byte* op = ostart;
|
||||
byte* oend = ostart + dstSize;
|
||||
uint* count = stackalloc uint[256];
|
||||
short* norm = stackalloc short[256];
|
||||
uint* CTable = (uint*)(workSpace);
|
||||
nuint CTableSize = ((uint)(1 + (1 << (int)((tableLog) - 1))) + (((maxSymbolValue) + 1) * 2));
|
||||
void* scratchBuffer = (void*)(CTable + CTableSize);
|
||||
nuint scratchBufferSize = wkspSize - (CTableSize * (nuint)(4));
|
||||
|
||||
if (wkspSize < (((uint)(1 + (1 << (int)((tableLog) - 1))) + (((maxSymbolValue) + 1) * 2)) + (uint)(((tableLog > 12) ? (1 << (int)(tableLog - 2)) : 1024))))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
|
||||
}
|
||||
|
||||
if (srcSize <= 1)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (maxSymbolValue == 0)
|
||||
{
|
||||
maxSymbolValue = 255;
|
||||
}
|
||||
|
||||
if (tableLog == 0)
|
||||
{
|
||||
tableLog = (uint)((13 - 2));
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
nuint maxCount = HIST_count_wksp((uint*)count, &maxSymbolValue, src, srcSize, scratchBuffer, scratchBufferSize);
|
||||
|
||||
if ((ERR_isError(maxCount)) != 0)
|
||||
{
|
||||
return maxCount;
|
||||
}
|
||||
|
||||
if (maxCount == srcSize)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (maxCount == 1)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (maxCount < (srcSize >> 7))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
|
||||
|
||||
{
|
||||
nuint _var_err__ = FSE_normalizeCount((short*)norm, tableLog, (uint*)count, srcSize, maxSymbolValue, ((srcSize >= 2048) ? 1U : 0U));
|
||||
|
||||
if ((ERR_isError(_var_err__)) != 0)
|
||||
{
|
||||
return _var_err__;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
nuint nc_err = FSE_writeNCount((void*)op, (nuint)(oend - op), (short*)norm, maxSymbolValue, tableLog);
|
||||
|
||||
if ((ERR_isError(nc_err)) != 0)
|
||||
{
|
||||
return nc_err;
|
||||
}
|
||||
|
||||
op += nc_err;
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
nuint _var_err__ = FSE_buildCTable_wksp(CTable, (short*)norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize);
|
||||
|
||||
if ((ERR_isError(_var_err__)) != 0)
|
||||
{
|
||||
return _var_err__;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
{
|
||||
nuint cSize = FSE_compress_usingCTable((void*)op, (nuint)(oend - op), src, srcSize, CTable);
|
||||
|
||||
if ((ERR_isError(cSize)) != 0)
|
||||
{
|
||||
return cSize;
|
||||
}
|
||||
|
||||
if (cSize == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
op += cSize;
|
||||
}
|
||||
|
||||
if ((nuint)(op - ostart) >= srcSize - 1)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
return (nuint)(op - ostart);
|
||||
}
|
||||
|
||||
/*-*****************************************
|
||||
* FSE advanced functions
|
||||
******************************************/
|
||||
/*! FSE_compress2() :
|
||||
Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
|
||||
Both parameters can be defined as '0' to mean : use default value
|
||||
@return : size of compressed data
|
||||
Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
|
||||
if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
|
||||
if FSE_isError(return), it's an error code.
|
||||
*/
|
||||
public static nuint FSE_compress2(void* dst, nuint dstCapacity, void* src, nuint srcSize, uint maxSymbolValue, uint tableLog)
|
||||
{
|
||||
fseWkspMax_t scratchBuffer;
|
||||
|
||||
if (tableLog > (uint)((14 - 2)))
|
||||
{
|
||||
return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
|
||||
}
|
||||
|
||||
return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, (void*)&scratchBuffer, (nuint)(sizeof(fseWkspMax_t)));
|
||||
}
|
||||
|
||||
/*-****************************************
|
||||
* FSE simple functions
|
||||
******************************************/
|
||||
/*! FSE_compress() :
|
||||
Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
|
||||
'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
|
||||
@return : size of compressed data (<= dstCapacity).
|
||||
Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
|
||||
if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
|
||||
if FSE_isError(return), compression failed (more details using FSE_getErrorName())
|
||||
*/
|
||||
public static nuint FSE_compress(void* dst, nuint dstCapacity, void* src, nuint srcSize)
|
||||
{
|
||||
return FSE_compress2(dst, dstCapacity, src, srcSize, 255, (uint)((13 - 2)));
|
||||
}
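// Illustrative usage sketch (not part of this diff): FSE_compress reports "not compressible" as 0 and
// "input is a single repeated byte" as 1, so a caller must branch on those before trusting the size.
// Assumes FSE_compressBound and ERR_isError are reachable on Methods as shown in this file; needs /unsafe.
public static unsafe byte[] TryFseCompress(byte[] input)
{
    byte[] output = new byte[(int)Methods.FSE_compressBound((nuint)input.Length)];
    nuint written;
    fixed (byte* src = input)
    fixed (byte* dst = output)
    {
        written = Methods.FSE_compress(dst, (nuint)output.Length, src, (nuint)input.Length);
    }
    if (Methods.ERR_isError(written) != 0)
        throw new InvalidOperationException("FSE compression failed");
    if (written == 0) return null;                    // incompressible: caller should store the input raw
    if (written == 1) return new[] { input[0] };      // RLE case: one symbol repeated srcSize times
    Array.Resize(ref output, (int)written);
    return output;
}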
|
||||
}
|
||||
}
|
||||
446 src/SharpCompress/Compressors/Zstd/Unsafe/FseDecompress.cs Normal file
@@ -0,0 +1,446 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using static ZstdSharp.UnsafeHelper;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public static unsafe partial class Methods
|
||||
{
|
||||
/* Function templates */
|
||||
public static uint* FSE_createDTable(uint tableLog)
|
||||
{
|
||||
if (tableLog > 15)
|
||||
{
|
||||
tableLog = 15;
|
||||
}
|
||||
|
||||
return (uint*)(malloc((uint)((1 + (1 << (int)(tableLog)))) * (nuint)(sizeof(uint))));
|
||||
}
|
||||
|
||||
public static void FSE_freeDTable(uint* dt)
|
||||
{
|
||||
free((void*)(dt));
|
||||
}
|
||||
|
||||
        private static nuint FSE_buildDTable_internal(uint* dt, short* normalizedCounter, uint maxSymbolValue, uint tableLog, void* workSpace, nuint wkspSize)
        {
            void* tdPtr = (void*)(dt + 1);
            FSE_decode_t* tableDecode = (FSE_decode_t*)(tdPtr);
            ushort* symbolNext = (ushort*)(workSpace);
            byte* spread = (byte*)(symbolNext + maxSymbolValue + 1);
            uint maxSV1 = maxSymbolValue + 1;
            uint tableSize = (uint)(1 << (int)tableLog);
            uint highThreshold = tableSize - 1;

            if (((nuint)(sizeof(short)) * (maxSymbolValue + 1) + (1UL << (int)tableLog) + 8) > wkspSize)
            {
                return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)));
            }

            if (maxSymbolValue > 255)
            {
                return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)));
            }

            if (tableLog > (uint)((14 - 2)))
            {
                return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
            }

            {
                FSE_DTableHeader DTableH;

                DTableH.tableLog = (ushort)(tableLog);
                DTableH.fastMode = 1;

                {
                    short largeLimit = (short)(1 << (int)(tableLog - 1));
                    uint s;

                    for (s = 0; s < maxSV1; s++)
                    {
                        if (normalizedCounter[s] == -1)
                        {
                            tableDecode[highThreshold--].symbol = (byte)(s);
                            symbolNext[s] = 1;
                        }
                        else
                        {
                            if (normalizedCounter[s] >= largeLimit)
                            {
                                DTableH.fastMode = 0;
                            }

                            symbolNext[s] = (ushort)(normalizedCounter[s]);
                        }
                    }
                }

                memcpy((void*)(dt), (void*)(&DTableH), ((nuint)(sizeof(FSE_DTableHeader))));
            }

            if (highThreshold == tableSize - 1)
            {
                nuint tableMask = tableSize - 1;
                nuint step = (((tableSize) >> 1) + ((tableSize) >> 3) + 3);

                {
                    ulong add = 0x0101010101010101UL;
                    nuint pos = 0;
                    ulong sv = 0;
                    uint s;

                    for (s = 0; s < maxSV1; ++s, sv += add)
                    {
                        int i;
                        int n = normalizedCounter[s];

                        MEM_write64((void*)(spread + pos), sv);
                        for (i = 8; i < n; i += 8)
                        {
                            MEM_write64((void*)(spread + pos + i), sv);
                        }

                        pos += (nuint)n;
                    }
                }

                {
                    nuint position = 0;
                    nuint s;
                    nuint unroll = 2;

                    assert(tableSize % unroll == 0);
                    for (s = 0; s < (nuint)(tableSize); s += unroll)
                    {
                        nuint u;

                        for (u = 0; u < unroll; ++u)
                        {
                            nuint uPosition = (position + (u * step)) & tableMask;

                            tableDecode[uPosition].symbol = spread[s + u];
                        }

                        position = (position + (unroll * step)) & tableMask;
                    }

                    assert(position == 0);
                }
            }
            else
            {
                uint tableMask = tableSize - 1;
                uint step = (((tableSize) >> 1) + ((tableSize) >> 3) + 3);
                uint s, position = 0;

                for (s = 0; s < maxSV1; s++)
                {
                    int i;

                    for (i = 0; i < normalizedCounter[s]; i++)
                    {
                        tableDecode[position].symbol = (byte)(s);
                        position = (position + step) & tableMask;
                        while (position > highThreshold)
                        {
                            position = (position + step) & tableMask;
                        }
                    }
                }

                if (position != 0)
                {
                    return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
                }
            }

            {
                uint u;

                for (u = 0; u < tableSize; u++)
                {
                    byte symbol = (byte)(tableDecode[u].symbol);
                    uint nextState = symbolNext[symbol]++;

                    tableDecode[u].nbBits = (byte)(tableLog - BIT_highbit32(nextState));
                    tableDecode[u].newState = (ushort)((nextState << (int)(tableDecode[u].nbBits)) - tableSize);
                }
            }

            return 0;
        }

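        /* Editorial note, not from the upstream sources: the spreading loops above rely on
           step = (tableSize >> 1) + (tableSize >> 3) + 3 being odd for every table size used
           here (tableSize >= 16), so step is coprime with the power-of-two tableSize and
           "position = (position + step) & tableMask" visits every slot exactly once before
           wrapping back around. For example, with tableSize = 16 the step is 8 + 2 + 3 = 13
           and the walk is 0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3, 0 - all
           sixteen slots once, then back to the start, which is why the build can check that
           position == 0 at the end. */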
        public static nuint FSE_buildDTable_wksp(uint* dt, short* normalizedCounter, uint maxSymbolValue, uint tableLog, void* workSpace, nuint wkspSize)
        {
            return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
        }

        /*-*******************************************************
        *  Decompression (Byte symbols)
        *********************************************************/
        public static nuint FSE_buildDTable_rle(uint* dt, byte symbolValue)
        {
            void* ptr = (void*)dt;
            FSE_DTableHeader* DTableH = (FSE_DTableHeader*)(ptr);
            void* dPtr = (void*)(dt + 1);
            FSE_decode_t* cell = (FSE_decode_t*)(dPtr);

            DTableH->tableLog = 0;
            DTableH->fastMode = 0;
            cell->newState = 0;
            cell->symbol = symbolValue;
            cell->nbBits = 0;
            return 0;
        }

        public static nuint FSE_buildDTable_raw(uint* dt, uint nbBits)
        {
            void* ptr = (void*)dt;
            FSE_DTableHeader* DTableH = (FSE_DTableHeader*)(ptr);
            void* dPtr = (void*)(dt + 1);
            FSE_decode_t* dinfo = (FSE_decode_t*)(dPtr);
            uint tableSize = (uint)(1 << (int)nbBits);
            uint tableMask = tableSize - 1;
            uint maxSV1 = tableMask + 1;
            uint s;

            if (nbBits < 1)
            {
                return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
            }

            DTableH->tableLog = (ushort)(nbBits);
            DTableH->fastMode = 1;
            for (s = 0; s < maxSV1; s++)
            {
                dinfo[s].newState = 0;
                dinfo[s].symbol = (byte)(s);
                dinfo[s].nbBits = (byte)(nbBits);
            }

            return 0;
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static nuint FSE_decompress_usingDTable_generic(void* dst, nuint maxDstSize, void* cSrc, nuint cSrcSize, uint* dt, uint fast)
        {
            byte* ostart = (byte*)(dst);
            byte* op = ostart;
            byte* omax = op + maxDstSize;
            byte* olimit = omax - 3;
            BIT_DStream_t bitD;
            FSE_DState_t state1;
            FSE_DState_t state2;

            {
                nuint _var_err__ = BIT_initDStream(&bitD, cSrc, cSrcSize);

                if ((ERR_isError(_var_err__)) != 0)
                {
                    return _var_err__;
                }
            }

            FSE_initDState(&state1, &bitD, dt);
            FSE_initDState(&state2, &bitD, dt);
            for (; ((BIT_reloadDStream(&bitD) == BIT_DStream_status.BIT_DStream_unfinished) && (op < olimit)); op += 4)
            {
                op[0] = fast != 0 ? FSE_decodeSymbolFast(&state1, &bitD) : FSE_decodeSymbol(&state1, &bitD);
                if ((uint)((14 - 2) * 2 + 7) > (nuint)(sizeof(nuint)) * 8)
                {
                    BIT_reloadDStream(&bitD);
                }

                op[1] = fast != 0 ? FSE_decodeSymbolFast(&state2, &bitD) : FSE_decodeSymbol(&state2, &bitD);
                if ((uint)((14 - 2) * 4 + 7) > (nuint)(sizeof(nuint)) * 8)
                {
                    if (BIT_reloadDStream(&bitD) > BIT_DStream_status.BIT_DStream_unfinished)
                    {
                        op += 2;
                        break;
                    }
                }

                op[2] = fast != 0 ? FSE_decodeSymbolFast(&state1, &bitD) : FSE_decodeSymbol(&state1, &bitD);
                if ((uint)((14 - 2) * 2 + 7) > (nuint)(sizeof(nuint)) * 8)
                {
                    BIT_reloadDStream(&bitD);
                }

                op[3] = fast != 0 ? FSE_decodeSymbolFast(&state2, &bitD) : FSE_decodeSymbol(&state2, &bitD);
            }

            while (1 != 0)
            {
                if (op > (omax - 2))
                {
                    return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
                }

                *op++ = fast != 0 ? FSE_decodeSymbolFast(&state1, &bitD) : FSE_decodeSymbol(&state1, &bitD);
                if (BIT_reloadDStream(&bitD) == BIT_DStream_status.BIT_DStream_overflow)
                {
                    *op++ = fast != 0 ? FSE_decodeSymbolFast(&state2, &bitD) : FSE_decodeSymbol(&state2, &bitD);
                    break;
                }

                if (op > (omax - 2))
                {
                    return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
                }

                *op++ = fast != 0 ? FSE_decodeSymbolFast(&state2, &bitD) : FSE_decodeSymbol(&state2, &bitD);
                if (BIT_reloadDStream(&bitD) == BIT_DStream_status.BIT_DStream_overflow)
                {
                    *op++ = fast != 0 ? FSE_decodeSymbolFast(&state1, &bitD) : FSE_decodeSymbol(&state1, &bitD);
                    break;
                }
            }

            return (nuint)(op - ostart);
        }

        /*! FSE_decompress_usingDTable():
            Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
            into `dst` which must be already allocated.
            @return : size of regenerated data (necessarily <= `dstCapacity`),
            or an errorCode, which can be tested using FSE_isError() */
        public static nuint FSE_decompress_usingDTable(void* dst, nuint originalSize, void* cSrc, nuint cSrcSize, uint* dt)
        {
            void* ptr = (void*)dt;
            FSE_DTableHeader* DTableH = (FSE_DTableHeader*)(ptr);
            uint fastMode = DTableH->fastMode;

            if (fastMode != 0)
            {
                return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
            }

            return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
        }

        public static nuint FSE_decompress_wksp(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize)
        {
            return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static nuint FSE_decompress_wksp_body(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize, int bmi2)
        {
            byte* istart = (byte*)(cSrc);
            byte* ip = istart;
            uint tableLog;
            uint maxSymbolValue = 255;
            FSE_DecompressWksp* wksp = (FSE_DecompressWksp*)(workSpace);

            if (wkspSize < (nuint)(sizeof(FSE_DecompressWksp)))
            {
                return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
            }

            {
                nuint NCountLength = FSE_readNCount_bmi2((short*)wksp->ncount, &maxSymbolValue, &tableLog, (void*)istart, cSrcSize, bmi2);

                if ((ERR_isError(NCountLength)) != 0)
                {
                    return NCountLength;
                }

                if (tableLog > maxLog)
                {
                    return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
                }

                assert(NCountLength <= cSrcSize);
                ip += NCountLength;
                cSrcSize -= NCountLength;
            }

            if ((((uint)((1 + (1 << (int)(tableLog)))) + ((((nuint)(sizeof(short)) * (maxSymbolValue + 1) + (1UL << (int)tableLog) + 8) + (nuint)(sizeof(uint)) - 1) / (nuint)(sizeof(uint))) + (uint)((255 + 1) / 2) + 1) * (nuint)(sizeof(uint))) > wkspSize)
            {
                return (unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
            }

            workSpace = wksp->dtable + (1 + (1 << (int)(tableLog)));
            wkspSize -= (nuint)(sizeof(FSE_DecompressWksp)) + ((uint)((1 + (1 << (int)(tableLog)))) * (nuint)(sizeof(uint)));

            {
                nuint _var_err__ = FSE_buildDTable_internal((uint*)wksp->dtable, (short*)wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize);

                if ((ERR_isError(_var_err__)) != 0)
                {
                    return _var_err__;
                }
            }

            {
                void* ptr = (void*)wksp->dtable;
                FSE_DTableHeader* DTableH = (FSE_DTableHeader*)(ptr);
                uint fastMode = DTableH->fastMode;

                if (fastMode != 0)
                {
                    return FSE_decompress_usingDTable_generic(dst, dstCapacity, (void*)ip, cSrcSize, (uint*)wksp->dtable, 1);
                }

                return FSE_decompress_usingDTable_generic(dst, dstCapacity, (void*)ip, cSrcSize, (uint*)wksp->dtable, 0);
            }
        }

        /* Avoids the FORCE_INLINE of the _body() function. */
        private static nuint FSE_decompress_wksp_body_default(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize)
        {
            return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
        }

        private static nuint FSE_decompress_wksp_body_bmi2(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize)
        {
            return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
        }

        public static nuint FSE_decompress_wksp_bmi2(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize, int bmi2)
        {
            if (bmi2 != 0)
            {
                return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
            }

            return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
        }

        /*! FSE_buildDTable():
            Builds 'dt', which must be already allocated, using FSE_createDTable().
            return : 0, or an errorCode, which can be tested using FSE_isError() */
        public static nuint FSE_buildDTable(uint* dt, short* normalizedCounter, uint maxSymbolValue, uint tableLog)
        {
            uint* wksp = stackalloc uint[8322];

            return FSE_buildDTable_wksp(dt, normalizedCounter, maxSymbolValue, tableLog, (void*)wksp, (nuint)(sizeof(uint) * 8322));
        }

        /*! FSE_decompress():
            Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
            into already allocated destination buffer 'dst', of size 'dstCapacity'.
            @return : size of regenerated data (<= maxDstSize),
            or an error code, which can be tested using FSE_isError() .

            ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
            Why ? : making this distinction requires a header.
            Header management is intentionally delegated to the user layer, which can better manage special cases.
        */
        public static nuint FSE_decompress(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize)
        {
            uint* wksp = stackalloc uint[5380];

            return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, (uint)((14 - 2)), (void*)wksp, (nuint)(sizeof(uint) * 5380));
        }
    }
}
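To tie the two new files together, an illustrative round trip (not part of the diff): FSE_decompress deliberately handles neither the "not compressible" nor the RLE outcome, so a caller only feeds it data for which FSE_compress returned a real payload size. The wrapper below is a sketch; its buffer sizing and the assumption that ERR_isError is accessible on Methods are not taken from the ported sources.

using System;
using ZstdSharp.Unsafe;

internal static class FseRoundTripSketch
{
    // Hypothetical round trip: only decompress when FSE_compress produced an actual FSE payload.
    public static unsafe bool TryRoundTrip(byte[] src)
    {
        var compressed = new byte[src.Length * 2 + 512];   // assumed large enough for this sketch
        var restored = new byte[src.Length];

        fixed (byte* ip = src)
        fixed (byte* cp = compressed)
        fixed (byte* op = restored)
        {
            nuint cSize = Methods.FSE_compress(cp, (nuint)compressed.Length, ip, (nuint)src.Length);
            if (Methods.ERR_isError(cSize) != 0 || cSize <= 1)
            {
                return false;   // error, not compressible, or RLE: the caller must handle these cases itself
            }

            nuint dSize = Methods.FSE_decompress(op, (nuint)restored.Length, cp, cSize);
            return Methods.ERR_isError(dSize) == 0 && dSize == (nuint)src.Length;
        }
    }
}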
@@ -0,0 +1,10 @@
using System;

namespace ZstdSharp.Unsafe
{
    public enum HIST_checkInput_e
    {
        trustInput,
        checkMaxSymbolValue,
    }
}
13
src/SharpCompress/Compressors/Zstd/Unsafe/HUF_CElt_s.cs
Normal file
13
src/SharpCompress/Compressors/Zstd/Unsafe/HUF_CElt_s.cs
Normal file
@@ -0,0 +1,13 @@
using System;

namespace ZstdSharp.Unsafe
{
    /* static allocation of HUF's Compression Table */
    /* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
    public partial struct HUF_CElt_s
    {
        public ushort val;

        public byte nbBits;
    }
}
@@ -0,0 +1,15 @@
using System;

namespace ZstdSharp.Unsafe
{
    public unsafe partial struct HUF_CompressWeightsWksp
    {
        public fixed uint CTable[59];

        public fixed uint scratchBuffer[30];

        public fixed uint count[13];

        public fixed short norm[13];
    }
}
16
src/SharpCompress/Compressors/Zstd/Unsafe/HUF_DEltX1.cs
Normal file
16
src/SharpCompress/Compressors/Zstd/Unsafe/HUF_DEltX1.cs
Normal file
@@ -0,0 +1,16 @@
using System;

namespace ZstdSharp.Unsafe
{
    /*-***************************/
    /*  single-symbol decoding   */
    /*-***************************/
    public partial struct HUF_DEltX1
    {
        /* single-symbol decoding */
        public byte @byte;

        /* single-symbol decoding */
        public byte nbBits;
    }
}
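For orientation (not from the diff): each HUF_DEltX1 entry pairs the decoded byte with the number of bits its code consumes, so a single-symbol decode step is one table lookup followed by a bit-drop. The sketch below is schematic; the MSB-first bit container and the assumption that the table was built with 'tableLog' significant bits are illustrative, not the ported HUF decoder.

using ZstdSharp.Unsafe;

internal static class HufX1DecodeSketch
{
    // Schematic decode step: peek the top 'tableLog' bits, emit the stored byte,
    // then consume only the bits that this symbol's code actually used.
    public static unsafe byte DecodeOne(HUF_DEltX1* table, int tableLog, ref ulong bitContainer, ref int bitsLeft)
    {
        uint index = (uint)(bitContainer >> (64 - tableLog));   // assumes 1 <= tableLog <= 12
        HUF_DEltX1 entry = table[index];

        bitContainer <<= entry.nbBits;                          // drop the consumed bits
        bitsLeft -= entry.nbBits;

        return entry.@byte;
    }
}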
19
src/SharpCompress/Compressors/Zstd/Unsafe/HUF_DEltX2.cs
Normal file
19
src/SharpCompress/Compressors/Zstd/Unsafe/HUF_DEltX2.cs
Normal file
@@ -0,0 +1,19 @@
using System;

namespace ZstdSharp.Unsafe
{
    /* *************************/
    /* double-symbols decoding */
    /* *************************/
    public partial struct HUF_DEltX2
    {
        /* double-symbols decoding */
        public ushort sequence;

        /* double-symbols decoding */
        public byte nbBits;

        /* double-symbols decoding */
        public byte length;
    }
}
@@ -0,0 +1,17 @@
using System;

namespace ZstdSharp.Unsafe
{
    public unsafe partial struct HUF_ReadDTableX1_Workspace
    {
        public fixed uint rankVal[16];

        public fixed uint rankStart[16];

        public fixed uint statsWksp[218];

        public fixed byte symbols[256];

        public fixed byte huffWeight[256];
    }
}
@@ -0,0 +1,389 @@
|
||||
using InlineIL;
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
using static InlineIL.IL.Emit;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public unsafe partial struct HUF_ReadDTableX2_Workspace
|
||||
{
|
||||
public _rankVal_e__FixedBuffer rankVal;
|
||||
|
||||
public fixed uint rankStats[13];
|
||||
|
||||
public fixed uint rankStart0[14];
|
||||
|
||||
public _sortedSymbol_e__FixedBuffer sortedSymbol;
|
||||
|
||||
public fixed byte weightList[256];
|
||||
|
||||
public fixed uint calleeWksp[218];
|
||||
|
||||
public unsafe partial struct _rankVal_e__FixedBuffer
|
||||
{
|
||||
public rankValCol_t e0;
|
||||
public rankValCol_t e1;
|
||||
public rankValCol_t e2;
|
||||
public rankValCol_t e3;
|
||||
public rankValCol_t e4;
|
||||
public rankValCol_t e5;
|
||||
public rankValCol_t e6;
|
||||
public rankValCol_t e7;
|
||||
public rankValCol_t e8;
|
||||
public rankValCol_t e9;
|
||||
public rankValCol_t e10;
|
||||
public rankValCol_t e11;
|
||||
|
||||
public ref rankValCol_t this[int index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + (uint)index);
|
||||
}
|
||||
|
||||
public ref rankValCol_t this[uint index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + index);
|
||||
}
|
||||
|
||||
public ref rankValCol_t this[nuint index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + (uint)index);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
public static implicit operator rankValCol_t*(in _rankVal_e__FixedBuffer t)
|
||||
{
|
||||
Ldarg_0();
|
||||
Ldflda(new FieldRef(typeof(_rankVal_e__FixedBuffer), nameof(e0)));
|
||||
return IL.ReturnPointer<rankValCol_t>();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
public static rankValCol_t* operator +(in _rankVal_e__FixedBuffer t, uint index)
|
||||
{
|
||||
Ldarg_0();
|
||||
Ldflda(new FieldRef(typeof(_rankVal_e__FixedBuffer), nameof(e0)));
|
||||
Ldarg_1();
|
||||
Conv_I();
|
||||
Sizeof<rankValCol_t>();
|
||||
Conv_I();
|
||||
Mul();
|
||||
Add();
|
||||
return IL.ReturnPointer<rankValCol_t>();
|
||||
}
|
||||
}
|
||||
|
||||
public unsafe partial struct _sortedSymbol_e__FixedBuffer
|
||||
{
|
||||
public sortedSymbol_t e0;
|
||||
public sortedSymbol_t e1;
|
||||
public sortedSymbol_t e2;
|
||||
public sortedSymbol_t e3;
|
||||
public sortedSymbol_t e4;
|
||||
public sortedSymbol_t e5;
|
||||
public sortedSymbol_t e6;
|
||||
public sortedSymbol_t e7;
|
||||
public sortedSymbol_t e8;
|
||||
public sortedSymbol_t e9;
|
||||
public sortedSymbol_t e10;
|
||||
public sortedSymbol_t e11;
|
||||
public sortedSymbol_t e12;
|
||||
public sortedSymbol_t e13;
|
||||
public sortedSymbol_t e14;
|
||||
public sortedSymbol_t e15;
|
||||
public sortedSymbol_t e16;
|
||||
public sortedSymbol_t e17;
|
||||
public sortedSymbol_t e18;
|
||||
public sortedSymbol_t e19;
|
||||
public sortedSymbol_t e20;
|
||||
public sortedSymbol_t e21;
|
||||
public sortedSymbol_t e22;
|
||||
public sortedSymbol_t e23;
|
||||
public sortedSymbol_t e24;
|
||||
public sortedSymbol_t e25;
|
||||
public sortedSymbol_t e26;
|
||||
public sortedSymbol_t e27;
|
||||
public sortedSymbol_t e28;
|
||||
public sortedSymbol_t e29;
|
||||
public sortedSymbol_t e30;
|
||||
public sortedSymbol_t e31;
|
||||
public sortedSymbol_t e32;
|
||||
public sortedSymbol_t e33;
|
||||
public sortedSymbol_t e34;
|
||||
public sortedSymbol_t e35;
|
||||
public sortedSymbol_t e36;
|
||||
public sortedSymbol_t e37;
|
||||
public sortedSymbol_t e38;
|
||||
public sortedSymbol_t e39;
|
||||
public sortedSymbol_t e40;
|
||||
public sortedSymbol_t e41;
|
||||
public sortedSymbol_t e42;
|
||||
public sortedSymbol_t e43;
|
||||
public sortedSymbol_t e44;
|
||||
public sortedSymbol_t e45;
|
||||
public sortedSymbol_t e46;
|
||||
public sortedSymbol_t e47;
|
||||
public sortedSymbol_t e48;
|
||||
public sortedSymbol_t e49;
|
||||
public sortedSymbol_t e50;
|
||||
public sortedSymbol_t e51;
|
||||
public sortedSymbol_t e52;
|
||||
public sortedSymbol_t e53;
|
||||
public sortedSymbol_t e54;
|
||||
public sortedSymbol_t e55;
|
||||
public sortedSymbol_t e56;
|
||||
public sortedSymbol_t e57;
|
||||
public sortedSymbol_t e58;
|
||||
public sortedSymbol_t e59;
|
||||
public sortedSymbol_t e60;
|
||||
public sortedSymbol_t e61;
|
||||
public sortedSymbol_t e62;
|
||||
public sortedSymbol_t e63;
|
||||
public sortedSymbol_t e64;
|
||||
public sortedSymbol_t e65;
|
||||
public sortedSymbol_t e66;
|
||||
public sortedSymbol_t e67;
|
||||
public sortedSymbol_t e68;
|
||||
public sortedSymbol_t e69;
|
||||
public sortedSymbol_t e70;
|
||||
public sortedSymbol_t e71;
|
||||
public sortedSymbol_t e72;
|
||||
public sortedSymbol_t e73;
|
||||
public sortedSymbol_t e74;
|
||||
public sortedSymbol_t e75;
|
||||
public sortedSymbol_t e76;
|
||||
public sortedSymbol_t e77;
|
||||
public sortedSymbol_t e78;
|
||||
public sortedSymbol_t e79;
|
||||
public sortedSymbol_t e80;
|
||||
public sortedSymbol_t e81;
|
||||
public sortedSymbol_t e82;
|
||||
public sortedSymbol_t e83;
|
||||
public sortedSymbol_t e84;
|
||||
public sortedSymbol_t e85;
|
||||
public sortedSymbol_t e86;
|
||||
public sortedSymbol_t e87;
|
||||
public sortedSymbol_t e88;
|
||||
public sortedSymbol_t e89;
|
||||
public sortedSymbol_t e90;
|
||||
public sortedSymbol_t e91;
|
||||
public sortedSymbol_t e92;
|
||||
public sortedSymbol_t e93;
|
||||
public sortedSymbol_t e94;
|
||||
public sortedSymbol_t e95;
|
||||
public sortedSymbol_t e96;
|
||||
public sortedSymbol_t e97;
|
||||
public sortedSymbol_t e98;
|
||||
public sortedSymbol_t e99;
|
||||
public sortedSymbol_t e100;
|
||||
public sortedSymbol_t e101;
|
||||
public sortedSymbol_t e102;
|
||||
public sortedSymbol_t e103;
|
||||
public sortedSymbol_t e104;
|
||||
public sortedSymbol_t e105;
|
||||
public sortedSymbol_t e106;
|
||||
public sortedSymbol_t e107;
|
||||
public sortedSymbol_t e108;
|
||||
public sortedSymbol_t e109;
|
||||
public sortedSymbol_t e110;
|
||||
public sortedSymbol_t e111;
|
||||
public sortedSymbol_t e112;
|
||||
public sortedSymbol_t e113;
|
||||
public sortedSymbol_t e114;
|
||||
public sortedSymbol_t e115;
|
||||
public sortedSymbol_t e116;
|
||||
public sortedSymbol_t e117;
|
||||
public sortedSymbol_t e118;
|
||||
public sortedSymbol_t e119;
|
||||
public sortedSymbol_t e120;
|
||||
public sortedSymbol_t e121;
|
||||
public sortedSymbol_t e122;
|
||||
public sortedSymbol_t e123;
|
||||
public sortedSymbol_t e124;
|
||||
public sortedSymbol_t e125;
|
||||
public sortedSymbol_t e126;
|
||||
public sortedSymbol_t e127;
|
||||
public sortedSymbol_t e128;
|
||||
public sortedSymbol_t e129;
|
||||
public sortedSymbol_t e130;
|
||||
public sortedSymbol_t e131;
|
||||
public sortedSymbol_t e132;
|
||||
public sortedSymbol_t e133;
|
||||
public sortedSymbol_t e134;
|
||||
public sortedSymbol_t e135;
|
||||
public sortedSymbol_t e136;
|
||||
public sortedSymbol_t e137;
|
||||
public sortedSymbol_t e138;
|
||||
public sortedSymbol_t e139;
|
||||
public sortedSymbol_t e140;
|
||||
public sortedSymbol_t e141;
|
||||
public sortedSymbol_t e142;
|
||||
public sortedSymbol_t e143;
|
||||
public sortedSymbol_t e144;
|
||||
public sortedSymbol_t e145;
|
||||
public sortedSymbol_t e146;
|
||||
public sortedSymbol_t e147;
|
||||
public sortedSymbol_t e148;
|
||||
public sortedSymbol_t e149;
|
||||
public sortedSymbol_t e150;
|
||||
public sortedSymbol_t e151;
|
||||
public sortedSymbol_t e152;
|
||||
public sortedSymbol_t e153;
|
||||
public sortedSymbol_t e154;
|
||||
public sortedSymbol_t e155;
|
||||
public sortedSymbol_t e156;
|
||||
public sortedSymbol_t e157;
|
||||
public sortedSymbol_t e158;
|
||||
public sortedSymbol_t e159;
|
||||
public sortedSymbol_t e160;
|
||||
public sortedSymbol_t e161;
|
||||
public sortedSymbol_t e162;
|
||||
public sortedSymbol_t e163;
|
||||
public sortedSymbol_t e164;
|
||||
public sortedSymbol_t e165;
|
||||
public sortedSymbol_t e166;
|
||||
public sortedSymbol_t e167;
|
||||
public sortedSymbol_t e168;
|
||||
public sortedSymbol_t e169;
|
||||
public sortedSymbol_t e170;
|
||||
public sortedSymbol_t e171;
|
||||
public sortedSymbol_t e172;
|
||||
public sortedSymbol_t e173;
|
||||
public sortedSymbol_t e174;
|
||||
public sortedSymbol_t e175;
|
||||
public sortedSymbol_t e176;
|
||||
public sortedSymbol_t e177;
|
||||
public sortedSymbol_t e178;
|
||||
public sortedSymbol_t e179;
|
||||
public sortedSymbol_t e180;
|
||||
public sortedSymbol_t e181;
|
||||
public sortedSymbol_t e182;
|
||||
public sortedSymbol_t e183;
|
||||
public sortedSymbol_t e184;
|
||||
public sortedSymbol_t e185;
|
||||
public sortedSymbol_t e186;
|
||||
public sortedSymbol_t e187;
|
||||
public sortedSymbol_t e188;
|
||||
public sortedSymbol_t e189;
|
||||
public sortedSymbol_t e190;
|
||||
public sortedSymbol_t e191;
|
||||
public sortedSymbol_t e192;
|
||||
public sortedSymbol_t e193;
|
||||
public sortedSymbol_t e194;
|
||||
public sortedSymbol_t e195;
|
||||
public sortedSymbol_t e196;
|
||||
public sortedSymbol_t e197;
|
||||
public sortedSymbol_t e198;
|
||||
public sortedSymbol_t e199;
|
||||
public sortedSymbol_t e200;
|
||||
public sortedSymbol_t e201;
|
||||
public sortedSymbol_t e202;
|
||||
public sortedSymbol_t e203;
|
||||
public sortedSymbol_t e204;
|
||||
public sortedSymbol_t e205;
|
||||
public sortedSymbol_t e206;
|
||||
public sortedSymbol_t e207;
|
||||
public sortedSymbol_t e208;
|
||||
public sortedSymbol_t e209;
|
||||
public sortedSymbol_t e210;
|
||||
public sortedSymbol_t e211;
|
||||
public sortedSymbol_t e212;
|
||||
public sortedSymbol_t e213;
|
||||
public sortedSymbol_t e214;
|
||||
public sortedSymbol_t e215;
|
||||
public sortedSymbol_t e216;
|
||||
public sortedSymbol_t e217;
|
||||
public sortedSymbol_t e218;
|
||||
public sortedSymbol_t e219;
|
||||
public sortedSymbol_t e220;
|
||||
public sortedSymbol_t e221;
|
||||
public sortedSymbol_t e222;
|
||||
public sortedSymbol_t e223;
|
||||
public sortedSymbol_t e224;
|
||||
public sortedSymbol_t e225;
|
||||
public sortedSymbol_t e226;
|
||||
public sortedSymbol_t e227;
|
||||
public sortedSymbol_t e228;
|
||||
public sortedSymbol_t e229;
|
||||
public sortedSymbol_t e230;
|
||||
public sortedSymbol_t e231;
|
||||
public sortedSymbol_t e232;
|
||||
public sortedSymbol_t e233;
|
||||
public sortedSymbol_t e234;
|
||||
public sortedSymbol_t e235;
|
||||
public sortedSymbol_t e236;
|
||||
public sortedSymbol_t e237;
|
||||
public sortedSymbol_t e238;
|
||||
public sortedSymbol_t e239;
|
||||
public sortedSymbol_t e240;
|
||||
public sortedSymbol_t e241;
|
||||
public sortedSymbol_t e242;
|
||||
public sortedSymbol_t e243;
|
||||
public sortedSymbol_t e244;
|
||||
public sortedSymbol_t e245;
|
||||
public sortedSymbol_t e246;
|
||||
public sortedSymbol_t e247;
|
||||
public sortedSymbol_t e248;
|
||||
public sortedSymbol_t e249;
|
||||
public sortedSymbol_t e250;
|
||||
public sortedSymbol_t e251;
|
||||
public sortedSymbol_t e252;
|
||||
public sortedSymbol_t e253;
|
||||
public sortedSymbol_t e254;
|
||||
public sortedSymbol_t e255;
|
||||
|
||||
public ref sortedSymbol_t this[int index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + (uint)index);
|
||||
}
|
||||
|
||||
public ref sortedSymbol_t this[uint index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + index);
|
||||
}
|
||||
|
||||
public ref sortedSymbol_t this[nuint index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + (uint)index);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
public static implicit operator sortedSymbol_t*(in _sortedSymbol_e__FixedBuffer t)
|
||||
{
|
||||
Ldarg_0();
|
||||
Ldflda(new FieldRef(typeof(_sortedSymbol_e__FixedBuffer), nameof(e0)));
|
||||
return IL.ReturnPointer<sortedSymbol_t>();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
public static sortedSymbol_t* operator +(in _sortedSymbol_e__FixedBuffer t, uint index)
|
||||
{
|
||||
Ldarg_0();
|
||||
Ldflda(new FieldRef(typeof(_sortedSymbol_e__FixedBuffer), nameof(e0)));
|
||||
Ldarg_1();
|
||||
Conv_I();
|
||||
Sizeof<sortedSymbol_t>();
|
||||
Conv_I();
|
||||
Mul();
|
||||
Add();
|
||||
return IL.ReturnPointer<sortedSymbol_t>();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,14 @@
using System;

namespace ZstdSharp.Unsafe
{
    public unsafe partial struct HUF_WriteCTableWksp
    {
        public HUF_CompressWeightsWksp wksp;

        /* precomputed conversion table */
        public fixed byte bitsToWeight[13];

        public fixed byte huffWeight[255];
    }
}
@@ -0,0 +1,657 @@
|
||||
using InlineIL;
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
using static InlineIL.IL.Emit;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public partial struct HUF_buildCTable_wksp_tables
|
||||
{
|
||||
public _huffNodeTbl_e__FixedBuffer huffNodeTbl;
|
||||
|
||||
public _rankPosition_e__FixedBuffer rankPosition;
|
||||
|
||||
public unsafe partial struct _huffNodeTbl_e__FixedBuffer
|
||||
{
|
||||
public nodeElt_s e0;
|
||||
public nodeElt_s e1;
|
||||
public nodeElt_s e2;
|
||||
public nodeElt_s e3;
|
||||
public nodeElt_s e4;
|
||||
public nodeElt_s e5;
|
||||
public nodeElt_s e6;
|
||||
public nodeElt_s e7;
|
||||
public nodeElt_s e8;
|
||||
public nodeElt_s e9;
|
||||
public nodeElt_s e10;
|
||||
public nodeElt_s e11;
|
||||
public nodeElt_s e12;
|
||||
public nodeElt_s e13;
|
||||
public nodeElt_s e14;
|
||||
public nodeElt_s e15;
|
||||
public nodeElt_s e16;
|
||||
public nodeElt_s e17;
|
||||
public nodeElt_s e18;
|
||||
public nodeElt_s e19;
|
||||
public nodeElt_s e20;
|
||||
public nodeElt_s e21;
|
||||
public nodeElt_s e22;
|
||||
public nodeElt_s e23;
|
||||
public nodeElt_s e24;
|
||||
public nodeElt_s e25;
|
||||
public nodeElt_s e26;
|
||||
public nodeElt_s e27;
|
||||
public nodeElt_s e28;
|
||||
public nodeElt_s e29;
|
||||
public nodeElt_s e30;
|
||||
public nodeElt_s e31;
|
||||
public nodeElt_s e32;
|
||||
public nodeElt_s e33;
|
||||
public nodeElt_s e34;
|
||||
public nodeElt_s e35;
|
||||
public nodeElt_s e36;
|
||||
public nodeElt_s e37;
|
||||
public nodeElt_s e38;
|
||||
public nodeElt_s e39;
|
||||
public nodeElt_s e40;
|
||||
public nodeElt_s e41;
|
||||
public nodeElt_s e42;
|
||||
public nodeElt_s e43;
|
||||
public nodeElt_s e44;
|
||||
public nodeElt_s e45;
|
||||
public nodeElt_s e46;
|
||||
public nodeElt_s e47;
|
||||
public nodeElt_s e48;
|
||||
public nodeElt_s e49;
|
||||
public nodeElt_s e50;
|
||||
public nodeElt_s e51;
|
||||
public nodeElt_s e52;
|
||||
public nodeElt_s e53;
|
||||
public nodeElt_s e54;
|
||||
public nodeElt_s e55;
|
||||
public nodeElt_s e56;
|
||||
public nodeElt_s e57;
|
||||
public nodeElt_s e58;
|
||||
public nodeElt_s e59;
|
||||
public nodeElt_s e60;
|
||||
public nodeElt_s e61;
|
||||
public nodeElt_s e62;
|
||||
public nodeElt_s e63;
|
||||
public nodeElt_s e64;
|
||||
public nodeElt_s e65;
|
||||
public nodeElt_s e66;
|
||||
public nodeElt_s e67;
|
||||
public nodeElt_s e68;
|
||||
public nodeElt_s e69;
|
||||
public nodeElt_s e70;
|
||||
public nodeElt_s e71;
|
||||
public nodeElt_s e72;
|
||||
public nodeElt_s e73;
|
||||
public nodeElt_s e74;
|
||||
public nodeElt_s e75;
|
||||
public nodeElt_s e76;
|
||||
public nodeElt_s e77;
|
||||
public nodeElt_s e78;
|
||||
public nodeElt_s e79;
|
||||
public nodeElt_s e80;
|
||||
public nodeElt_s e81;
|
||||
public nodeElt_s e82;
|
||||
public nodeElt_s e83;
|
||||
public nodeElt_s e84;
|
||||
public nodeElt_s e85;
|
||||
public nodeElt_s e86;
|
||||
public nodeElt_s e87;
|
||||
public nodeElt_s e88;
|
||||
public nodeElt_s e89;
|
||||
public nodeElt_s e90;
|
||||
public nodeElt_s e91;
|
||||
public nodeElt_s e92;
|
||||
public nodeElt_s e93;
|
||||
public nodeElt_s e94;
|
||||
public nodeElt_s e95;
|
||||
public nodeElt_s e96;
|
||||
public nodeElt_s e97;
|
||||
public nodeElt_s e98;
|
||||
public nodeElt_s e99;
|
||||
public nodeElt_s e100;
|
||||
public nodeElt_s e101;
|
||||
public nodeElt_s e102;
|
||||
public nodeElt_s e103;
|
||||
public nodeElt_s e104;
|
||||
public nodeElt_s e105;
|
||||
public nodeElt_s e106;
|
||||
public nodeElt_s e107;
|
||||
public nodeElt_s e108;
|
||||
public nodeElt_s e109;
|
||||
public nodeElt_s e110;
|
||||
public nodeElt_s e111;
|
||||
public nodeElt_s e112;
|
||||
public nodeElt_s e113;
|
||||
public nodeElt_s e114;
|
||||
public nodeElt_s e115;
|
||||
public nodeElt_s e116;
|
||||
public nodeElt_s e117;
|
||||
public nodeElt_s e118;
|
||||
public nodeElt_s e119;
|
||||
public nodeElt_s e120;
|
||||
public nodeElt_s e121;
|
||||
public nodeElt_s e122;
|
||||
public nodeElt_s e123;
|
||||
public nodeElt_s e124;
|
||||
public nodeElt_s e125;
|
||||
public nodeElt_s e126;
|
||||
public nodeElt_s e127;
|
||||
public nodeElt_s e128;
|
||||
public nodeElt_s e129;
|
||||
public nodeElt_s e130;
|
||||
public nodeElt_s e131;
|
||||
public nodeElt_s e132;
|
||||
public nodeElt_s e133;
|
||||
public nodeElt_s e134;
|
||||
public nodeElt_s e135;
|
||||
public nodeElt_s e136;
|
||||
public nodeElt_s e137;
|
||||
public nodeElt_s e138;
|
||||
public nodeElt_s e139;
|
||||
public nodeElt_s e140;
|
||||
public nodeElt_s e141;
|
||||
public nodeElt_s e142;
|
||||
public nodeElt_s e143;
|
||||
public nodeElt_s e144;
|
||||
public nodeElt_s e145;
|
||||
public nodeElt_s e146;
|
||||
public nodeElt_s e147;
|
||||
public nodeElt_s e148;
|
||||
public nodeElt_s e149;
|
||||
public nodeElt_s e150;
|
||||
public nodeElt_s e151;
|
||||
public nodeElt_s e152;
|
||||
public nodeElt_s e153;
|
||||
public nodeElt_s e154;
|
||||
public nodeElt_s e155;
|
||||
public nodeElt_s e156;
|
||||
public nodeElt_s e157;
|
||||
public nodeElt_s e158;
|
||||
public nodeElt_s e159;
|
||||
public nodeElt_s e160;
|
||||
public nodeElt_s e161;
|
||||
public nodeElt_s e162;
|
||||
public nodeElt_s e163;
|
||||
public nodeElt_s e164;
|
||||
public nodeElt_s e165;
|
||||
public nodeElt_s e166;
|
||||
public nodeElt_s e167;
|
||||
public nodeElt_s e168;
|
||||
public nodeElt_s e169;
|
||||
public nodeElt_s e170;
|
||||
public nodeElt_s e171;
|
||||
public nodeElt_s e172;
|
||||
public nodeElt_s e173;
|
||||
public nodeElt_s e174;
|
||||
public nodeElt_s e175;
|
||||
public nodeElt_s e176;
|
||||
public nodeElt_s e177;
|
||||
public nodeElt_s e178;
|
||||
public nodeElt_s e179;
|
||||
public nodeElt_s e180;
|
||||
public nodeElt_s e181;
|
||||
public nodeElt_s e182;
|
||||
public nodeElt_s e183;
|
||||
public nodeElt_s e184;
|
||||
public nodeElt_s e185;
|
||||
public nodeElt_s e186;
|
||||
public nodeElt_s e187;
|
||||
public nodeElt_s e188;
|
||||
public nodeElt_s e189;
|
||||
public nodeElt_s e190;
|
||||
public nodeElt_s e191;
|
||||
public nodeElt_s e192;
|
||||
public nodeElt_s e193;
|
||||
public nodeElt_s e194;
|
||||
public nodeElt_s e195;
|
||||
public nodeElt_s e196;
|
||||
public nodeElt_s e197;
|
||||
public nodeElt_s e198;
|
||||
public nodeElt_s e199;
|
||||
public nodeElt_s e200;
|
||||
public nodeElt_s e201;
|
||||
public nodeElt_s e202;
|
||||
public nodeElt_s e203;
|
||||
public nodeElt_s e204;
|
||||
public nodeElt_s e205;
|
||||
public nodeElt_s e206;
|
||||
public nodeElt_s e207;
|
||||
public nodeElt_s e208;
|
||||
public nodeElt_s e209;
|
||||
public nodeElt_s e210;
|
||||
public nodeElt_s e211;
|
||||
public nodeElt_s e212;
|
||||
public nodeElt_s e213;
|
||||
public nodeElt_s e214;
|
||||
public nodeElt_s e215;
|
||||
public nodeElt_s e216;
|
||||
public nodeElt_s e217;
|
||||
public nodeElt_s e218;
|
||||
public nodeElt_s e219;
|
||||
public nodeElt_s e220;
|
||||
public nodeElt_s e221;
|
||||
public nodeElt_s e222;
|
||||
public nodeElt_s e223;
|
||||
public nodeElt_s e224;
|
||||
public nodeElt_s e225;
|
||||
public nodeElt_s e226;
|
||||
public nodeElt_s e227;
|
||||
public nodeElt_s e228;
|
||||
public nodeElt_s e229;
|
||||
public nodeElt_s e230;
|
||||
public nodeElt_s e231;
|
||||
public nodeElt_s e232;
|
||||
public nodeElt_s e233;
|
||||
public nodeElt_s e234;
|
||||
public nodeElt_s e235;
|
||||
public nodeElt_s e236;
|
||||
public nodeElt_s e237;
|
||||
public nodeElt_s e238;
|
||||
public nodeElt_s e239;
|
||||
public nodeElt_s e240;
|
||||
public nodeElt_s e241;
|
||||
public nodeElt_s e242;
|
||||
public nodeElt_s e243;
|
||||
public nodeElt_s e244;
|
||||
public nodeElt_s e245;
|
||||
public nodeElt_s e246;
|
||||
public nodeElt_s e247;
|
||||
public nodeElt_s e248;
|
||||
public nodeElt_s e249;
|
||||
public nodeElt_s e250;
|
||||
public nodeElt_s e251;
|
||||
public nodeElt_s e252;
|
||||
public nodeElt_s e253;
|
||||
public nodeElt_s e254;
|
||||
public nodeElt_s e255;
|
||||
public nodeElt_s e256;
|
||||
public nodeElt_s e257;
|
||||
public nodeElt_s e258;
|
||||
public nodeElt_s e259;
|
||||
public nodeElt_s e260;
|
||||
public nodeElt_s e261;
|
||||
public nodeElt_s e262;
|
||||
public nodeElt_s e263;
|
||||
public nodeElt_s e264;
|
||||
public nodeElt_s e265;
|
||||
public nodeElt_s e266;
|
||||
public nodeElt_s e267;
|
||||
public nodeElt_s e268;
|
||||
public nodeElt_s e269;
|
||||
public nodeElt_s e270;
|
||||
public nodeElt_s e271;
|
||||
public nodeElt_s e272;
|
||||
public nodeElt_s e273;
|
||||
public nodeElt_s e274;
|
||||
public nodeElt_s e275;
|
||||
public nodeElt_s e276;
|
||||
public nodeElt_s e277;
|
||||
public nodeElt_s e278;
|
||||
public nodeElt_s e279;
|
||||
public nodeElt_s e280;
|
||||
public nodeElt_s e281;
|
||||
public nodeElt_s e282;
|
||||
public nodeElt_s e283;
|
||||
public nodeElt_s e284;
|
||||
public nodeElt_s e285;
|
||||
public nodeElt_s e286;
|
||||
public nodeElt_s e287;
|
||||
public nodeElt_s e288;
|
||||
public nodeElt_s e289;
|
||||
public nodeElt_s e290;
|
||||
public nodeElt_s e291;
|
||||
public nodeElt_s e292;
|
||||
public nodeElt_s e293;
|
||||
public nodeElt_s e294;
|
||||
public nodeElt_s e295;
|
||||
public nodeElt_s e296;
|
||||
public nodeElt_s e297;
|
||||
public nodeElt_s e298;
|
||||
public nodeElt_s e299;
|
||||
public nodeElt_s e300;
|
||||
public nodeElt_s e301;
|
||||
public nodeElt_s e302;
|
||||
public nodeElt_s e303;
|
||||
public nodeElt_s e304;
|
||||
public nodeElt_s e305;
|
||||
public nodeElt_s e306;
|
||||
public nodeElt_s e307;
|
||||
public nodeElt_s e308;
|
||||
public nodeElt_s e309;
|
||||
public nodeElt_s e310;
|
||||
public nodeElt_s e311;
|
||||
public nodeElt_s e312;
|
||||
public nodeElt_s e313;
|
||||
public nodeElt_s e314;
|
||||
public nodeElt_s e315;
|
||||
public nodeElt_s e316;
|
||||
public nodeElt_s e317;
|
||||
public nodeElt_s e318;
|
||||
public nodeElt_s e319;
|
||||
public nodeElt_s e320;
|
||||
public nodeElt_s e321;
|
||||
public nodeElt_s e322;
|
||||
public nodeElt_s e323;
|
||||
public nodeElt_s e324;
|
||||
public nodeElt_s e325;
|
||||
public nodeElt_s e326;
|
||||
public nodeElt_s e327;
|
||||
public nodeElt_s e328;
|
||||
public nodeElt_s e329;
|
||||
public nodeElt_s e330;
|
||||
public nodeElt_s e331;
|
||||
public nodeElt_s e332;
|
||||
public nodeElt_s e333;
|
||||
public nodeElt_s e334;
|
||||
public nodeElt_s e335;
|
||||
public nodeElt_s e336;
|
||||
public nodeElt_s e337;
|
||||
public nodeElt_s e338;
|
||||
public nodeElt_s e339;
|
||||
public nodeElt_s e340;
|
||||
public nodeElt_s e341;
|
||||
public nodeElt_s e342;
|
||||
public nodeElt_s e343;
|
||||
public nodeElt_s e344;
|
||||
public nodeElt_s e345;
|
||||
public nodeElt_s e346;
|
||||
public nodeElt_s e347;
|
||||
public nodeElt_s e348;
|
||||
public nodeElt_s e349;
|
||||
public nodeElt_s e350;
|
||||
public nodeElt_s e351;
|
||||
public nodeElt_s e352;
|
||||
public nodeElt_s e353;
|
||||
public nodeElt_s e354;
|
||||
public nodeElt_s e355;
|
||||
public nodeElt_s e356;
|
||||
public nodeElt_s e357;
|
||||
public nodeElt_s e358;
|
||||
public nodeElt_s e359;
|
||||
public nodeElt_s e360;
|
||||
public nodeElt_s e361;
|
||||
public nodeElt_s e362;
|
||||
public nodeElt_s e363;
|
||||
public nodeElt_s e364;
|
||||
public nodeElt_s e365;
|
||||
public nodeElt_s e366;
|
||||
public nodeElt_s e367;
|
||||
public nodeElt_s e368;
|
||||
public nodeElt_s e369;
|
||||
public nodeElt_s e370;
|
||||
public nodeElt_s e371;
|
||||
public nodeElt_s e372;
|
||||
public nodeElt_s e373;
|
||||
public nodeElt_s e374;
|
||||
public nodeElt_s e375;
|
||||
public nodeElt_s e376;
|
||||
public nodeElt_s e377;
|
||||
public nodeElt_s e378;
|
||||
public nodeElt_s e379;
|
||||
public nodeElt_s e380;
|
||||
public nodeElt_s e381;
|
||||
public nodeElt_s e382;
|
||||
public nodeElt_s e383;
|
||||
public nodeElt_s e384;
|
||||
public nodeElt_s e385;
|
||||
public nodeElt_s e386;
|
||||
public nodeElt_s e387;
|
||||
public nodeElt_s e388;
|
||||
public nodeElt_s e389;
|
||||
public nodeElt_s e390;
|
||||
public nodeElt_s e391;
|
||||
public nodeElt_s e392;
|
||||
public nodeElt_s e393;
|
||||
public nodeElt_s e394;
|
||||
public nodeElt_s e395;
|
||||
public nodeElt_s e396;
|
||||
public nodeElt_s e397;
|
||||
public nodeElt_s e398;
|
||||
public nodeElt_s e399;
|
||||
public nodeElt_s e400;
|
||||
public nodeElt_s e401;
|
||||
public nodeElt_s e402;
|
||||
public nodeElt_s e403;
|
||||
public nodeElt_s e404;
|
||||
public nodeElt_s e405;
|
||||
public nodeElt_s e406;
|
||||
public nodeElt_s e407;
|
||||
public nodeElt_s e408;
|
||||
public nodeElt_s e409;
|
||||
public nodeElt_s e410;
|
||||
public nodeElt_s e411;
|
||||
public nodeElt_s e412;
|
||||
public nodeElt_s e413;
|
||||
public nodeElt_s e414;
|
||||
public nodeElt_s e415;
|
||||
public nodeElt_s e416;
|
||||
public nodeElt_s e417;
|
||||
public nodeElt_s e418;
|
||||
public nodeElt_s e419;
|
||||
public nodeElt_s e420;
|
||||
public nodeElt_s e421;
|
||||
public nodeElt_s e422;
|
||||
public nodeElt_s e423;
|
||||
public nodeElt_s e424;
|
||||
public nodeElt_s e425;
|
||||
public nodeElt_s e426;
|
||||
public nodeElt_s e427;
|
||||
public nodeElt_s e428;
|
||||
public nodeElt_s e429;
|
||||
public nodeElt_s e430;
|
||||
public nodeElt_s e431;
|
||||
public nodeElt_s e432;
|
||||
public nodeElt_s e433;
|
||||
public nodeElt_s e434;
|
||||
public nodeElt_s e435;
|
||||
public nodeElt_s e436;
|
||||
public nodeElt_s e437;
|
||||
public nodeElt_s e438;
|
||||
public nodeElt_s e439;
|
||||
public nodeElt_s e440;
|
||||
public nodeElt_s e441;
|
||||
public nodeElt_s e442;
|
||||
public nodeElt_s e443;
|
||||
public nodeElt_s e444;
|
||||
public nodeElt_s e445;
|
||||
public nodeElt_s e446;
|
||||
public nodeElt_s e447;
|
||||
public nodeElt_s e448;
|
||||
public nodeElt_s e449;
|
||||
public nodeElt_s e450;
|
||||
public nodeElt_s e451;
|
||||
public nodeElt_s e452;
|
||||
public nodeElt_s e453;
|
||||
public nodeElt_s e454;
|
||||
public nodeElt_s e455;
|
||||
public nodeElt_s e456;
|
||||
public nodeElt_s e457;
|
||||
public nodeElt_s e458;
|
||||
public nodeElt_s e459;
|
||||
public nodeElt_s e460;
|
||||
public nodeElt_s e461;
|
||||
public nodeElt_s e462;
|
||||
public nodeElt_s e463;
|
||||
public nodeElt_s e464;
|
||||
public nodeElt_s e465;
|
||||
public nodeElt_s e466;
|
||||
public nodeElt_s e467;
|
||||
public nodeElt_s e468;
|
||||
public nodeElt_s e469;
|
||||
public nodeElt_s e470;
|
||||
public nodeElt_s e471;
|
||||
public nodeElt_s e472;
|
||||
public nodeElt_s e473;
|
||||
public nodeElt_s e474;
|
||||
public nodeElt_s e475;
|
||||
public nodeElt_s e476;
|
||||
public nodeElt_s e477;
|
||||
public nodeElt_s e478;
|
||||
public nodeElt_s e479;
|
||||
public nodeElt_s e480;
|
||||
public nodeElt_s e481;
|
||||
public nodeElt_s e482;
|
||||
public nodeElt_s e483;
|
||||
public nodeElt_s e484;
|
||||
public nodeElt_s e485;
|
||||
public nodeElt_s e486;
|
||||
public nodeElt_s e487;
|
||||
public nodeElt_s e488;
|
||||
public nodeElt_s e489;
|
||||
public nodeElt_s e490;
|
||||
public nodeElt_s e491;
|
||||
public nodeElt_s e492;
|
||||
public nodeElt_s e493;
|
||||
public nodeElt_s e494;
|
||||
public nodeElt_s e495;
|
||||
public nodeElt_s e496;
|
||||
public nodeElt_s e497;
|
||||
public nodeElt_s e498;
|
||||
public nodeElt_s e499;
|
||||
public nodeElt_s e500;
|
||||
public nodeElt_s e501;
|
||||
public nodeElt_s e502;
|
||||
public nodeElt_s e503;
|
||||
public nodeElt_s e504;
|
||||
public nodeElt_s e505;
|
||||
public nodeElt_s e506;
|
||||
public nodeElt_s e507;
|
||||
public nodeElt_s e508;
|
||||
public nodeElt_s e509;
|
||||
public nodeElt_s e510;
|
||||
public nodeElt_s e511;
|
||||
|
||||
public ref nodeElt_s this[int index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + (uint)index);
|
||||
}
|
||||
|
||||
public ref nodeElt_s this[uint index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + index);
|
||||
}
|
||||
|
||||
public ref nodeElt_s this[nuint index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + (uint)index);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
public static implicit operator nodeElt_s*(in _huffNodeTbl_e__FixedBuffer t)
|
||||
{
|
||||
Ldarg_0();
|
||||
Ldflda(new FieldRef(typeof(_huffNodeTbl_e__FixedBuffer), nameof(e0)));
|
||||
return IL.ReturnPointer<nodeElt_s>();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
public static nodeElt_s* operator +(in _huffNodeTbl_e__FixedBuffer t, uint index)
|
||||
{
|
||||
Ldarg_0();
|
||||
Ldflda(new FieldRef(typeof(_huffNodeTbl_e__FixedBuffer), nameof(e0)));
|
||||
Ldarg_1();
|
||||
Conv_I();
|
||||
Sizeof<nodeElt_s>();
|
||||
Conv_I();
|
||||
Mul();
|
||||
Add();
|
||||
return IL.ReturnPointer<nodeElt_s>();
|
||||
}
|
||||
}
|
||||
|
||||
public unsafe partial struct _rankPosition_e__FixedBuffer
|
||||
{
|
||||
public rankPos e0;
|
||||
public rankPos e1;
|
||||
public rankPos e2;
|
||||
public rankPos e3;
|
||||
public rankPos e4;
|
||||
public rankPos e5;
|
||||
public rankPos e6;
|
||||
public rankPos e7;
|
||||
public rankPos e8;
|
||||
public rankPos e9;
|
||||
public rankPos e10;
|
||||
public rankPos e11;
|
||||
public rankPos e12;
|
||||
public rankPos e13;
|
||||
public rankPos e14;
|
||||
public rankPos e15;
|
||||
public rankPos e16;
|
||||
public rankPos e17;
|
||||
public rankPos e18;
|
||||
public rankPos e19;
|
||||
public rankPos e20;
|
||||
public rankPos e21;
|
||||
public rankPos e22;
|
||||
public rankPos e23;
|
||||
public rankPos e24;
|
||||
public rankPos e25;
|
||||
public rankPos e26;
|
||||
public rankPos e27;
|
||||
public rankPos e28;
|
||||
public rankPos e29;
|
||||
public rankPos e30;
|
||||
public rankPos e31;
|
||||
|
||||
public ref rankPos this[int index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + (uint)index);
|
||||
}
|
||||
|
||||
public ref rankPos this[uint index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + index);
|
||||
}
|
||||
|
||||
public ref rankPos this[nuint index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + (uint)index);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
public static implicit operator rankPos*(in _rankPosition_e__FixedBuffer t)
|
||||
{
|
||||
Ldarg_0();
|
||||
Ldflda(new FieldRef(typeof(_rankPosition_e__FixedBuffer), nameof(e0)));
|
||||
return IL.ReturnPointer<rankPos>();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
public static rankPos* operator +(in _rankPosition_e__FixedBuffer t, uint index)
|
||||
{
|
||||
Ldarg_0();
|
||||
Ldflda(new FieldRef(typeof(_rankPosition_e__FixedBuffer), nameof(e0)));
|
||||
Ldarg_1();
|
||||
Conv_I();
|
||||
Sizeof<rankPos>();
|
||||
Conv_I();
|
||||
Mul();
|
||||
Add();
|
||||
return IL.ReturnPointer<rankPos>();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,322 @@
|
||||
using InlineIL;
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
using static InlineIL.IL.Emit;
|
||||
|
||||
namespace ZstdSharp.Unsafe
|
||||
{
|
||||
public unsafe partial struct HUF_compress_tables_t
|
||||
{
|
||||
public fixed uint count[256];
|
||||
|
||||
public _CTable_e__FixedBuffer CTable;
|
||||
|
||||
public _wksps_e__Union wksps;
|
||||
|
||||
public unsafe partial struct _CTable_e__FixedBuffer
|
||||
{
|
||||
public HUF_CElt_s e0;
|
||||
public HUF_CElt_s e1;
|
||||
public HUF_CElt_s e2;
|
||||
public HUF_CElt_s e3;
|
||||
public HUF_CElt_s e4;
|
||||
public HUF_CElt_s e5;
|
||||
public HUF_CElt_s e6;
|
||||
public HUF_CElt_s e7;
|
||||
public HUF_CElt_s e8;
|
||||
public HUF_CElt_s e9;
|
||||
public HUF_CElt_s e10;
|
||||
public HUF_CElt_s e11;
|
||||
public HUF_CElt_s e12;
|
||||
public HUF_CElt_s e13;
|
||||
public HUF_CElt_s e14;
|
||||
public HUF_CElt_s e15;
|
||||
public HUF_CElt_s e16;
|
||||
public HUF_CElt_s e17;
|
||||
public HUF_CElt_s e18;
|
||||
public HUF_CElt_s e19;
|
||||
public HUF_CElt_s e20;
|
||||
public HUF_CElt_s e21;
|
||||
public HUF_CElt_s e22;
|
||||
public HUF_CElt_s e23;
|
||||
public HUF_CElt_s e24;
|
||||
public HUF_CElt_s e25;
|
||||
public HUF_CElt_s e26;
|
||||
public HUF_CElt_s e27;
|
||||
public HUF_CElt_s e28;
|
||||
public HUF_CElt_s e29;
|
||||
public HUF_CElt_s e30;
|
||||
public HUF_CElt_s e31;
|
||||
public HUF_CElt_s e32;
|
||||
public HUF_CElt_s e33;
|
||||
public HUF_CElt_s e34;
|
||||
public HUF_CElt_s e35;
|
||||
public HUF_CElt_s e36;
|
||||
public HUF_CElt_s e37;
|
||||
public HUF_CElt_s e38;
|
||||
public HUF_CElt_s e39;
|
||||
public HUF_CElt_s e40;
|
||||
public HUF_CElt_s e41;
|
||||
public HUF_CElt_s e42;
|
||||
public HUF_CElt_s e43;
|
||||
public HUF_CElt_s e44;
|
||||
public HUF_CElt_s e45;
|
||||
public HUF_CElt_s e46;
|
||||
public HUF_CElt_s e47;
|
||||
public HUF_CElt_s e48;
|
||||
public HUF_CElt_s e49;
|
||||
public HUF_CElt_s e50;
|
||||
public HUF_CElt_s e51;
|
||||
public HUF_CElt_s e52;
|
||||
public HUF_CElt_s e53;
|
||||
public HUF_CElt_s e54;
|
||||
public HUF_CElt_s e55;
|
||||
public HUF_CElt_s e56;
|
||||
public HUF_CElt_s e57;
|
||||
public HUF_CElt_s e58;
|
||||
public HUF_CElt_s e59;
|
||||
public HUF_CElt_s e60;
|
||||
public HUF_CElt_s e61;
|
||||
public HUF_CElt_s e62;
|
||||
public HUF_CElt_s e63;
|
||||
public HUF_CElt_s e64;
|
||||
public HUF_CElt_s e65;
|
||||
public HUF_CElt_s e66;
|
||||
public HUF_CElt_s e67;
|
||||
public HUF_CElt_s e68;
|
||||
public HUF_CElt_s e69;
|
||||
public HUF_CElt_s e70;
|
||||
public HUF_CElt_s e71;
|
||||
public HUF_CElt_s e72;
|
||||
public HUF_CElt_s e73;
|
||||
public HUF_CElt_s e74;
|
||||
public HUF_CElt_s e75;
|
||||
public HUF_CElt_s e76;
|
||||
public HUF_CElt_s e77;
|
||||
public HUF_CElt_s e78;
|
||||
public HUF_CElt_s e79;
|
||||
public HUF_CElt_s e80;
|
||||
public HUF_CElt_s e81;
|
||||
public HUF_CElt_s e82;
|
||||
public HUF_CElt_s e83;
|
||||
public HUF_CElt_s e84;
|
||||
public HUF_CElt_s e85;
|
||||
public HUF_CElt_s e86;
|
||||
public HUF_CElt_s e87;
|
||||
public HUF_CElt_s e88;
|
||||
public HUF_CElt_s e89;
|
||||
public HUF_CElt_s e90;
|
||||
public HUF_CElt_s e91;
|
||||
public HUF_CElt_s e92;
|
||||
public HUF_CElt_s e93;
|
||||
public HUF_CElt_s e94;
|
||||
public HUF_CElt_s e95;
|
||||
public HUF_CElt_s e96;
|
||||
public HUF_CElt_s e97;
|
||||
public HUF_CElt_s e98;
|
||||
public HUF_CElt_s e99;
|
||||
public HUF_CElt_s e100;
|
||||
public HUF_CElt_s e101;
|
||||
public HUF_CElt_s e102;
|
||||
public HUF_CElt_s e103;
|
||||
public HUF_CElt_s e104;
|
||||
public HUF_CElt_s e105;
|
||||
public HUF_CElt_s e106;
|
||||
public HUF_CElt_s e107;
|
||||
public HUF_CElt_s e108;
|
||||
public HUF_CElt_s e109;
|
||||
public HUF_CElt_s e110;
|
||||
public HUF_CElt_s e111;
|
||||
public HUF_CElt_s e112;
|
||||
public HUF_CElt_s e113;
|
||||
public HUF_CElt_s e114;
|
||||
public HUF_CElt_s e115;
|
||||
public HUF_CElt_s e116;
|
||||
public HUF_CElt_s e117;
|
||||
public HUF_CElt_s e118;
|
||||
public HUF_CElt_s e119;
|
||||
public HUF_CElt_s e120;
|
||||
public HUF_CElt_s e121;
|
||||
public HUF_CElt_s e122;
|
||||
public HUF_CElt_s e123;
|
||||
public HUF_CElt_s e124;
|
||||
public HUF_CElt_s e125;
|
||||
public HUF_CElt_s e126;
|
||||
public HUF_CElt_s e127;
|
||||
public HUF_CElt_s e128;
|
||||
public HUF_CElt_s e129;
|
||||
public HUF_CElt_s e130;
|
||||
public HUF_CElt_s e131;
|
||||
public HUF_CElt_s e132;
|
||||
public HUF_CElt_s e133;
|
||||
public HUF_CElt_s e134;
|
||||
public HUF_CElt_s e135;
|
||||
public HUF_CElt_s e136;
|
||||
public HUF_CElt_s e137;
|
||||
public HUF_CElt_s e138;
|
||||
public HUF_CElt_s e139;
|
||||
public HUF_CElt_s e140;
|
||||
public HUF_CElt_s e141;
|
||||
public HUF_CElt_s e142;
|
||||
public HUF_CElt_s e143;
|
||||
public HUF_CElt_s e144;
|
||||
public HUF_CElt_s e145;
|
||||
public HUF_CElt_s e146;
|
||||
public HUF_CElt_s e147;
|
||||
public HUF_CElt_s e148;
|
||||
public HUF_CElt_s e149;
|
||||
public HUF_CElt_s e150;
|
||||
public HUF_CElt_s e151;
|
||||
public HUF_CElt_s e152;
|
||||
public HUF_CElt_s e153;
|
||||
public HUF_CElt_s e154;
|
||||
public HUF_CElt_s e155;
|
||||
public HUF_CElt_s e156;
|
||||
public HUF_CElt_s e157;
|
||||
public HUF_CElt_s e158;
|
||||
public HUF_CElt_s e159;
|
||||
public HUF_CElt_s e160;
|
||||
public HUF_CElt_s e161;
|
||||
public HUF_CElt_s e162;
|
||||
public HUF_CElt_s e163;
|
||||
public HUF_CElt_s e164;
|
||||
public HUF_CElt_s e165;
|
||||
public HUF_CElt_s e166;
|
||||
public HUF_CElt_s e167;
|
||||
public HUF_CElt_s e168;
|
||||
public HUF_CElt_s e169;
|
||||
public HUF_CElt_s e170;
|
||||
public HUF_CElt_s e171;
|
||||
public HUF_CElt_s e172;
|
||||
public HUF_CElt_s e173;
|
||||
public HUF_CElt_s e174;
|
||||
public HUF_CElt_s e175;
|
||||
public HUF_CElt_s e176;
|
||||
public HUF_CElt_s e177;
|
||||
public HUF_CElt_s e178;
|
||||
public HUF_CElt_s e179;
|
||||
public HUF_CElt_s e180;
|
||||
public HUF_CElt_s e181;
|
||||
public HUF_CElt_s e182;
|
||||
public HUF_CElt_s e183;
|
||||
public HUF_CElt_s e184;
|
||||
public HUF_CElt_s e185;
|
||||
public HUF_CElt_s e186;
|
||||
public HUF_CElt_s e187;
|
||||
public HUF_CElt_s e188;
|
||||
public HUF_CElt_s e189;
|
||||
public HUF_CElt_s e190;
|
||||
public HUF_CElt_s e191;
|
||||
public HUF_CElt_s e192;
|
||||
public HUF_CElt_s e193;
|
||||
public HUF_CElt_s e194;
|
||||
public HUF_CElt_s e195;
|
||||
public HUF_CElt_s e196;
|
||||
public HUF_CElt_s e197;
|
||||
public HUF_CElt_s e198;
|
||||
public HUF_CElt_s e199;
|
||||
public HUF_CElt_s e200;
|
||||
public HUF_CElt_s e201;
|
||||
public HUF_CElt_s e202;
|
||||
public HUF_CElt_s e203;
|
||||
public HUF_CElt_s e204;
|
||||
public HUF_CElt_s e205;
|
||||
public HUF_CElt_s e206;
|
||||
public HUF_CElt_s e207;
|
||||
public HUF_CElt_s e208;
|
||||
public HUF_CElt_s e209;
|
||||
public HUF_CElt_s e210;
|
||||
public HUF_CElt_s e211;
|
||||
public HUF_CElt_s e212;
|
||||
public HUF_CElt_s e213;
|
||||
public HUF_CElt_s e214;
|
||||
public HUF_CElt_s e215;
|
||||
public HUF_CElt_s e216;
|
||||
public HUF_CElt_s e217;
|
||||
public HUF_CElt_s e218;
|
||||
public HUF_CElt_s e219;
|
||||
public HUF_CElt_s e220;
|
||||
public HUF_CElt_s e221;
|
||||
public HUF_CElt_s e222;
|
||||
public HUF_CElt_s e223;
|
||||
public HUF_CElt_s e224;
|
||||
public HUF_CElt_s e225;
|
||||
public HUF_CElt_s e226;
|
||||
public HUF_CElt_s e227;
|
||||
public HUF_CElt_s e228;
|
||||
public HUF_CElt_s e229;
|
||||
public HUF_CElt_s e230;
|
||||
public HUF_CElt_s e231;
|
||||
public HUF_CElt_s e232;
|
||||
public HUF_CElt_s e233;
|
||||
public HUF_CElt_s e234;
|
||||
public HUF_CElt_s e235;
|
||||
public HUF_CElt_s e236;
|
||||
public HUF_CElt_s e237;
|
||||
public HUF_CElt_s e238;
|
||||
public HUF_CElt_s e239;
|
||||
public HUF_CElt_s e240;
|
||||
public HUF_CElt_s e241;
|
||||
public HUF_CElt_s e242;
|
||||
public HUF_CElt_s e243;
|
||||
public HUF_CElt_s e244;
|
||||
public HUF_CElt_s e245;
|
||||
public HUF_CElt_s e246;
|
||||
public HUF_CElt_s e247;
|
||||
public HUF_CElt_s e248;
|
||||
public HUF_CElt_s e249;
|
||||
public HUF_CElt_s e250;
|
||||
public HUF_CElt_s e251;
|
||||
public HUF_CElt_s e252;
|
||||
public HUF_CElt_s e253;
|
||||
public HUF_CElt_s e254;
|
||||
public HUF_CElt_s e255;
|
||||
|
||||
public ref HUF_CElt_s this[int index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + (uint)index);
|
||||
}
|
||||
|
||||
public ref HUF_CElt_s this[uint index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + index);
|
||||
}
|
||||
|
||||
public ref HUF_CElt_s this[nuint index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
get => ref *(this + (uint)index);
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
public static implicit operator HUF_CElt_s*(in _CTable_e__FixedBuffer t)
|
||||
{
|
||||
Ldarg_0();
|
||||
Ldflda(new FieldRef(typeof(_CTable_e__FixedBuffer), nameof(e0)));
|
||||
return IL.ReturnPointer<HUF_CElt_s>();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
[InlineMethod.Inline]
|
||||
public static HUF_CElt_s* operator +(in _CTable_e__FixedBuffer t, uint index)
|
||||
{
|
||||
Ldarg_0();
|
||||
Ldflda(new FieldRef(typeof(_CTable_e__FixedBuffer), nameof(e0)));
|
||||
Ldarg_1();
|
||||
Conv_I();
|
||||
Sizeof<HUF_CElt_s>();
|
||||
Conv_I();
|
||||
Mul();
|
||||
Add();
|
||||
return IL.ReturnPointer<HUF_CElt_s>();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.