Compare commits


7 Commits

Author         SHA1        Date                        Message
Adam Hathcock  b010cce1ca  2025-10-13 17:03:58 +01:00  Merge branch 'master' into adam/zstd
Adam Hathcock  ee2cbc8051  2025-10-13 17:02:41 +01:00  fmt
Adam Hathcock  906baf18d2  2025-10-13 17:02:21 +01:00  fix namespaces
Adam Hathcock  0a7ffd003b  2025-10-13 16:42:22 +01:00  ran formatting
Adam Hathcock  b545973c55  2025-10-13 16:41:58 +01:00  stuff compiles now
Adam Hathcock  999af800af  2025-10-13 16:21:05 +01:00  add more non legacy stuff
Adam Hathcock  5b5336f456  2025-10-13 13:02:48 +01:00  add first pass of zstdsharp into library. Full fat framework doesn't compile. Marked lib as not CLS compliant again
254 changed files with 57676 additions and 2056 deletions

View File

@@ -7,8 +7,8 @@
 <PackageVersion Include="Microsoft.NET.Test.Sdk" Version="17.13.0" />
 <PackageVersion Include="Mono.Posix.NETStandard" Version="1.0.0" />
 <PackageVersion Include="SimpleExec" Version="12.0.0" />
-<PackageVersion Include="System.Buffers" Version="4.6.0" />
-<PackageVersion Include="System.Memory" Version="4.6.0" />
+<PackageVersion Include="System.Buffers" Version="4.6.1" />
+<PackageVersion Include="System.Memory" Version="4.6.3" />
 <PackageVersion Include="System.Text.Encoding.CodePages" Version="8.0.0" />
 <PackageVersion Include="xunit" Version="2.9.3" />
 <PackageVersion Include="xunit.runner.visualstudio" Version="3.1.5" />

View File

@@ -1,7 +1,7 @@
 using System;
 using System.Runtime.CompilerServices;
 
-[assembly: CLSCompliant(true)]
+[assembly: CLSCompliant(false)]
 [assembly: InternalsVisibleTo(
     "SharpCompress.Test,PublicKey=0024000004800000940000000602000000240000525341310004000001000100158bebf1433f76dffc356733c138babea7a47536c65ed8009b16372c6f4edbb20554db74a62687f56b97c20a6ce8c4b123280279e33c894e7b3aa93ab3c573656fde4db576cfe07dba09619ead26375b25d2c4a8e43f7be257d712b0dd2eb546f67adb09281338618a58ac834fc038dd7e2740a7ab3591826252e4f4516306dc"
 )]

View File

@@ -7,54 +7,53 @@ using System.Threading.Tasks;
 using SharpCompress.Common.GZip;
 using SharpCompress.Common.Tar;
 
-namespace SharpCompress.Common.Arc
-{
+namespace SharpCompress.Common.Arc;
+
 public class ArcEntry : Entry
 {
     private readonly ArcFilePart? _filePart;
 
     internal ArcEntry(ArcFilePart? filePart)
     {
         _filePart = filePart;
     }
 
     public override long Crc
     {
         get
         {
             if (_filePart == null)
             {
                 return 0;
             }
             return _filePart.Header.Crc16;
         }
     }
 
     public override string? Key => _filePart?.Header.Name;
 
     public override string? LinkTarget => null;
 
     public override long CompressedSize => _filePart?.Header.CompressedSize ?? 0;
 
     public override CompressionType CompressionType =>
         _filePart?.Header.CompressionMethod ?? CompressionType.Unknown;
 
     public override long Size => throw new NotImplementedException();
 
     public override DateTime? LastModifiedTime => null;
 
     public override DateTime? CreatedTime => null;
 
     public override DateTime? LastAccessedTime => null;
 
     public override DateTime? ArchivedTime => null;
 
     public override bool IsEncrypted => false;
 
     public override bool IsDirectory => false;
 
     public override bool IsSplitAfter => false;
 
     internal override IEnumerable<FilePart> Parts => _filePart.Empty();
 }
-}

View File

@@ -3,74 +3,73 @@ using System.IO;
 using System.Linq;
 using System.Text;
 
-namespace SharpCompress.Common.Arc
-{
+namespace SharpCompress.Common.Arc;
+
 public class ArcEntryHeader
 {
     public ArchiveEncoding ArchiveEncoding { get; }
     public CompressionType CompressionMethod { get; private set; }
     public string? Name { get; private set; }
     public long CompressedSize { get; private set; }
     public DateTime DateTime { get; private set; }
     public int Crc16 { get; private set; }
     public long OriginalSize { get; private set; }
     public long DataStartPosition { get; private set; }
 
     public ArcEntryHeader(ArchiveEncoding archiveEncoding)
     {
         this.ArchiveEncoding = archiveEncoding;
     }
 
     public ArcEntryHeader? ReadHeader(Stream stream)
     {
         byte[] headerBytes = new byte[29];
         if (stream.Read(headerBytes, 0, headerBytes.Length) != headerBytes.Length)
         {
             return null;
         }
         DataStartPosition = stream.Position;
         return LoadFrom(headerBytes);
     }
 
     public ArcEntryHeader LoadFrom(byte[] headerBytes)
     {
         CompressionMethod = GetCompressionType(headerBytes[1]);
         // Read name
         int nameEnd = Array.IndexOf(headerBytes, (byte)0, 1); // Find null terminator
         Name = Encoding.UTF8.GetString(headerBytes, 2, nameEnd > 0 ? nameEnd - 2 : 12);
         int offset = 15;
         CompressedSize = BitConverter.ToUInt32(headerBytes, offset);
         offset += 4;
         uint rawDateTime = BitConverter.ToUInt32(headerBytes, offset);
         DateTime = ConvertToDateTime(rawDateTime);
         offset += 4;
         Crc16 = BitConverter.ToUInt16(headerBytes, offset);
         offset += 2;
         OriginalSize = BitConverter.ToUInt32(headerBytes, offset);
         return this;
     }
 
     private CompressionType GetCompressionType(byte value)
     {
         return value switch
         {
             1 or 2 => CompressionType.None,
             3 => CompressionType.RLE90,
             4 => CompressionType.Squeezed,
             5 or 6 or 7 or 8 => CompressionType.Crunched,
             9 => CompressionType.Squashed,
             10 => CompressionType.Crushed,
             11 => CompressionType.Distilled,
             _ => CompressionType.Unknown,
         };
     }
 
     public static DateTime ConvertToDateTime(long rawDateTime)
     {
         // Convert Unix timestamp to DateTime (UTC)
         return DateTimeOffset.FromUnixTimeSeconds(rawDateTime).UtcDateTime;
     }
 }
-}
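
For orientation, the 29-byte entry header that LoadFrom decodes works out to the following layout. The offsets are inferred from the code above; byte 0 is presumably the classic ARC 0x1A entry marker, which LoadFrom itself never inspects:

offset 0      1 byte    entry marker (not read here; 0x1A in the classic ARC format)
offset 1      1 byte    compression method, mapped by GetCompressionType
offset 2-14   13 bytes  file name, null-terminated
offset 15-18  4 bytes   compressed size (little-endian uint32)
offset 19-22  4 bytes   raw date/time (uint32)
offset 23-24  2 bytes   CRC-16
offset 25-28  4 bytes   original size (little-endian uint32)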

View File

@@ -13,63 +13,55 @@ using SharpCompress.Compressors.RLE90;
 using SharpCompress.Compressors.Squeezed;
 using SharpCompress.IO;
 
-namespace SharpCompress.Common.Arc
-{
+namespace SharpCompress.Common.Arc;
+
 public class ArcFilePart : FilePart
 {
     private readonly Stream? _stream;
 
     internal ArcFilePart(ArcEntryHeader localArcHeader, Stream? seekableStream)
         : base(localArcHeader.ArchiveEncoding)
     {
         _stream = seekableStream;
         Header = localArcHeader;
     }
 
     internal ArcEntryHeader Header { get; set; }
 
     internal override string? FilePartName => Header.Name;
 
     internal override Stream GetCompressedStream()
     {
         if (_stream != null)
         {
             Stream compressedStream;
             switch (Header.CompressionMethod)
             {
                 case CompressionType.None:
                     compressedStream = new ReadOnlySubStream(
                         _stream,
                         Header.DataStartPosition,
                         Header.CompressedSize
                     );
                     break;
                 case CompressionType.RLE90:
-                    compressedStream = new RunLength90Stream(
-                        _stream,
-                        (int)Header.CompressedSize
-                    );
+                    compressedStream = new RunLength90Stream(_stream, (int)Header.CompressedSize);
                     break;
                 case CompressionType.Squeezed:
                     compressedStream = new SqueezeStream(_stream, (int)Header.CompressedSize);
                     break;
                 case CompressionType.Crunched:
-                    compressedStream = new ArcLzwStream(
-                        _stream,
-                        (int)Header.CompressedSize,
-                        true
-                    );
+                    compressedStream = new ArcLzwStream(_stream, (int)Header.CompressedSize, true);
                     break;
                 default:
                     throw new NotSupportedException(
                         "CompressionMethod: " + Header.CompressionMethod
                     );
             }
             return compressedStream;
         }
         return _stream.NotNull();
     }
 
     internal override Stream? GetRawStream() => _stream;
 }
-}

View File

@@ -6,11 +6,10 @@ using System.Text;
 using System.Threading.Tasks;
 using SharpCompress.Readers;
 
-namespace SharpCompress.Common.Arc
-{
+namespace SharpCompress.Common.Arc;
+
 public class ArcVolume : Volume
 {
     public ArcVolume(Stream stream, ReaderOptions readerOptions, int index = 0)
         : base(stream, readerOptions, index) { }
 }
-}

View File

@@ -13,8 +13,8 @@ using SharpCompress.Compressors.PPMd;
 using SharpCompress.Compressors.Reduce;
 using SharpCompress.Compressors.Shrink;
 using SharpCompress.Compressors.Xz;
+using SharpCompress.Compressors.ZStandard;
 using SharpCompress.IO;
-using ZstdSharp;
 
 namespace SharpCompress.Common.Zip;

View File

@@ -1,36 +1,35 @@
 using System.IO;
 
-namespace SharpCompress.Compressors.Filters
-{
+namespace SharpCompress.Compressors.Filters;
+
 internal class DeltaFilter : Filter
 {
     private const int DISTANCE_MIN = 1;
     private const int DISTANCE_MAX = 256;
     private const int DISTANCE_MASK = DISTANCE_MAX - 1;
 
     private int _distance;
     private byte[] _history;
     private int _position;
 
     public DeltaFilter(bool isEncoder, Stream baseStream, byte[] info)
         : base(isEncoder, baseStream, 1)
     {
         _distance = info[0];
         _history = new byte[DISTANCE_MAX];
         _position = 0;
     }
 
     protected override int Transform(byte[] buffer, int offset, int count)
     {
         var end = offset + count;
         for (var i = offset; i < end; i++)
         {
             buffer[i] += _history[(_distance + _position--) & DISTANCE_MASK];
             _history[_position & DISTANCE_MASK] = buffer[i];
         }
         return count;
     }
 }
-}
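
A quick sanity check of what a delta filter computes on decode, independent of the history-buffer mechanics above: with a distance of 1, each output byte is the previous output byte plus the stored delta. A minimal standalone sketch (illustration only, not using the DeltaFilter class):

// Decode the deltas 0x0A 0x01 0x01 in place: the result is 0x0A 0x0B 0x0C.
byte[] data = { 0x0A, 0x01, 0x01 };
byte prev = 0;
for (var i = 0; i < data.Length; i++)
{
    prev += data[i]; // current output = previous output + delta
    data[i] = prev;
}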

View File

@@ -7,7 +7,7 @@ using SharpCompress.Compressors.Deflate;
 using SharpCompress.Compressors.Filters;
 using SharpCompress.Compressors.LZMA.Utilites;
 using SharpCompress.Compressors.PPMd;
-using ZstdSharp;
+using SharpCompress.Compressors.ZStandard;
 
 namespace SharpCompress.Compressors.LZMA;

View File

@@ -1,65 +1,64 @@
namespace SharpCompress.Compressors.Lzw
namespace SharpCompress.Compressors.Lzw;
/// <summary>
/// This class contains constants used for LZW
/// </summary>
[System.Diagnostics.CodeAnalysis.SuppressMessage(
"Naming",
"CA1707:Identifiers should not contain underscores",
Justification = "kept for backwards compatibility"
)]
public sealed class LzwConstants
{
/// <summary>
/// This class contains constants used for LZW
/// Magic number found at start of LZW header: 0x1f 0x9d
/// </summary>
[System.Diagnostics.CodeAnalysis.SuppressMessage(
"Naming",
"CA1707:Identifiers should not contain underscores",
Justification = "kept for backwards compatibility"
)]
public sealed class LzwConstants
{
/// <summary>
/// Magic number found at start of LZW header: 0x1f 0x9d
/// </summary>
public const int MAGIC = 0x1f9d;
public const int MAGIC = 0x1f9d;
/// <summary>
/// Maximum number of bits per code
/// </summary>
public const int MAX_BITS = 16;
/// <summary>
/// Maximum number of bits per code
/// </summary>
public const int MAX_BITS = 16;
/* 3rd header byte:
* bit 0..4 Number of compression bits
* bit 5 Extended header
* bit 6 Free
* bit 7 Block mode
*/
/* 3rd header byte:
* bit 0..4 Number of compression bits
* bit 5 Extended header
* bit 6 Free
* bit 7 Block mode
*/
/// <summary>
/// Mask for 'number of compression bits'
/// </summary>
public const int BIT_MASK = 0x1f;
/// <summary>
/// Mask for 'number of compression bits'
/// </summary>
public const int BIT_MASK = 0x1f;
/// <summary>
/// Indicates the presence of a fourth header byte
/// </summary>
public const int EXTENDED_MASK = 0x20;
/// <summary>
/// Indicates the presence of a fourth header byte
/// </summary>
public const int EXTENDED_MASK = 0x20;
//public const int FREE_MASK = 0x40;
//public const int FREE_MASK = 0x40;
/// <summary>
/// Reserved bits
/// </summary>
public const int RESERVED_MASK = 0x60;
/// <summary>
/// Reserved bits
/// </summary>
public const int RESERVED_MASK = 0x60;
/// <summary>
/// Block compression: if table is full and compression rate is dropping,
/// clear the dictionary.
/// </summary>
public const int BLOCK_MODE_MASK = 0x80;
/// <summary>
/// Block compression: if table is full and compression rate is dropping,
/// clear the dictionary.
/// </summary>
public const int BLOCK_MODE_MASK = 0x80;
/// <summary>
/// LZW file header size (in bytes)
/// </summary>
public const int HDR_SIZE = 3;
/// <summary>
/// LZW file header size (in bytes)
/// </summary>
public const int HDR_SIZE = 3;
/// <summary>
/// Initial number of bits per code
/// </summary>
public const int INIT_BITS = 9;
/// <summary>
/// Initial number of bits per code
/// </summary>
public const int INIT_BITS = 9;
private LzwConstants() { }
}
private LzwConstants() { }
}
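
As a worked example of these masks (a hypothetical header byte, not taken from a real stream): a third header byte of 0x8C decodes as 12 compression bits with block mode enabled.

byte thirdByte = 0x8C;
int bits = thirdByte & LzwConstants.BIT_MASK;                      // 0x8C & 0x1f == 12
bool blockMode = (thirdByte & LzwConstants.BLOCK_MODE_MASK) != 0;  // true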

File diff suppressed because it is too large

View File

@@ -1,52 +1,51 @@
 using System.Collections.Generic;
 using System.Linq;
 
-namespace SharpCompress.Compressors.RLE90
-{
+namespace SharpCompress.Compressors.RLE90;
+
 public static class RLE
 {
     private const byte DLE = 0x90;
 
     /// <summary>
     /// Unpacks an RLE compressed buffer.
     /// Format: <char> DLE <count>, where count == 0 -> DLE
     /// </summary>
     /// <param name="compressedBuffer">The compressed buffer to unpack.</param>
     /// <returns>A list of unpacked bytes.</returns>
     public static List<byte> UnpackRLE(byte[] compressedBuffer)
    {
         var result = new List<byte>(compressedBuffer.Length * 2); // Optimized initial capacity
         var countMode = false;
         byte last = 0;
         foreach (var c in compressedBuffer)
         {
             if (!countMode)
             {
                 if (c == DLE)
                 {
                     countMode = true;
                 }
                 else
                 {
                     result.Add(c);
                     last = c;
                 }
             }
             else
             {
                 countMode = false;
                 if (c == 0)
                 {
                     result.Add(DLE);
                 }
                 else
                 {
                     result.AddRange(Enumerable.Repeat(last, c - 1));
                 }
             }
         }
         return result;
     }
 }
-}
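
A short worked example of the <char> DLE <count> scheme, assuming the input bytes shown in the comments:

// 0x41 0x90 0x04: the literal 'A' is emitted first, then count - 1 == 3 repeats, four bytes in total.
var runs = RLE.UnpackRLE(new byte[] { 0x41, 0x90, 0x04 }); // 0x41, 0x41, 0x41, 0x41
// A count of zero escapes a literal DLE byte.
var escaped = RLE.UnpackRLE(new byte[] { 0x90, 0x00 });    // 0x90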

View File

@@ -6,91 +6,90 @@ using System.Text;
 using System.Threading.Tasks;
 using SharpCompress.IO;
 
-namespace SharpCompress.Compressors.RLE90
-{
+namespace SharpCompress.Compressors.RLE90;
+
 public class RunLength90Stream : Stream, IStreamStack
 {
 #if DEBUG_STREAMS
     long IStreamStack.InstanceId { get; set; }
 #endif
     int IStreamStack.DefaultBufferSize { get; set; }
 
     Stream IStreamStack.BaseStream() => _stream;
 
     int IStreamStack.BufferSize
     {
         get => 0;
         set { }
     }
     int IStreamStack.BufferPosition
     {
         get => 0;
         set { }
     }
 
     void IStreamStack.SetPosition(long position) { }
 
     private readonly Stream _stream;
     private const byte DLE = 0x90;
     private int _compressedSize;
     private bool _processed = false;
 
     public RunLength90Stream(Stream stream, int compressedSize)
     {
         _stream = stream;
         _compressedSize = compressedSize;
 #if DEBUG_STREAMS
         this.DebugConstruct(typeof(RunLength90Stream));
 #endif
     }
 
     protected override void Dispose(bool disposing)
     {
 #if DEBUG_STREAMS
         this.DebugDispose(typeof(RunLength90Stream));
 #endif
         base.Dispose(disposing);
     }
 
     public override bool CanRead => true;
     public override bool CanSeek => false;
     public override bool CanWrite => false;
     public override long Length => throw new NotImplementedException();
 
     public override long Position
     {
         get => _stream.Position;
         set => throw new NotImplementedException();
     }
 
     public override void Flush() => throw new NotImplementedException();
 
     public override int Read(byte[] buffer, int offset, int count)
     {
         if (_processed)
         {
             return 0;
         }
         _processed = true;
         using var binaryReader = new BinaryReader(_stream);
         byte[] compressedBuffer = binaryReader.ReadBytes(_compressedSize);
         var unpacked = RLE.UnpackRLE(compressedBuffer);
         unpacked.CopyTo(buffer);
         return unpacked.Count;
     }
 
     public override long Seek(long offset, SeekOrigin origin) =>
         throw new NotImplementedException();
 
     public override void SetLength(long value) => throw new NotImplementedException();
 
     public override void Write(byte[] buffer, int offset, int count) =>
         throw new NotImplementedException();
 }
-}
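
Usage sketch (stream and sizes below are hypothetical): the whole payload is decoded on the first Read and copied to the start of the caller's buffer, so the buffer must be at least as large as the unpacked output.

using var rle = new RunLength90Stream(archiveStream, compressedSize);
var output = new byte[maxUnpackedSize];
var unpackedLength = rle.Read(output, 0, output.Length);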

View File

@@ -1,79 +1,78 @@
-namespace SharpCompress.Compressors.Shrink
-{
+namespace SharpCompress.Compressors.Shrink;
+
 internal class BitStream
 {
     private byte[] _src;
     private int _srcLen;
     private int _byteIdx;
     private int _bitIdx;
     private int _bitsLeft;
     private ulong _bitBuffer;
 
     private static uint[] _maskBits = new uint[17]
     {
         0U,
         1U,
         3U,
         7U,
         15U,
         31U,
         63U,
         (uint)sbyte.MaxValue,
         (uint)byte.MaxValue,
         511U,
         1023U,
         2047U,
         4095U,
         8191U,
         16383U,
         (uint)short.MaxValue,
         (uint)ushort.MaxValue,
     };
 
     public BitStream(byte[] src, int srcLen)
     {
         _src = src;
         _srcLen = srcLen;
         _byteIdx = 0;
         _bitIdx = 0;
     }
 
     public int BytesRead => (_byteIdx << 3) + _bitIdx;
 
     private int NextByte()
     {
         if (_byteIdx >= _srcLen)
         {
             return 0;
         }
         return _src[_byteIdx++];
     }
 
     public int NextBits(int nbits)
     {
         var result = 0;
         if (nbits > _bitsLeft)
         {
             int num;
             while (_bitsLeft <= 24 && (num = NextByte()) != 1234)
             {
                 _bitBuffer |= (ulong)num << _bitsLeft;
                 _bitsLeft += 8;
             }
         }
         result = (int)((long)_bitBuffer & (long)_maskBits[nbits]);
         _bitBuffer >>= nbits;
         _bitsLeft -= nbits;
         return result;
     }
 
     public bool Advance(int count)
     {
         if (_byteIdx > _srcLen)
         {
             return false;
         }
         return true;
     }
 }
-}
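
A rough sketch of the bit-reading contract (hypothetical input bytes): NextBits returns the next n bits least-significant-bit first and shifts them out of the internal buffer; Advance reports whether the read stayed within the source.

var bits = new BitStream(new byte[] { 0xFF, 0x01, 0x00 }, 3);
var first = bits.NextBits(9);  // 0x1FF, the low nine bits of the buffered 0x0001FF
bits.Advance(9);
var second = bits.NextBits(9); // 0x000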

View File

@@ -1,275 +1,297 @@
 using System;
 
-namespace SharpCompress.Compressors.Shrink
-{
+namespace SharpCompress.Compressors.Shrink;
+
 public class HwUnshrink
 {
     private const int MIN_CODE_SIZE = 9;
     private const int MAX_CODE_SIZE = 13;
     private const ushort MAX_CODE = (ushort)((1U << MAX_CODE_SIZE) - 1);
     private const ushort INVALID_CODE = ushort.MaxValue;
     private const ushort CONTROL_CODE = 256;
     private const ushort INC_CODE_SIZE = 1;
     private const ushort PARTIAL_CLEAR = 2;
     private const int HASH_BITS = MAX_CODE_SIZE + 1; // For a load factor of 0.5.
     private const int HASHTAB_SIZE = 1 << HASH_BITS;
     private const ushort UNKNOWN_LEN = ushort.MaxValue;
 
     private struct CodeTabEntry
     {
         public int prefixCode; // INVALID_CODE means the entry is invalid.
         public byte extByte;
         public ushort len;
         public int lastDstPos;
     }
 
     private static void CodeTabInit(CodeTabEntry[] codeTab)
     {
         for (var i = 0; i <= byte.MaxValue; i++)
         {
             codeTab[i].prefixCode = (ushort)i;
             codeTab[i].extByte = (byte)i;
             codeTab[i].len = 1;
         }
         for (var i = byte.MaxValue + 1; i <= MAX_CODE; i++)
         {
             codeTab[i].prefixCode = INVALID_CODE;
         }
     }
 
     private static void UnshrinkPartialClear(CodeTabEntry[] codeTab, ref CodeQueue queue)
     {
         var isPrefix = new bool[MAX_CODE + 1];
         int codeQueueSize;
         // Scan for codes that have been used as a prefix.
         for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++)
         {
             if (codeTab[i].prefixCode != INVALID_CODE)
             {
                 isPrefix[codeTab[i].prefixCode] = true;
             }
         }
         // Clear "non-prefix" codes in the table; populate the code queue.
         codeQueueSize = 0;
         for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++)
         {
             if (!isPrefix[i])
             {
                 codeTab[i].prefixCode = INVALID_CODE;
                 queue.codes[codeQueueSize++] = (ushort)i;
             }
         }
         queue.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker.
         queue.nextIdx = 0;
     }
 
     private static bool ReadCode(
         BitStream stream,
         ref int codeSize,
         CodeTabEntry[] codeTab,
         ref CodeQueue queue,
         out int nextCode
     )
     {
         int code,
             controlCode;
         code = (int)stream.NextBits(codeSize);
         if (!stream.Advance(codeSize))
         {
             nextCode = INVALID_CODE;
             return false;
         }
         // Handle regular codes (the common case).
         if (code != CONTROL_CODE)
         {
             nextCode = code;
             return true;
         }
         // Handle control codes.
         controlCode = (ushort)stream.NextBits(codeSize);
         if (!stream.Advance(codeSize))
         {
             nextCode = INVALID_CODE;
             return true;
         }
         if (controlCode == INC_CODE_SIZE && codeSize < MAX_CODE_SIZE)
         {
             codeSize++;
             return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode);
         }
         if (controlCode == PARTIAL_CLEAR)
         {
             UnshrinkPartialClear(codeTab, ref queue);
             return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode);
         }
         nextCode = INVALID_CODE;
         return true;
     }
 
     private static void CopyFromPrevPos(byte[] dst, int prevPos, int dstPos, int len)
     {
         if (dstPos + len > dst.Length)
         {
             // Not enough room in dst for the sloppy copy below.
             Array.Copy(dst, prevPos, dst, dstPos, len);
             return;
         }
         if (prevPos + len > dstPos)
         {
             // Benign one-byte overlap possible in the KwKwK case.
             //assert(prevPos + len == dstPos + 1);
             //assert(dst[prevPos] == dst[prevPos + len - 1]);
         }
         Buffer.BlockCopy(dst, prevPos, dst, dstPos, len);
     }
 
     private static UnshrnkStatus OutputCode(
         int code,
         byte[] dst,
         int dstPos,
         int dstCap,
         int prevCode,
         CodeTabEntry[] codeTab,
         ref CodeQueue queue,
         out byte firstByte,
         out int len
     )
     {
         int prefixCode;
         //assert(code <= MAX_CODE && code != CONTROL_CODE);
         //assert(dstPos < dstCap);
         firstByte = 0;
         if (code <= byte.MaxValue)
         {
             // Output literal byte.
             firstByte = (byte)code;
             len = 1;
             dst[dstPos] = (byte)code;
             return UnshrnkStatus.Ok;
         }
         if (codeTab[code].prefixCode == INVALID_CODE || codeTab[code].prefixCode == code)
         {
             // Reject invalid codes. Self-referential codes may exist in the table but cannot be used.
             firstByte = 0;
             len = 0;
             return UnshrnkStatus.Error;
         }
         if (codeTab[code].len != UNKNOWN_LEN)
         {
             // Output string with known length (the common case).
             if (dstCap - dstPos < codeTab[code].len)
             {
                 firstByte = 0;
                 len = 0;
                 return UnshrnkStatus.Full;
             }
             CopyFromPrevPos(dst, codeTab[code].lastDstPos, dstPos, codeTab[code].len);
             firstByte = dst[dstPos];
             len = codeTab[code].len;
             return UnshrnkStatus.Ok;
         }
         // Output a string of unknown length.
         //assert(codeTab[code].len == UNKNOWN_LEN);
         prefixCode = codeTab[code].prefixCode;
         // assert(prefixCode > CONTROL_CODE);
         if (prefixCode == queue.codes[queue.nextIdx])
         {
             // The prefix code hasn't been added yet, but we were just about to: the KwKwK case.
             //assert(codeTab[prevCode].prefixCode != INVALID_CODE);
             codeTab[prefixCode].prefixCode = prevCode;
             codeTab[prefixCode].extByte = firstByte;
             codeTab[prefixCode].len = (ushort)(codeTab[prevCode].len + 1);
             codeTab[prefixCode].lastDstPos = codeTab[prevCode].lastDstPos;
             dst[dstPos] = firstByte;
         }
         else if (codeTab[prefixCode].prefixCode == INVALID_CODE)
         {
             // The prefix code is still invalid.
             firstByte = 0;
             len = 0;
             return UnshrnkStatus.Error;
         }
         // Output the prefix string, then the extension byte.
         len = codeTab[prefixCode].len + 1;
         if (dstCap - dstPos < len)
         {
             firstByte = 0;
             len = 0;
             return UnshrnkStatus.Full;
         }
         CopyFromPrevPos(dst, codeTab[prefixCode].lastDstPos, dstPos, codeTab[prefixCode].len);
         dst[dstPos + len - 1] = codeTab[code].extByte;
         firstByte = dst[dstPos];
         // Update the code table now that the string has a length and pos.
         //assert(prevCode != code);
         codeTab[code].len = (ushort)len;
         codeTab[code].lastDstPos = dstPos;
         return UnshrnkStatus.Ok;
     }
 
     public static UnshrnkStatus Unshrink(
         byte[] src,
         int srcLen,
         out int srcUsed,
         byte[] dst,
         int dstCap,
         out int dstUsed
     )
     {
         var codeTab = new CodeTabEntry[HASHTAB_SIZE];
         var queue = new CodeQueue();
         var stream = new BitStream(src, srcLen);
         int codeSize,
             dstPos,
             len;
         int currCode,
             prevCode,
             newCode;
         byte firstByte;
         CodeTabInit(codeTab);
         CodeQueueInit(ref queue);
         codeSize = MIN_CODE_SIZE;
         dstPos = 0;
         // Handle the first code separately since there is no previous code.
         if (!ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode))
         {
             srcUsed = stream.BytesRead;
             dstUsed = 0;
             return UnshrnkStatus.Ok;
         }
         //assert(currCode != CONTROL_CODE);
         if (currCode > byte.MaxValue)
         {
             srcUsed = stream.BytesRead;
             dstUsed = 0;
             return UnshrnkStatus.Error; // The first code must be a literal.
         }
         if (dstPos == dstCap)
         {
             srcUsed = stream.BytesRead;
             dstUsed = 0;
             return UnshrnkStatus.Full;
         }
         firstByte = (byte)currCode;
         dst[dstPos] = (byte)currCode;
         codeTab[currCode].lastDstPos = dstPos;
         dstPos++;
         prevCode = currCode;
         while (ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode))
         {
             if (currCode == INVALID_CODE)
             {
                 srcUsed = stream.BytesRead;
                 dstUsed = 0;
                 return UnshrnkStatus.Error;
             }
             if (dstPos == dstCap)
             {
                 srcUsed = stream.BytesRead;
                 dstUsed = 0;
                 return UnshrnkStatus.Full;
             }
             // Handle KwKwK: next code used before being added.
             if (currCode == queue.codes[queue.nextIdx])
             {
                 if (codeTab[prevCode].prefixCode == INVALID_CODE)
                 {
                     // The previous code is no longer valid.
                     srcUsed = stream.BytesRead;
                     dstUsed = 0;
                     return UnshrnkStatus.Error;
                 }
                 // Extend the previous code with its first byte.
                 //assert(currCode != prevCode);
                 codeTab[currCode].prefixCode = prevCode;
                 codeTab[currCode].extByte = firstByte;
                 codeTab[currCode].len = (ushort)(codeTab[prevCode].len + 1);
                 codeTab[currCode].lastDstPos = codeTab[prevCode].lastDstPos;
                 //assert(dstPos < dstCap);
                 dst[dstPos] = firstByte;
             }
             // Output the string represented by the current code.
             var status = OutputCode(
                 currCode,
                 dst,
                 dstPos,
                 dstCap,
                 prevCode,
                 codeTab,
                 ref queue,
                 out firstByte,
                 out len
             );
             if (status != UnshrnkStatus.Ok)
             {
                 srcUsed = stream.BytesRead;
                 dstUsed = 0;
                 return status;
             }
             // Verify that the output matches walking the prefixes.
             var c = currCode;
             for (var i = 0; i < len; i++)
             {
                 // assert(codeTab[c].len == len - i);
                 //assert(codeTab[c].extByte == dst[dstPos + len - i - 1]);
                 c = codeTab[c].prefixCode;
             }
             // Add a new code to the string table if there's room.
             // The string is the previous code's string extended with the first byte of the current code's string.
             newCode = CodeQueueRemoveNext(ref queue);
             if (newCode != INVALID_CODE)
             {
                 //assert(codeTab[prevCode].lastDstPos < dstPos);
                 codeTab[newCode].prefixCode = prevCode;
                 codeTab[newCode].extByte = firstByte;
                 codeTab[newCode].len = (ushort)(codeTab[prevCode].len + 1);
                 codeTab[newCode].lastDstPos = codeTab[prevCode].lastDstPos;
                 if (codeTab[prevCode].prefixCode == INVALID_CODE)
                 {
                     // prevCode was invalidated in a partial clearing. Until that code is re-used, the
                     // string represented by newCode is indeterminate.
                     codeTab[newCode].len = UNKNOWN_LEN;
                 }
                 // If prevCode was invalidated in a partial clearing, it's possible that newCode == prevCode,
                 // in which case it will never be used or cleared.
             }
             codeTab[currCode].lastDstPos = dstPos;
             dstPos += len;
             prevCode = currCode;
         }
         srcUsed = stream.BytesRead;
         dstUsed = dstPos;
         return UnshrnkStatus.Ok;
     }
 
     public enum UnshrnkStatus
     {
         Ok,
         Full,
         Error,
     }
 
     private struct CodeQueue
     {
         public int nextIdx;
         public ushort[] codes;
     }
 
     private static void CodeQueueInit(ref CodeQueue q)
     {
         int codeQueueSize;
         ushort code;
         codeQueueSize = 0;
         q.codes = new ushort[MAX_CODE - CONTROL_CODE + 2];
         for (code = CONTROL_CODE + 1; code <= MAX_CODE; code++)
         {
             q.codes[codeQueueSize++] = code;
         }
         //assert(codeQueueSize < q.codes.Length);
         q.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker.
         q.nextIdx = 0;
     }
 
     private static ushort CodeQueueNext(ref CodeQueue q) =>
         //assert(q.nextIdx < q.codes.Length);
         q.codes[q.nextIdx];
 
     private static ushort CodeQueueRemoveNext(ref CodeQueue q)
     {
         var code = CodeQueueNext(ref q);
         if (code != INVALID_CODE)
         {
             q.nextIdx++;
         }
         return code;
     }
 }
-}
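
Call sketch (inputs are hypothetical; dst would be sized from the zip entry's declared uncompressed size):

var status = HwUnshrink.Unshrink(src, src.Length, out var srcUsed, dst, dst.Length, out var dstUsed);
if (status != HwUnshrink.UnshrnkStatus.Ok)
{
    // Full: dst was too small; Error: corrupt code stream.
}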

View File

@@ -7,139 +7,138 @@ using System.Threading.Tasks;
 using SharpCompress.Compressors.RLE90;
 using SharpCompress.IO;
 
-namespace SharpCompress.Compressors.Squeezed
-{
+namespace SharpCompress.Compressors.Squeezed;
+
 public class SqueezeStream : Stream, IStreamStack
 {
 #if DEBUG_STREAMS
     long IStreamStack.InstanceId { get; set; }
 #endif
     int IStreamStack.DefaultBufferSize { get; set; }
 
     Stream IStreamStack.BaseStream() => _stream;
 
     int IStreamStack.BufferSize
     {
         get => 0;
         set { }
     }
     int IStreamStack.BufferPosition
     {
         get => 0;
         set { }
     }
 
     void IStreamStack.SetPosition(long position) { }
 
     private readonly Stream _stream;
     private readonly int _compressedSize;
     private const int NUMVALS = 257;
     private const int SPEOF = 256;
     private bool _processed = false;
 
     public SqueezeStream(Stream stream, int compressedSize)
     {
         _stream = stream;
         _compressedSize = compressedSize;
 #if DEBUG_STREAMS
         this.DebugConstruct(typeof(SqueezeStream));
 #endif
     }
 
     protected override void Dispose(bool disposing)
     {
 #if DEBUG_STREAMS
         this.DebugDispose(typeof(SqueezeStream));
 #endif
         base.Dispose(disposing);
     }
 
     public override bool CanRead => true;
     public override bool CanSeek => false;
     public override bool CanWrite => false;
     public override long Length => throw new NotImplementedException();
 
     public override long Position
     {
         get => _stream.Position;
         set => throw new NotImplementedException();
     }
 
     public override void Flush() => throw new NotImplementedException();
 
     public override int Read(byte[] buffer, int offset, int count)
     {
         if (_processed)
         {
             return 0;
         }
         _processed = true;
         using var binaryReader = new BinaryReader(_stream);
         // Read numnodes (equivalent to convert_u16!(numnodes, buf))
         var numnodes = binaryReader.ReadUInt16();
         // Validation: numnodes should be within bounds
         if (numnodes >= NUMVALS)
         {
             throw new InvalidDataException(
                 $"Invalid number of nodes {numnodes} (max {NUMVALS - 1})"
             );
         }
         // Handle the case where no nodes exist
         if (numnodes == 0)
         {
             return 0;
         }
         // Build dnode (tree of nodes)
         var dnode = new int[numnodes, 2];
         for (int j = 0; j < numnodes; j++)
         {
             dnode[j, 0] = binaryReader.ReadInt16();
             dnode[j, 1] = binaryReader.ReadInt16();
         }
         // Initialize BitReader for reading bits
         var bitReader = new BitReader(_stream);
         var decoded = new List<byte>();
         int i = 0;
         // Decode the buffer using the dnode tree
         while (true)
         {
             i = dnode[i, bitReader.ReadBit() ? 1 : 0];
             if (i < 0)
             {
                 i = (short)-(i + 1);
                 if (i == SPEOF)
                 {
                     break;
                 }
                 else
                 {
                     decoded.Add((byte)i);
                     i = 0;
                 }
             }
         }
         // Unpack the decoded buffer using the RLE class
         var unpacked = RLE.UnpackRLE(decoded.ToArray());
         unpacked.CopyTo(buffer, 0);
         return unpacked.Count();
     }
 
     public override long Seek(long offset, SeekOrigin origin) =>
         throw new NotImplementedException();
 
     public override void SetLength(long value) => throw new NotImplementedException();
 
     public override void Write(byte[] buffer, int offset, int count) =>
         throw new NotImplementedException();
 }
-}
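
One detail worth calling out from the decode loop above: negative child indices in the squeeze tree are encoded leaves, recovered as -(child + 1), and the leaf value 256 (SPEOF) terminates the stream. For example (hypothetical node value):

short child = -66;
int leaf = -(child + 1); // 65, i.e. 'A'; a leaf of 256 would signal SPEOF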

View File

@@ -0,0 +1,311 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#if !NETCOREAPP3_0_OR_GREATER
using System.Runtime.CompilerServices;
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
// Some routines inspired by the Stanford Bit Twiddling Hacks by Sean Eron Anderson:
// http://graphics.stanford.edu/~seander/bithacks.html
namespace System.Numerics
{
/// <summary>
/// Utility methods for intrinsic bit-twiddling operations.
/// The methods use hardware intrinsics when available on the underlying platform,
/// otherwise they use optimized software fallbacks.
/// </summary>
public static unsafe class BitOperations
{
// hack: should be public because of inline
public static readonly byte* TrailingZeroCountDeBruijn = GetArrayPointer(
new byte[]
{
00,
01,
28,
02,
29,
14,
24,
03,
30,
22,
20,
15,
25,
17,
04,
08,
31,
27,
13,
23,
21,
19,
16,
07,
26,
12,
18,
06,
11,
05,
10,
09,
}
);
// hack: should be public because of inline
public static readonly byte* Log2DeBruijn = GetArrayPointer(
new byte[]
{
00,
09,
01,
10,
13,
21,
02,
29,
11,
14,
16,
18,
22,
25,
03,
30,
08,
12,
20,
28,
15,
17,
24,
07,
19,
27,
23,
06,
26,
05,
04,
31,
}
);
/// <summary>
/// Returns the integer (floor) log of the specified value, base 2.
/// Note that by convention, input value 0 returns 0 since log(0) is undefined.
/// </summary>
/// <param name="value">The value.</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int Log2(uint value)
{
// The 0->0 contract is fulfilled by setting the LSB to 1.
// Log(1) is 0, and setting the LSB for values > 1 does not change the log2 result.
value |= 1;
// value lzcnt actual expected
// ..0001 31 31-31 0
// ..0010 30 31-30 1
// 0010.. 2 31-2 29
// 0100.. 1 31-1 30
// 1000.. 0 31-0 31
// Fallback contract is 0->0
// No AggressiveInlining due to large method size
// Has conventional contract 0->0 (Log(0) is undefined)
// Fill trailing zeros with ones, eg 00010010 becomes 00011111
value |= value >> 01;
value |= value >> 02;
value |= value >> 04;
value |= value >> 08;
value |= value >> 16;
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
return Log2DeBruijn[
// Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_1100_0100_1010_1100_1101_1101u
(int)((value * 0x07C4ACDDu) >> 27)
];
}
/// <summary>
/// Returns the integer (floor) log of the specified value, base 2.
/// Note that by convention, input value 0 returns 0 since log(0) is undefined.
/// </summary>
/// <param name="value">The value.</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int Log2(ulong value)
{
value |= 1;
uint hi = (uint)(value >> 32);
if (hi == 0)
{
return Log2((uint)value);
}
return 32 + Log2(hi);
}
/// <summary>
/// Count the number of trailing zero bits in an integer value.
/// Similar in behavior to the x86 instruction TZCNT.
/// </summary>
/// <param name="value">The value.</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int TrailingZeroCount(int value) => TrailingZeroCount((uint)value);
/// <summary>
/// Count the number of trailing zero bits in an integer value.
/// Similar in behavior to the x86 instruction TZCNT.
/// </summary>
/// <param name="value">The value.</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int TrailingZeroCount(uint value)
{
// Unguarded fallback contract is 0->0, BSF contract is 0->undefined
if (value == 0)
{
return 32;
}
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
return TrailingZeroCountDeBruijn[
// Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_0111_1100_1011_0101_0011_0001u
(int)(((value & (uint)-(int)value) * 0x077CB531u) >> 27)
]; // Multi-cast mitigates redundant conv.u8
}
/// <summary>
/// Count the number of trailing zero bits in a mask.
/// Similar in behavior to the x86 instruction TZCNT.
/// </summary>
/// <param name="value">The value.</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int TrailingZeroCount(long value) => TrailingZeroCount((ulong)value);
/// <summary>
/// Count the number of trailing zero bits in a mask.
/// Similar in behavior to the x86 instruction TZCNT.
/// </summary>
/// <param name="value">The value.</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int TrailingZeroCount(ulong value)
{
uint lo = (uint)value;
if (lo == 0)
{
return 32 + TrailingZeroCount((uint)(value >> 32));
}
return TrailingZeroCount(lo);
}
/// <summary>
/// Rotates the specified value left by the specified number of bits.
/// Similar in behavior to the x86 instruction ROL.
/// </summary>
/// <param name="value">The value to rotate.</param>
/// <param name="offset">The number of bits to rotate by.
/// Any value outside the range [0..31] is treated as congruent mod 32.</param>
/// <returns>The rotated value.</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static uint RotateLeft(uint value, int offset) =>
(value << offset) | (value >> (32 - offset));
/// <summary>
/// Rotates the specified value left by the specified number of bits.
/// Similar in behavior to the x86 instruction ROL.
/// </summary>
/// <param name="value">The value to rotate.</param>
/// <param name="offset">The number of bits to rotate by.
/// Any value outside the range [0..63] is treated as congruent mod 64.</param>
/// <returns>The rotated value.</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static ulong RotateLeft(ulong value, int offset) =>
(value << offset) | (value >> (64 - offset));
/// <summary>
/// Rotates the specified value right by the specified number of bits.
/// Similar in behavior to the x86 instruction ROR.
/// </summary>
/// <param name="value">The value to rotate.</param>
/// <param name="offset">The number of bits to rotate by.
/// Any value outside the range [0..31] is treated as congruent mod 32.</param>
/// <returns>The rotated value.</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static uint RotateRight(uint value, int offset) =>
(value >> offset) | (value << (32 - offset));
/// <summary>
/// Rotates the specified value right by the specified number of bits.
/// Similar in behavior to the x86 instruction ROR.
/// </summary>
/// <param name="value">The value to rotate.</param>
/// <param name="offset">The number of bits to rotate by.
/// Any value outside the range [0..63] is treated as congruent mod 64.</param>
/// <returns>The rotated value.</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static ulong RotateRight(ulong value, int offset) =>
(value >> offset) | (value << (64 - offset));
/// <summary>
/// Count the number of leading zero bits in a mask.
/// Similar in behavior to the x86 instruction LZCNT.
/// </summary>
/// <param name="value">The value.</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int LeadingZeroCount(uint value)
{
// Unguarded fallback contract is 0->31, BSR contract is 0->undefined
if (value == 0)
{
return 32;
}
// No AggressiveInlining due to large method size
// Has conventional contract 0->0 (Log(0) is undefined)
// Fill trailing zeros with ones, eg 00010010 becomes 00011111
value |= value >> 01;
value |= value >> 02;
value |= value >> 04;
value |= value >> 08;
value |= value >> 16;
// uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check
return 31
^ Log2DeBruijn[
// uint|long -> IntPtr cast on 32-bit platforms does expensive overflow checks not needed here
(int)((value * 0x07C4ACDDu) >> 27)
];
}
/// <summary>
/// Count the number of leading zero bits in a mask.
/// Similar in behavior to the x86 instruction LZCNT.
/// </summary>
/// <param name="value">The value.</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int LeadingZeroCount(ulong value)
{
uint hi = (uint)(value >> 32);
if (hi == 0)
{
return 32 + LeadingZeroCount((uint)value);
}
return LeadingZeroCount(hi);
}
}
}
#endif
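
Since this polyfill mirrors the System.Numerics.BitOperations surface, call sites compile unchanged on older target frameworks; a few sample calls for orientation:

var log = BitOperations.Log2(1024u);                 // 10
var tz = BitOperations.TrailingZeroCount(8u);        // 3
var rot = BitOperations.RotateLeft(0x80000001u, 1);  // 0x00000003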

View File

@@ -0,0 +1,301 @@
using System;
using System.Buffers;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Compressors.ZStandard.Unsafe;
namespace SharpCompress.Compressors.ZStandard;
public class CompressionStream : Stream
{
private readonly Stream innerStream;
private readonly byte[] outputBuffer;
private readonly bool preserveCompressor;
private readonly bool leaveOpen;
private Compressor? compressor;
private ZSTD_outBuffer_s output;
public CompressionStream(
Stream stream,
int level = Compressor.DefaultCompressionLevel,
int bufferSize = 0,
bool leaveOpen = true
)
: this(stream, new Compressor(level), bufferSize, false, leaveOpen) { }
public CompressionStream(
Stream stream,
Compressor compressor,
int bufferSize = 0,
bool preserveCompressor = true,
bool leaveOpen = true
)
{
if (stream == null)
throw new ArgumentNullException(nameof(stream));
if (!stream.CanWrite)
throw new ArgumentException("Stream is not writable", nameof(stream));
if (bufferSize < 0)
throw new ArgumentOutOfRangeException(nameof(bufferSize));
innerStream = stream;
this.compressor = compressor;
this.preserveCompressor = preserveCompressor;
this.leaveOpen = leaveOpen;
var outputBufferSize =
bufferSize > 0
? bufferSize
: (int)Unsafe.Methods.ZSTD_CStreamOutSize().EnsureZstdSuccess();
outputBuffer = ArrayPool<byte>.Shared.Rent(outputBufferSize);
output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)outputBufferSize };
}
public void SetParameter(ZSTD_cParameter parameter, int value)
{
EnsureNotDisposed();
compressor.NotNull().SetParameter(parameter, value);
}
public int GetParameter(ZSTD_cParameter parameter)
{
EnsureNotDisposed();
return compressor.NotNull().GetParameter(parameter);
}
public void LoadDictionary(byte[] dict)
{
EnsureNotDisposed();
compressor.NotNull().LoadDictionary(dict);
}
~CompressionStream() => Dispose(false);
#if !NETSTANDARD2_0 && !NETFRAMEWORK
public override async ValueTask DisposeAsync()
#else
public async Task DisposeAsync()
#endif
{
if (compressor == null)
return;
try
{
await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_end).ConfigureAwait(false);
}
finally
{
ReleaseUnmanagedResources();
GC.SuppressFinalize(this);
}
}
protected override void Dispose(bool disposing)
{
if (compressor == null)
return;
try
{
if (disposing)
FlushInternal(ZSTD_EndDirective.ZSTD_e_end);
}
finally
{
ReleaseUnmanagedResources();
}
}
private void ReleaseUnmanagedResources()
{
if (!preserveCompressor)
{
compressor.NotNull().Dispose();
}
compressor = null;
if (outputBuffer != null)
{
ArrayPool<byte>.Shared.Return(outputBuffer);
}
if (!leaveOpen)
{
innerStream.Dispose();
}
}
public override void Flush() => FlushInternal(ZSTD_EndDirective.ZSTD_e_flush);
public override async Task FlushAsync(CancellationToken cancellationToken) =>
await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_flush, cancellationToken)
.ConfigureAwait(false);
private void FlushInternal(ZSTD_EndDirective directive) => WriteInternal(null, directive);
private async Task FlushInternalAsync(
ZSTD_EndDirective directive,
CancellationToken cancellationToken = default
) => await WriteInternalAsync(null, directive, cancellationToken).ConfigureAwait(false);
public override void Write(byte[] buffer, int offset, int count) =>
Write(new ReadOnlySpan<byte>(buffer, offset, count));
#if !NETSTANDARD2_0 && !NETFRAMEWORK
public override void Write(ReadOnlySpan<byte> buffer) =>
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue);
#else
public void Write(ReadOnlySpan<byte> buffer) =>
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue);
#endif
private void WriteInternal(ReadOnlySpan<byte> buffer, ZSTD_EndDirective directive)
{
EnsureNotDisposed();
var input = new ZSTD_inBuffer_s
{
pos = 0,
size = buffer != null ? (nuint)buffer.Length : 0,
};
nuint remaining;
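// For ZSTD_e_continue, loop until all input has been consumed; for the
// flush/end directives, loop until zstd reports nothing left to flush
// (remaining == 0).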
do
{
output.pos = 0;
remaining = CompressStream(ref input, buffer, directive);
var written = (int)output.pos;
if (written > 0)
innerStream.Write(outputBuffer, 0, written);
} while (
directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0
);
}
#if !NETSTANDARD2_0 && !NETFRAMEWORK
private async ValueTask WriteInternalAsync(
ReadOnlyMemory<byte>? buffer,
ZSTD_EndDirective directive,
CancellationToken cancellationToken = default
)
#else
private async Task WriteInternalAsync(
ReadOnlyMemory<byte>? buffer,
ZSTD_EndDirective directive,
CancellationToken cancellationToken = default
)
#endif
{
EnsureNotDisposed();
var input = new ZSTD_inBuffer_s
{
pos = 0,
size = buffer.HasValue ? (nuint)buffer.Value.Length : 0,
};
nuint remaining;
do
{
output.pos = 0;
remaining = CompressStream(
ref input,
buffer.HasValue ? buffer.Value.Span : null,
directive
);
var written = (int)output.pos;
if (written > 0)
await innerStream
.WriteAsync(outputBuffer, 0, written, cancellationToken)
.ConfigureAwait(false);
} while (
directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0
);
}
#if !NETSTANDARD2_0 && !NETFRAMEWORK
public override Task WriteAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken
) => WriteAsync(new ReadOnlyMemory<byte>(buffer, offset, count), cancellationToken).AsTask();
public override async ValueTask WriteAsync(
ReadOnlyMemory<byte> buffer,
CancellationToken cancellationToken = default
) =>
await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken)
.ConfigureAwait(false);
#else
public override Task WriteAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken
) => WriteAsync(new ReadOnlyMemory<byte>(buffer, offset, count), cancellationToken);
public async Task WriteAsync(
ReadOnlyMemory<byte> buffer,
CancellationToken cancellationToken = default
) =>
await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken)
.ConfigureAwait(false);
#endif
internal unsafe nuint CompressStream(
ref ZSTD_inBuffer_s input,
ReadOnlySpan<byte> inputBuffer,
ZSTD_EndDirective directive
)
{
fixed (byte* inputBufferPtr = inputBuffer)
fixed (byte* outputBufferPtr = outputBuffer)
{
input.src = inputBufferPtr;
output.dst = outputBufferPtr;
return compressor
.NotNull()
.CompressStream(ref input, ref output, directive)
.EnsureZstdSuccess();
}
}
public override bool CanRead => false;
public override bool CanSeek => false;
public override bool CanWrite => true;
public override long Length => throw new NotSupportedException();
public override long Position
{
get => throw new NotSupportedException();
set => throw new NotSupportedException();
}
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
public override void SetLength(long value) => throw new NotSupportedException();
public override int Read(byte[] buffer, int offset, int count) =>
throw new NotSupportedException();
private void EnsureNotDisposed()
{
if (compressor == null)
throw new ObjectDisposedException(nameof(CompressionStream));
}
public void SetPledgedSrcSize(ulong pledgedSrcSize)
{
EnsureNotDisposed();
compressor.NotNull().SetPledgedSrcSize(pledgedSrcSize);
}
}
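// Usage sketch (editor's addition, not part of this changeset): compressing a
// file end-to-end; the paths are illustrative.
internal static class CompressionStreamExample
{
public static void CompressFile(string srcPath, string dstPath)
{
using var src = File.OpenRead(srcPath);
using var dst = File.Create(dstPath);
using var zstd = new CompressionStream(dst, level: 5, leaveOpen: false);
src.CopyTo(zstd); // Dispose() finishes the frame via ZSTD_e_end
}
}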

View File

@@ -0,0 +1,204 @@
using System;
using SharpCompress.Compressors.ZStandard.Unsafe;
namespace SharpCompress.Compressors.ZStandard;
public unsafe class Compressor : IDisposable
{
/// <summary>
/// Minimum negative compression level allowed
/// </summary>
public static int MinCompressionLevel => Unsafe.Methods.ZSTD_minCLevel();
/// <summary>
/// Maximum compression level available
/// </summary>
public static int MaxCompressionLevel => Unsafe.Methods.ZSTD_maxCLevel();
/// <summary>
/// Default compression level
/// </summary>
/// <see cref="Unsafe.Methods.ZSTD_defaultCLevel"/>
public const int DefaultCompressionLevel = 3;
private int level = DefaultCompressionLevel;
private readonly SafeCctxHandle handle;
public int Level
{
get => level;
set
{
if (level != value)
{
level = value;
SetParameter(ZSTD_cParameter.ZSTD_c_compressionLevel, value);
}
}
}
public void SetParameter(ZSTD_cParameter parameter, int value)
{
using var cctx = handle.Acquire();
Unsafe.Methods.ZSTD_CCtx_setParameter(cctx, parameter, value).EnsureZstdSuccess();
}
public int GetParameter(ZSTD_cParameter parameter)
{
using var cctx = handle.Acquire();
int value;
Unsafe.Methods.ZSTD_CCtx_getParameter(cctx, parameter, &value).EnsureZstdSuccess();
return value;
}
public void LoadDictionary(byte[] dict)
{
var dictReadOnlySpan = new ReadOnlySpan<byte>(dict);
LoadDictionary(dictReadOnlySpan);
}
public void LoadDictionary(ReadOnlySpan<byte> dict)
{
using var cctx = handle.Acquire();
fixed (byte* dictPtr = dict)
Unsafe
.Methods.ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length)
.EnsureZstdSuccess();
}
public Compressor(int level = DefaultCompressionLevel)
{
handle = SafeCctxHandle.Create();
Level = level;
}
public static int GetCompressBound(int length) =>
(int)Unsafe.Methods.ZSTD_compressBound((nuint)length);
public static ulong GetCompressBoundLong(ulong length) =>
Unsafe.Methods.ZSTD_compressBound((nuint)length);
public Span<byte> Wrap(ReadOnlySpan<byte> src)
{
var dest = new byte[GetCompressBound(src.Length)];
var length = Wrap(src, dest);
return new Span<byte>(dest, 0, length);
}
public int Wrap(byte[] src, byte[] dest, int offset) =>
Wrap(src, new Span<byte>(dest, offset, dest.Length - offset));
public int Wrap(ReadOnlySpan<byte> src, Span<byte> dest)
{
fixed (byte* srcPtr = src)
fixed (byte* destPtr = dest)
{
using var cctx = handle.Acquire();
return (int)
Unsafe
.Methods.ZSTD_compress2(
cctx,
destPtr,
(nuint)dest.Length,
srcPtr,
(nuint)src.Length
)
.EnsureZstdSuccess();
}
}
public int Wrap(ArraySegment<byte> src, ArraySegment<byte> dest) =>
Wrap((ReadOnlySpan<byte>)src, dest);
public int Wrap(
byte[] src,
int srcOffset,
int srcLength,
byte[] dst,
int dstOffset,
int dstLength
) =>
Wrap(
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
new Span<byte>(dst, dstOffset, dstLength)
);
public bool TryWrap(byte[] src, byte[] dest, int offset, out int written) =>
TryWrap(src, new Span<byte>(dest, offset, dest.Length - offset), out written);
public bool TryWrap(ReadOnlySpan<byte> src, Span<byte> dest, out int written)
{
fixed (byte* srcPtr = src)
fixed (byte* destPtr = dest)
{
nuint returnValue;
using (var cctx = handle.Acquire())
{
returnValue = Unsafe.Methods.ZSTD_compress2(
cctx,
destPtr,
(nuint)dest.Length,
srcPtr,
(nuint)src.Length
);
}
if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))
{
written = default;
return false;
}
returnValue.EnsureZstdSuccess();
written = (int)returnValue;
return true;
}
}
public bool TryWrap(ArraySegment<byte> src, ArraySegment<byte> dest, out int written) =>
TryWrap((ReadOnlySpan<byte>)src, dest, out written);
public bool TryWrap(
byte[] src,
int srcOffset,
int srcLength,
byte[] dst,
int dstOffset,
int dstLength,
out int written
) =>
TryWrap(
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
new Span<byte>(dst, dstOffset, dstLength),
out written
);
public void Dispose()
{
handle.Dispose();
GC.SuppressFinalize(this);
}
internal nuint CompressStream(
ref ZSTD_inBuffer_s input,
ref ZSTD_outBuffer_s output,
ZSTD_EndDirective directive
)
{
fixed (ZSTD_inBuffer_s* inputPtr = &input)
fixed (ZSTD_outBuffer_s* outputPtr = &output)
{
using var cctx = handle.Acquire();
return Unsafe
.Methods.ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive)
.EnsureZstdSuccess();
}
}
public void SetPledgedSrcSize(ulong pledgedSrcSize)
{
using var cctx = handle.Acquire();
Unsafe.Methods.ZSTD_CCtx_setPledgedSrcSize(cctx, pledgedSrcSize).EnsureZstdSuccess();
}
}
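// Usage sketch (editor's addition): one-shot, in-memory compression; the data
// passed in is illustrative.
internal static class CompressorExample
{
public static byte[] CompressBytes(byte[] data)
{
using var compressor = new Compressor(level: 3);
// Wrap allocates GetCompressBound(data.Length) bytes and trims the result
return compressor.Wrap(data).ToArray();
}
}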

View File

@@ -0,0 +1,8 @@
namespace SharpCompress.Compressors.ZStandard;
internal static class Constants
{
//NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks
//NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27
public const ulong MaxByteArrayLength = 0x7FFFFFC7;
}

View File

@@ -0,0 +1,293 @@
using System;
using System.Buffers;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Compressors.ZStandard.Unsafe;
namespace SharpCompress.Compressors.ZStandard;
public class DecompressionStream : Stream
{
private readonly Stream innerStream;
private readonly byte[] inputBuffer;
private readonly int inputBufferSize;
private readonly bool preserveDecompressor;
private readonly bool leaveOpen;
private readonly bool checkEndOfStream;
private Decompressor? decompressor;
private ZSTD_inBuffer_s input;
private nuint lastDecompressResult = 0;
private bool contextDrained = true;
public DecompressionStream(
Stream stream,
int bufferSize = 0,
bool checkEndOfStream = true,
bool leaveOpen = true
)
: this(stream, new Decompressor(), bufferSize, checkEndOfStream, false, leaveOpen) { }
public DecompressionStream(
Stream stream,
Decompressor decompressor,
int bufferSize = 0,
bool checkEndOfStream = true,
bool preserveDecompressor = true,
bool leaveOpen = true
)
{
if (stream == null)
throw new ArgumentNullException(nameof(stream));
if (!stream.CanRead)
throw new ArgumentException("Stream is not readable", nameof(stream));
if (bufferSize < 0)
throw new ArgumentOutOfRangeException(nameof(bufferSize));
innerStream = stream;
this.decompressor = decompressor;
this.preserveDecompressor = preserveDecompressor;
this.leaveOpen = leaveOpen;
this.checkEndOfStream = checkEndOfStream;
inputBufferSize =
bufferSize > 0
? bufferSize
: (int)Unsafe.Methods.ZSTD_DStreamInSize().EnsureZstdSuccess();
inputBuffer = ArrayPool<byte>.Shared.Rent(inputBufferSize);
input = new ZSTD_inBuffer_s { pos = (nuint)inputBufferSize, size = (nuint)inputBufferSize };
}
public void SetParameter(ZSTD_dParameter parameter, int value)
{
EnsureNotDisposed();
decompressor.NotNull().SetParameter(parameter, value);
}
public int GetParameter(ZSTD_dParameter parameter)
{
EnsureNotDisposed();
return decompressor.NotNull().GetParameter(parameter);
}
public void LoadDictionary(byte[] dict)
{
EnsureNotDisposed();
decompressor.NotNull().LoadDictionary(dict);
}
~DecompressionStream() => Dispose(false);
protected override void Dispose(bool disposing)
{
if (decompressor == null)
return;
if (!preserveDecompressor)
{
decompressor.Dispose();
}
decompressor = null;
if (inputBuffer != null)
{
ArrayPool<byte>.Shared.Return(inputBuffer);
}
if (!leaveOpen)
{
innerStream.Dispose();
}
}
public override int Read(byte[] buffer, int offset, int count) =>
Read(new Span<byte>(buffer, offset, count));
#if !NETSTANDARD2_0 && !NETFRAMEWORK
public override int Read(Span<byte> buffer)
#else
public int Read(Span<byte> buffer)
#endif
{
EnsureNotDisposed();
// Guard against infinite loop (output.pos would never become non-zero)
if (buffer.Length == 0)
{
return 0;
}
var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length };
while (true)
{
// If there is still input available, or there might be data buffered in the decompressor context, flush that out
while (input.pos < input.size || !contextDrained)
{
nuint oldInputPos = input.pos;
nuint result = DecompressStream(ref output, buffer);
if (output.pos > 0 || oldInputPos != input.pos)
{
// Keep the result from the last decompress call that made progress, so we know if we're at the end of a frame
lastDecompressResult = result;
}
// If decompression filled the output buffer, there might still be data buffered in the decompressor context
contextDrained = output.pos < output.size;
// If we have data to return, return it immediately, so we won't stall on Read
if (output.pos > 0)
{
return (int)output.pos;
}
}
// Otherwise, read some more input
int bytesRead;
if ((bytesRead = innerStream.Read(inputBuffer, 0, inputBufferSize)) == 0)
{
if (checkEndOfStream && lastDecompressResult != 0)
{
throw new EndOfStreamException("Premature end of stream");
}
return 0;
}
input.size = (nuint)bytesRead;
input.pos = 0;
}
}
#if !NETSTANDARD2_0 && !NETFRAMEWORK
public override Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken
) => ReadAsync(new Memory<byte>(buffer, offset, count), cancellationToken).AsTask();
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default
)
#else
public override Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken
) => ReadAsync(new Memory<byte>(buffer, offset, count), cancellationToken);
public async Task<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default
)
#endif
{
EnsureNotDisposed();
// Guard against infinite loop (output.pos would never become non-zero)
if (buffer.Length == 0)
{
return 0;
}
var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length };
while (true)
{
// If there is still input available, or there might be data buffered in the decompressor context, flush that out
while (input.pos < input.size || !contextDrained)
{
nuint oldInputPos = input.pos;
nuint result = DecompressStream(ref output, buffer.Span);
if (output.pos > 0 || oldInputPos != input.pos)
{
// Keep the result from the last decompress call that made progress, so we know if we're at the end of a frame
lastDecompressResult = result;
}
// If decompression filled the output buffer, there might still be data buffered in the decompressor context
contextDrained = output.pos < output.size;
// If we have data to return, return it immediately, so we won't stall on Read
if (output.pos > 0)
{
return (int)output.pos;
}
}
// Otherwise, read some more input
int bytesRead;
if (
(
bytesRead = await innerStream
.ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken)
.ConfigureAwait(false)
) == 0
)
{
if (checkEndOfStream && lastDecompressResult != 0)
{
throw new EndOfStreamException("Premature end of stream");
}
return 0;
}
input.size = (nuint)bytesRead;
input.pos = 0;
}
}
private unsafe nuint DecompressStream(ref ZSTD_outBuffer_s output, Span<byte> outputBuffer)
{
fixed (byte* inputBufferPtr = inputBuffer)
fixed (byte* outputBufferPtr = outputBuffer)
{
input.src = inputBufferPtr;
output.dst = outputBufferPtr;
return decompressor.NotNull().DecompressStream(ref input, ref output);
}
}
public override bool CanRead => true;
public override bool CanSeek => false;
public override bool CanWrite => false;
public override long Length => throw new NotSupportedException();
public override long Position
{
get => throw new NotSupportedException();
set => throw new NotSupportedException();
}
public override void Flush() => throw new NotSupportedException();
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
public override void SetLength(long value) => throw new NotSupportedException();
public override void Write(byte[] buffer, int offset, int count) =>
throw new NotSupportedException();
private void EnsureNotDisposed()
{
if (decompressor == null)
throw new ObjectDisposedException(nameof(DecompressionStream));
}
#if NETSTANDARD2_0 || NETFRAMEWORK
public virtual Task DisposeAsync()
{
try
{
Dispose();
return Task.CompletedTask;
}
catch (Exception exc)
{
return Task.FromException(exc);
}
}
#endif
}
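// Usage sketch (editor's addition): streaming decompression of a zstd file;
// the paths are illustrative.
internal static class DecompressionStreamExample
{
public static void DecompressFile(string srcPath, string dstPath)
{
using var src = File.OpenRead(srcPath);
using var dst = File.Create(dstPath);
using var zstd = new DecompressionStream(src, leaveOpen: false);
zstd.CopyTo(dst); // Read() refills the input buffer and drains the DCtx
}
}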

View File

@@ -0,0 +1,176 @@
using System;
using SharpCompress.Compressors.ZStandard.Unsafe;
namespace SharpCompress.Compressors.ZStandard;
public unsafe class Decompressor : IDisposable
{
private readonly SafeDctxHandle handle;
public Decompressor()
{
handle = SafeDctxHandle.Create();
}
public void SetParameter(ZSTD_dParameter parameter, int value)
{
using var dctx = handle.Acquire();
Unsafe.Methods.ZSTD_DCtx_setParameter(dctx, parameter, value).EnsureZstdSuccess();
}
public int GetParameter(ZSTD_dParameter parameter)
{
using var dctx = handle.Acquire();
int value;
Unsafe.Methods.ZSTD_DCtx_getParameter(dctx, parameter, &value).EnsureZstdSuccess();
return value;
}
public void LoadDictionary(byte[] dict)
{
var dictReadOnlySpan = new ReadOnlySpan<byte>(dict);
this.LoadDictionary(dictReadOnlySpan);
}
public void LoadDictionary(ReadOnlySpan<byte> dict)
{
using var dctx = handle.Acquire();
fixed (byte* dictPtr = dict)
Unsafe
.Methods.ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length)
.EnsureZstdSuccess();
}
public static ulong GetDecompressedSize(ReadOnlySpan<byte> src)
{
fixed (byte* srcPtr = src)
return Unsafe
.Methods.ZSTD_decompressBound(srcPtr, (nuint)src.Length)
.EnsureContentSizeOk();
}
public static ulong GetDecompressedSize(ArraySegment<byte> src) =>
GetDecompressedSize((ReadOnlySpan<byte>)src);
public static ulong GetDecompressedSize(byte[] src, int srcOffset, int srcLength) =>
GetDecompressedSize(new ReadOnlySpan<byte>(src, srcOffset, srcLength));
public Span<byte> Unwrap(ReadOnlySpan<byte> src, int maxDecompressedSize = int.MaxValue)
{
var expectedDstSize = GetDecompressedSize(src);
if (expectedDstSize > (ulong)maxDecompressedSize)
throw new ZstdException(
ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall,
$"Decompressed content size {expectedDstSize} is greater than {nameof(maxDecompressedSize)} {maxDecompressedSize}"
);
if (expectedDstSize > Constants.MaxByteArrayLength)
throw new ZstdException(
ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall,
$"Decompressed content size {expectedDstSize} is greater than max possible byte array size {Constants.MaxByteArrayLength}"
);
var dest = new byte[expectedDstSize];
var length = Unwrap(src, dest);
return new Span<byte>(dest, 0, length);
}
public int Unwrap(byte[] src, byte[] dest, int offset) =>
Unwrap(src, new Span<byte>(dest, offset, dest.Length - offset));
public int Unwrap(ReadOnlySpan<byte> src, Span<byte> dest)
{
fixed (byte* srcPtr = src)
fixed (byte* destPtr = dest)
{
using var dctx = handle.Acquire();
return (int)
Unsafe
.Methods.ZSTD_decompressDCtx(
dctx,
destPtr,
(nuint)dest.Length,
srcPtr,
(nuint)src.Length
)
.EnsureZstdSuccess();
}
}
public int Unwrap(
byte[] src,
int srcOffset,
int srcLength,
byte[] dst,
int dstOffset,
int dstLength
) =>
Unwrap(
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
new Span<byte>(dst, dstOffset, dstLength)
);
public bool TryUnwrap(byte[] src, byte[] dest, int offset, out int written) =>
TryUnwrap(src, new Span<byte>(dest, offset, dest.Length - offset), out written);
public bool TryUnwrap(ReadOnlySpan<byte> src, Span<byte> dest, out int written)
{
fixed (byte* srcPtr = src)
fixed (byte* destPtr = dest)
{
nuint returnValue;
using (var dctx = handle.Acquire())
{
returnValue = Unsafe.Methods.ZSTD_decompressDCtx(
dctx,
destPtr,
(nuint)dest.Length,
srcPtr,
(nuint)src.Length
);
}
if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))
{
written = default;
return false;
}
returnValue.EnsureZstdSuccess();
written = (int)returnValue;
return true;
}
}
public bool TryUnwrap(
byte[] src,
int srcOffset,
int srcLength,
byte[] dst,
int dstOffset,
int dstLength,
out int written
) =>
TryUnwrap(
new ReadOnlySpan<byte>(src, srcOffset, srcLength),
new Span<byte>(dst, dstOffset, dstLength),
out written
);
public void Dispose()
{
handle.Dispose();
GC.SuppressFinalize(this);
}
internal nuint DecompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s output)
{
fixed (ZSTD_inBuffer_s* inputPtr = &input)
fixed (ZSTD_outBuffer_s* outputPtr = &output)
{
using var dctx = handle.Acquire();
return Unsafe
.Methods.ZSTD_decompressStream(dctx, outputPtr, inputPtr)
.EnsureZstdSuccess();
}
}
}
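// Usage sketch (editor's addition): one-shot decompression, sized from the
// frame header; the input is assumed to be a complete zstd frame.
internal static class DecompressorExample
{
public static byte[] DecompressBytes(byte[] compressed)
{
using var decompressor = new Decompressor();
// Unwrap reads the expected size via ZSTD_decompressBound and allocates it
return decompressor.Unwrap(compressed).ToArray();
}
}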

View File

@@ -0,0 +1,141 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Threading;
namespace SharpCompress.Compressors.ZStandard;
internal unsafe class JobThreadPool : IDisposable
{
private int numThreads;
private readonly List<JobThread> threads;
private readonly BlockingCollection<Job> queue;
private struct Job
{
public void* function;
public void* opaque;
}
private class JobThread
{
private Thread Thread { get; }
public CancellationTokenSource CancellationTokenSource { get; }
public JobThread(Thread thread)
{
CancellationTokenSource = new CancellationTokenSource();
Thread = thread;
}
public void Start()
{
Thread.Start(this);
}
public void Cancel()
{
CancellationTokenSource.Cancel();
}
public void Join()
{
Thread.Join();
}
}
private void Worker(object? obj)
{
if (obj is not JobThread poolThread)
return;
var cancellationToken = poolThread.CancellationTokenSource.Token;
while (!queue.IsCompleted && !cancellationToken.IsCancellationRequested)
{
try
{
if (queue.TryTake(out var job, -1, cancellationToken))
((delegate* managed<void*, void>)job.function)(job.opaque);
}
catch (InvalidOperationException) { }
catch (OperationCanceledException) { }
}
}
public JobThreadPool(int num, int queueSize)
{
numThreads = num;
queue = new BlockingCollection<Job>(queueSize + 1);
threads = new List<JobThread>(num);
for (var i = 0; i < numThreads; i++)
CreateThread();
}
private void CreateThread()
{
var poolThread = new JobThread(new Thread(Worker));
threads.Add(poolThread);
poolThread.Start();
}
public void Resize(int num)
{
lock (threads)
{
if (num < numThreads)
{
for (var i = numThreads - 1; i >= num; i--)
{
threads[i].Cancel();
threads.RemoveAt(i);
}
}
else
{
for (var i = numThreads; i < num; i++)
CreateThread();
}
}
numThreads = num;
}
public void Add(void* function, void* opaque)
{
queue.Add(new Job { function = function, opaque = opaque });
}
public bool TryAdd(void* function, void* opaque)
{
return queue.TryAdd(new Job { function = function, opaque = opaque });
}
public void Join(bool cancel = true)
{
queue.CompleteAdding();
List<JobThread> jobThreads;
lock (threads)
jobThreads = new List<JobThread>(threads);
if (cancel)
{
foreach (var thread in jobThreads)
thread.Cancel();
}
foreach (var thread in jobThreads)
thread.Join();
}
public void Dispose()
{
queue.Dispose();
}
public int Size()
{
// todo not implemented
// https://github.com/dotnet/runtime/issues/24200
return 0;
}
}
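// Usage sketch (editor's addition): queueing a managed function pointer as a
// job, mirroring zstd's POOL_add(function, opaque) contract.
internal static unsafe class JobThreadPoolExample
{
private static void DoWork(void* opaque) { /* job body */ }
public static void Run()
{
using var pool = new JobThreadPool(num: 2, queueSize: 4);
pool.Add((delegate* managed<void*, void>)&DoWork, null);
pool.Join(cancel: false); // complete the queue, then wait for the workers
}
}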

View File

@@ -0,0 +1,163 @@
using System;
using System.Runtime.InteropServices;
using SharpCompress.Compressors.ZStandard.Unsafe;
namespace SharpCompress.Compressors.ZStandard;
/// <summary>
/// Provides the base class for ZstdSharp <see cref="SafeHandle"/> implementations.
/// </summary>
/// <remarks>
/// Even though ZstdSharp is a managed library, its internals use unmanaged
/// memory, so the library's high-level API uses safe handles to ensure proper
/// disposal of unmanaged resources and to increase safety.
/// </remarks>
/// <seealso cref="SafeCctxHandle"/>
/// <seealso cref="SafeDctxHandle"/>
internal abstract unsafe class SafeZstdHandle : SafeHandle
{
/// <summary>
/// Parameterless constructor is hidden. Use the static <c>Create</c> factory
/// method to create a new safe handle instance.
/// </summary>
protected SafeZstdHandle()
: base(IntPtr.Zero, true) { }
public sealed override bool IsInvalid => handle == IntPtr.Zero;
}
/// <summary>
/// Safely wraps an unmanaged Zstd compression context.
/// </summary>
internal sealed unsafe class SafeCctxHandle : SafeZstdHandle
{
/// <inheritdoc/>
private SafeCctxHandle() { }
/// <summary>
/// Creates a new instance of <see cref="SafeCctxHandle"/>.
/// </summary>
/// <returns>A new, valid <see cref="SafeCctxHandle"/>.</returns>
/// <exception cref="ZstdException">Creation failed.</exception>
public static SafeCctxHandle Create()
{
var safeHandle = new SafeCctxHandle();
bool success = false;
try
{
var cctx = Unsafe.Methods.ZSTD_createCCtx();
if (cctx == null)
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create cctx");
safeHandle.SetHandle((IntPtr)cctx);
success = true;
}
finally
{
if (!success)
{
safeHandle.SetHandleAsInvalid();
}
}
return safeHandle;
}
/// <summary>
/// Acquires a reference to the safe handle.
/// </summary>
/// <returns>
/// A <see cref="SafeHandleHolder{T}"/> instance that can be implicitly converted to a pointer
/// to <see cref="ZSTD_CCtx_s"/>.
/// </returns>
public SafeHandleHolder<ZSTD_CCtx_s> Acquire() => new(this);
protected override bool ReleaseHandle()
{
return Unsafe.Methods.ZSTD_freeCCtx((ZSTD_CCtx_s*)handle) == 0;
}
}
/// <summary>
/// Safely wraps an unmanaged Zstd decompression context.
/// </summary>
internal sealed unsafe class SafeDctxHandle : SafeZstdHandle
{
/// <inheritdoc/>
private SafeDctxHandle() { }
/// <summary>
/// Creates a new instance of <see cref="SafeDctxHandle"/>.
/// </summary>
/// <returns>A new, valid <see cref="SafeDctxHandle"/>.</returns>
/// <exception cref="ZstdException">Creation failed.</exception>
public static SafeDctxHandle Create()
{
var safeHandle = new SafeDctxHandle();
bool success = false;
try
{
var dctx = Unsafe.Methods.ZSTD_createDCtx();
if (dctx == null)
throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create dctx");
safeHandle.SetHandle((IntPtr)dctx);
success = true;
}
finally
{
if (!success)
{
safeHandle.SetHandleAsInvalid();
}
}
return safeHandle;
}
/// <summary>
/// Acquires a reference to the safe handle.
/// </summary>
/// <returns>
/// A <see cref="SafeHandleHolder{T}"/> instance that can be implicitly converted to a pointer
/// to <see cref="ZSTD_DCtx_s"/>.
/// </returns>
public SafeHandleHolder<ZSTD_DCtx_s> Acquire() => new(this);
protected override bool ReleaseHandle()
{
return Unsafe.Methods.ZSTD_freeDCtx((ZSTD_DCtx_s*)handle) == 0;
}
}
/// <summary>
/// Provides a convenient interface to safely acquire pointers of a specific type
/// from a <see cref="SafeHandle"/>, by utilizing <see langword="using"/> blocks.
/// </summary>
/// <typeparam name="T">The type of pointers to return.</typeparam>
/// <remarks>
/// Safe handle holders can be <see cref="Dispose"/>d to decrement the safe handle's
/// reference count, and can be implicitly converted to pointers to <typeparamref name="T"/>.
/// </remarks>
internal unsafe ref struct SafeHandleHolder<T>
where T : unmanaged
{
private readonly SafeHandle _handle;
private bool _refAdded;
public SafeHandleHolder(SafeHandle safeHandle)
{
_handle = safeHandle;
_refAdded = false;
safeHandle.DangerousAddRef(ref _refAdded);
}
public static implicit operator T*(SafeHandleHolder<T> holder) =>
(T*)holder._handle.DangerousGetHandle();
public void Dispose()
{
if (_refAdded)
{
_handle.DangerousRelease();
_refAdded = false;
}
}
}
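// Usage sketch (editor's note): a holder keeps the handle's reference count
// raised for exactly one scope, e.g.:
//
// using var cctx = cctxHandle.Acquire(); // DangerousAddRef
// Methods.ZSTD_CCtx_setParameter(cctx, parameter, value); // implicit ZSTD_CCtx_s* conversion
// // scope exit disposes the holder, which calls DangerousRelease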

View File

@@ -0,0 +1,22 @@
using System.Threading;
namespace SharpCompress.Compressors.ZStandard;
internal static unsafe class SynchronizationWrapper
{
private static object UnwrapObject(void** obj) => UnmanagedObject.Unwrap<object>(*obj);
public static void Init(void** obj) => *obj = UnmanagedObject.Wrap(new object());
public static void Free(void** obj) => UnmanagedObject.Free(*obj);
public static void Enter(void** obj) => Monitor.Enter(UnwrapObject(obj));
public static void Exit(void** obj) => Monitor.Exit(UnwrapObject(obj));
public static void Pulse(void** obj) => Monitor.Pulse(UnwrapObject(obj));
public static void PulseAll(void** obj) => Monitor.PulseAll(UnwrapObject(obj));
public static void Wait(void** mutex) => Monitor.Wait(UnwrapObject(mutex));
}
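// Usage sketch (editor's addition): a pthread-mutex-style round trip over a
// GCHandle-wrapped Monitor object.
internal static unsafe class SynchronizationWrapperExample
{
public static void Demo()
{
void* mutex;
SynchronizationWrapper.Init(&mutex); // wraps a new object() in a GCHandle
SynchronizationWrapper.Enter(&mutex); // Monitor.Enter on the unwrapped object
SynchronizationWrapper.Exit(&mutex);
SynchronizationWrapper.Free(&mutex); // releases the GCHandle
}
}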

View File

@@ -0,0 +1,48 @@
using SharpCompress.Compressors.ZStandard.Unsafe;
namespace SharpCompress.Compressors.ZStandard;
public static unsafe class ThrowHelper
{
private const ulong ZSTD_CONTENTSIZE_UNKNOWN = unchecked(0UL - 1);
private const ulong ZSTD_CONTENTSIZE_ERROR = unchecked(0UL - 2);
public static nuint EnsureZstdSuccess(this nuint returnValue)
{
if (Unsafe.Methods.ZSTD_isError(returnValue))
ThrowException(returnValue, Unsafe.Methods.ZSTD_getErrorName(returnValue));
return returnValue;
}
public static nuint EnsureZdictSuccess(this nuint returnValue)
{
if (Unsafe.Methods.ZDICT_isError(returnValue))
ThrowException(returnValue, Unsafe.Methods.ZDICT_getErrorName(returnValue));
return returnValue;
}
public static ulong EnsureContentSizeOk(this ulong returnValue)
{
if (returnValue == ZSTD_CONTENTSIZE_UNKNOWN)
throw new ZstdException(
ZSTD_ErrorCode.ZSTD_error_GENERIC,
"Decompressed content size is not specified"
);
if (returnValue == ZSTD_CONTENTSIZE_ERROR)
throw new ZstdException(
ZSTD_ErrorCode.ZSTD_error_GENERIC,
"Decompressed content size cannot be determined (e.g. invalid magic number, srcSize too small)"
);
return returnValue;
}
private static void ThrowException(nuint returnValue, string message)
{
var code = 0 - returnValue;
throw new ZstdException((ZSTD_ErrorCode)code, message);
}
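// Worked example (editor's note): zstd returns errors as the two's complement
// of the error code, i.e. (nuint)(0 - code). On a 64-bit build,
// dstSize_tooSmall (code 70) arrives as 0xFFFFFFFFFFFFFFBA, and 0 - returnValue
// recovers 70 for the exception.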
}

View File

@@ -0,0 +1,18 @@
using System;
using System.Runtime.InteropServices;
namespace SharpCompress.Compressors.ZStandard;
/*
* Wrap object to void* to make it unmanaged
*/
internal static unsafe class UnmanagedObject
{
public static void* Wrap(object obj) => (void*)GCHandle.ToIntPtr(GCHandle.Alloc(obj));
private static GCHandle UnwrapGcHandle(void* value) => GCHandle.FromIntPtr((IntPtr)value);
public static T Unwrap<T>(void* value) => (T)UnwrapGcHandle(value).Target!;
public static void Free(void* value) => UnwrapGcHandle(value).Free();
}
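// Usage sketch (editor's addition): round-tripping a managed object through void*.
internal static unsafe class UnmanagedObjectExample
{
public static void Demo()
{
void* handle = UnmanagedObject.Wrap("hello"); // GCHandle.Alloc
var text = UnmanagedObject.Unwrap<string>(handle); // text == "hello"
UnmanagedObject.Free(handle); // releases the GCHandle
}
}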

View File

@@ -0,0 +1,52 @@
using System.Runtime.CompilerServices;
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
/* custom memory allocation functions */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void* ZSTD_customMalloc(nuint size, ZSTD_customMem customMem)
{
if (customMem.customAlloc != null)
return ((delegate* managed<void*, nuint, void*>)customMem.customAlloc)(
customMem.opaque,
size
);
return malloc(size);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void* ZSTD_customCalloc(nuint size, ZSTD_customMem customMem)
{
if (customMem.customAlloc != null)
{
/* calloc implemented as malloc+memset;
* not as efficient as calloc, but next best guess for custom malloc */
void* ptr = ((delegate* managed<void*, nuint, void*>)customMem.customAlloc)(
customMem.opaque,
size
);
memset(ptr, 0, (uint)size);
return ptr;
}
return calloc(1, size);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
{
if (ptr != null)
{
if (customMem.customFree != null)
((delegate* managed<void*, void*, void>)customMem.customFree)(
customMem.opaque,
ptr
);
else
free(ptr);
}
}
}

View File

@@ -0,0 +1,14 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/* bitStream can mix input from multiple sources.
* A critical property of these streams is that they encode and decode in **reverse** direction.
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
*/
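/* Worked example (editor's note): adding the value 5 over 3 bits and then 3
 * over 2 bits packs the container as 0b11101; a decoder reads the 2-bit 3
 * first, then the 3-bit 5: last in, first out.
 */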
public unsafe struct BIT_CStream_t
{
public nuint bitContainer;
public uint bitPos;
public sbyte* startPtr;
public sbyte* ptr;
public sbyte* endPtr;
}

View File

@@ -0,0 +1,16 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public enum BIT_DStream_status
{
/* fully refilled */
BIT_DStream_unfinished = 0,
/* still some bits left in bitstream */
BIT_DStream_endOfBuffer = 1,
/* bitstream entirely consumed, bit-exact */
BIT_DStream_completed = 2,
/* user requested more bits than present in bitstream */
BIT_DStream_overflow = 3,
}

View File

@@ -0,0 +1,13 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/*-********************************************
* bitStream decoding API (read backward)
**********************************************/
public unsafe struct BIT_DStream_t
{
public nuint bitContainer;
public uint bitsConsumed;
public sbyte* ptr;
public sbyte* start;
public sbyte* limitPtr;
}

View File

@@ -0,0 +1,60 @@
using System;
using System.Numerics;
using System.Runtime.CompilerServices;
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint ZSTD_countTrailingZeros32(uint val)
{
assert(val != 0);
return (uint)BitOperations.TrailingZeroCount(val);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint ZSTD_countLeadingZeros32(uint val)
{
assert(val != 0);
return (uint)BitOperations.LeadingZeroCount(val);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint ZSTD_countTrailingZeros64(ulong val)
{
assert(val != 0);
return (uint)BitOperations.TrailingZeroCount(val);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint ZSTD_countLeadingZeros64(ulong val)
{
assert(val != 0);
return (uint)BitOperations.LeadingZeroCount(val);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint ZSTD_NbCommonBytes(nuint val)
{
assert(val != 0);
if (BitConverter.IsLittleEndian)
{
return MEM_64bits
? (uint)BitOperations.TrailingZeroCount(val) >> 3
: (uint)BitOperations.TrailingZeroCount((uint)val) >> 3;
}
return MEM_64bits
? (uint)BitOperations.LeadingZeroCount(val) >> 3
: (uint)BitOperations.LeadingZeroCount((uint)val) >> 3;
}
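// Worked example (editor's note): if two sequences agree on exactly their
// first two bytes, the XOR of their leading words has its low 16 bits clear,
// so on little-endian TrailingZeroCount is in [16, 23] and >> 3 yields 2.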
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint ZSTD_highbit32(uint val)
{
assert(val != 0);
return (uint)BitOperations.Log2(val);
}
}

View File

@@ -0,0 +1,739 @@
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
#if NETCOREAPP3_0_OR_GREATER
using System.Runtime.Intrinsics.X86;
#endif
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
#if NET7_0_OR_GREATER
private static ReadOnlySpan<uint> Span_BIT_mask =>
new uint[32]
{
0,
1,
3,
7,
0xF,
0x1F,
0x3F,
0x7F,
0xFF,
0x1FF,
0x3FF,
0x7FF,
0xFFF,
0x1FFF,
0x3FFF,
0x7FFF,
0xFFFF,
0x1FFFF,
0x3FFFF,
0x7FFFF,
0xFFFFF,
0x1FFFFF,
0x3FFFFF,
0x7FFFFF,
0xFFFFFF,
0x1FFFFFF,
0x3FFFFFF,
0x7FFFFFF,
0xFFFFFFF,
0x1FFFFFFF,
0x3FFFFFFF,
0x7FFFFFFF,
};
private static uint* BIT_mask =>
(uint*)
System.Runtime.CompilerServices.Unsafe.AsPointer(
ref MemoryMarshal.GetReference(Span_BIT_mask)
);
#else
private static readonly uint* BIT_mask = GetArrayPointer(
new uint[32]
{
0,
1,
3,
7,
0xF,
0x1F,
0x3F,
0x7F,
0xFF,
0x1FF,
0x3FF,
0x7FF,
0xFFF,
0x1FFF,
0x3FFF,
0x7FFF,
0xFFFF,
0x1FFFF,
0x3FFFF,
0x7FFFF,
0xFFFFF,
0x1FFFFF,
0x3FFFFF,
0x7FFFFF,
0xFFFFFF,
0x1FFFFFF,
0x3FFFFFF,
0x7FFFFFF,
0xFFFFFFF,
0x1FFFFFFF,
0x3FFFFFFF,
0x7FFFFFFF,
}
);
#endif
/*-**************************************************************
* bitStream encoding
****************************************************************/
/*! BIT_initCStream() :
* `dstCapacity` must be > sizeof(size_t)
* @return : 0 if success,
* otherwise an error code (can be tested using ERR_isError()) */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_initCStream(ref BIT_CStream_t bitC, void* startPtr, nuint dstCapacity)
{
bitC.bitContainer = 0;
bitC.bitPos = 0;
bitC.startPtr = (sbyte*)startPtr;
bitC.ptr = bitC.startPtr;
bitC.endPtr = bitC.startPtr + dstCapacity - sizeof(nuint);
if (dstCapacity <= (nuint)sizeof(nuint))
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
return 0;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits)
{
assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
#if NETCOREAPP3_1_OR_GREATER
if (Bmi2.X64.IsSupported)
{
return (nuint)Bmi2.X64.ZeroHighBits(bitContainer, nbBits);
}
if (Bmi2.IsSupported)
{
return Bmi2.ZeroHighBits((uint)bitContainer, nbBits);
}
#endif
return bitContainer & BIT_mask[nbBits];
}
/*! BIT_addBits() :
* can add up to 31 bits into `bitC`.
* Note : does not check for register overflow ! */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void BIT_addBits(
ref nuint bitC_bitContainer,
ref uint bitC_bitPos,
nuint value,
uint nbBits
)
{
assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8));
bitC_bitContainer |= BIT_getLowerBits(value, nbBits) << (int)bitC_bitPos;
bitC_bitPos += nbBits;
}
/*! BIT_addBitsFast() :
* works only if `value` is _clean_,
* meaning all high bits above nbBits are 0 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void BIT_addBitsFast(
ref nuint bitC_bitContainer,
ref uint bitC_bitPos,
nuint value,
uint nbBits
)
{
assert(value >> (int)nbBits == 0);
assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8));
bitC_bitContainer |= value << (int)bitC_bitPos;
bitC_bitPos += nbBits;
}
/*! BIT_flushBitsFast() :
* assumption : bitContainer has not overflowed
* unsafe version; does not check buffer overflow */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void BIT_flushBitsFast(
ref nuint bitC_bitContainer,
ref uint bitC_bitPos,
ref sbyte* bitC_ptr,
sbyte* bitC_endPtr
)
{
nuint nbBytes = bitC_bitPos >> 3;
assert(bitC_bitPos < (uint)(sizeof(nuint) * 8));
assert(bitC_ptr <= bitC_endPtr);
MEM_writeLEST(bitC_ptr, bitC_bitContainer);
bitC_ptr += nbBytes;
bitC_bitPos &= 7;
bitC_bitContainer >>= (int)(nbBytes * 8);
}
/*! BIT_flushBits() :
* assumption : bitContainer has not overflowed
* safe version; check for buffer overflow, and prevents it.
* note : does not signal buffer overflow.
* overflow will be revealed later on using BIT_closeCStream() */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void BIT_flushBits(
ref nuint bitC_bitContainer,
ref uint bitC_bitPos,
ref sbyte* bitC_ptr,
sbyte* bitC_endPtr
)
{
nuint nbBytes = bitC_bitPos >> 3;
assert(bitC_bitPos < (uint)(sizeof(nuint) * 8));
assert(bitC_ptr <= bitC_endPtr);
MEM_writeLEST(bitC_ptr, bitC_bitContainer);
bitC_ptr += nbBytes;
if (bitC_ptr > bitC_endPtr)
bitC_ptr = bitC_endPtr;
bitC_bitPos &= 7;
bitC_bitContainer >>= (int)(nbBytes * 8);
}
/*! BIT_closeCStream() :
* @return : size of CStream, in bytes,
* or 0 if it could not fit into dstBuffer */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_closeCStream(
ref nuint bitC_bitContainer,
ref uint bitC_bitPos,
sbyte* bitC_ptr,
sbyte* bitC_endPtr,
sbyte* bitC_startPtr
)
{
BIT_addBitsFast(ref bitC_bitContainer, ref bitC_bitPos, 1, 1);
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
if (bitC_ptr >= bitC_endPtr)
return 0;
return (nuint)(bitC_ptr - bitC_startPtr) + (nuint)(bitC_bitPos > 0 ? 1 : 0);
}
/*-********************************************************
* bitStream decoding
**********************************************************/
/*! BIT_initDStream() :
* Initialize a BIT_DStream_t.
* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
* `srcSize` must be the *exact* size of the bitStream, in bytes.
* @return : size of stream (== srcSize), or an errorCode if a problem is detected
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint srcSize)
{
if (srcSize < 1)
{
*bitD = new BIT_DStream_t();
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
}
bitD->start = (sbyte*)srcBuffer;
bitD->limitPtr = bitD->start + sizeof(nuint);
if (srcSize >= (nuint)sizeof(nuint))
{
bitD->ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
{
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
if (lastByte == 0)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
}
}
else
{
bitD->ptr = bitD->start;
bitD->bitContainer = *(byte*)bitD->start;
switch (srcSize)
{
case 7:
bitD->bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16;
goto case 6;
case 6:
bitD->bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24;
goto case 5;
case 5:
bitD->bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32;
goto case 4;
case 4:
bitD->bitContainer += (nuint)((byte*)srcBuffer)[3] << 24;
goto case 3;
case 3:
bitD->bitContainer += (nuint)((byte*)srcBuffer)[2] << 16;
goto case 2;
case 2:
bitD->bitContainer += (nuint)((byte*)srcBuffer)[1] << 8;
goto default;
default:
break;
}
{
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
if (lastByte == 0)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
}
bitD->bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8;
}
return srcSize;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_getUpperBits(nuint bitContainer, uint start)
{
return bitContainer >> (int)start;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBits)
{
uint regMask = (uint)(sizeof(nuint) * 8 - 1);
assert(nbBits < sizeof(uint) * 32 / sizeof(uint));
#if NETCOREAPP3_1_OR_GREATER
if (Bmi2.X64.IsSupported)
{
return (nuint)Bmi2.X64.ZeroHighBits(bitContainer >> (int)(start & regMask), nbBits);
}
if (Bmi2.IsSupported)
{
return Bmi2.ZeroHighBits((uint)(bitContainer >> (int)(start & regMask)), nbBits);
}
#endif
return (nuint)(bitContainer >> (int)(start & regMask) & ((ulong)1 << (int)nbBits) - 1);
}
/*! BIT_lookBits() :
* Provides next n bits from local register.
* local register is not modified.
* On 32-bits, maxNbBits==24.
* On 64-bits, maxNbBits==56.
* @return : value extracted */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_lookBits(BIT_DStream_t* bitD, uint nbBits)
{
return BIT_getMiddleBits(
bitD->bitContainer,
(uint)(sizeof(nuint) * 8) - bitD->bitsConsumed - nbBits,
nbBits
);
}
/*! BIT_lookBitsFast() :
* unsafe version; only works if nbBits >= 1 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_lookBitsFast(BIT_DStream_t* bitD, uint nbBits)
{
uint regMask = (uint)(sizeof(nuint) * 8 - 1);
assert(nbBits >= 1);
return bitD->bitContainer
<< (int)(bitD->bitsConsumed & regMask)
>> (int)(regMask + 1 - nbBits & regMask);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void BIT_skipBits(BIT_DStream_t* bitD, uint nbBits)
{
bitD->bitsConsumed += nbBits;
}
/*! BIT_readBits() :
* Read (consume) next n bits from local register and update.
* Pay attention to not read more than nbBits contained into local register.
* @return : extracted value. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_readBits(BIT_DStream_t* bitD, uint nbBits)
{
nuint value = BIT_lookBits(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
/*! BIT_readBitsFast() :
* unsafe version; only works if nbBits >= 1 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_readBitsFast(BIT_DStream_t* bitD, uint nbBits)
{
nuint value = BIT_lookBitsFast(bitD, nbBits);
assert(nbBits >= 1);
BIT_skipBits(bitD, nbBits);
return value;
}
/*! BIT_reloadDStream_internal() :
* Simple variant of BIT_reloadDStream(), with two conditions:
* 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
* 2. look window is valid after shifted down : bitD->ptr >= bitD->start
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD)
{
assert(bitD->bitsConsumed <= (uint)(sizeof(nuint) * 8));
bitD->ptr -= bitD->bitsConsumed >> 3;
assert(bitD->ptr >= bitD->start);
bitD->bitsConsumed &= 7;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return BIT_DStream_status.BIT_DStream_unfinished;
}
/*! BIT_reloadDStreamFast() :
* Similar to BIT_reloadDStream(), but with two differences:
* 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
* 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
* point you must use BIT_reloadDStream() to reload.
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
{
if (bitD->ptr < bitD->limitPtr)
return BIT_DStream_status.BIT_DStream_overflow;
return BIT_reloadDStream_internal(bitD);
}
#if NET7_0_OR_GREATER
private static ReadOnlySpan<byte> Span_static_zeroFilled =>
new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 };
private static nuint* static_zeroFilled =>
(nuint*)
System.Runtime.CompilerServices.Unsafe.AsPointer(
ref MemoryMarshal.GetReference(Span_static_zeroFilled)
);
#else
private static readonly nuint* static_zeroFilled = (nuint*)GetArrayPointer(
new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }
);
#endif
/*! BIT_reloadDStream() :
* Refill `bitD` from buffer previously set in BIT_initDStream() .
* This function is safe: it guarantees it will never read beyond the src buffer.
* @return : status of `BIT_DStream_t` internal register.
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
if (bitD->bitsConsumed > (uint)(sizeof(nuint) * 8))
{
bitD->ptr = (sbyte*)&static_zeroFilled[0];
return BIT_DStream_status.BIT_DStream_overflow;
}
assert(bitD->ptr >= bitD->start);
if (bitD->ptr >= bitD->limitPtr)
{
return BIT_reloadDStream_internal(bitD);
}
if (bitD->ptr == bitD->start)
{
if (bitD->bitsConsumed < (uint)(sizeof(nuint) * 8))
return BIT_DStream_status.BIT_DStream_endOfBuffer;
return BIT_DStream_status.BIT_DStream_completed;
}
{
uint nbBytes = bitD->bitsConsumed >> 3;
BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start)
{
nbBytes = (uint)(bitD->ptr - bitD->start);
result = BIT_DStream_status.BIT_DStream_endOfBuffer;
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes * 8;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return result;
}
}
/*! BIT_endOfDStream() :
* @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint BIT_endOfDStream(BIT_DStream_t* DStream)
{
return DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8)
? 1U
: 0U;
}
/*-********************************************************
* bitStream decoding
**********************************************************/
/*! BIT_initDStream() :
* Initialize a BIT_DStream_t.
* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
* `srcSize` must be the *exact* size of the bitStream, in bytes.
* @return : size of stream (== srcSize), or an errorCode if a problem is detected
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nuint srcSize)
{
if (srcSize < 1)
{
bitD = new BIT_DStream_t();
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
}
bitD.start = (sbyte*)srcBuffer;
bitD.limitPtr = bitD.start + sizeof(nuint);
if (srcSize >= (nuint)sizeof(nuint))
{
bitD.ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint);
bitD.bitContainer = MEM_readLEST(bitD.ptr);
{
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
if (lastByte == 0)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
}
}
else
{
bitD.ptr = bitD.start;
bitD.bitContainer = *(byte*)bitD.start;
switch (srcSize)
{
case 7:
bitD.bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16;
goto case 6;
case 6:
bitD.bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24;
goto case 5;
case 5:
bitD.bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32;
goto case 4;
case 4:
bitD.bitContainer += (nuint)((byte*)srcBuffer)[3] << 24;
goto case 3;
case 3:
bitD.bitContainer += (nuint)((byte*)srcBuffer)[2] << 16;
goto case 2;
case 2:
bitD.bitContainer += (nuint)((byte*)srcBuffer)[1] << 8;
goto default;
default:
break;
}
{
byte lastByte = ((byte*)srcBuffer)[srcSize - 1];
bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0;
if (lastByte == 0)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
}
bitD.bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8;
}
return srcSize;
}
/*! BIT_lookBits() :
* Provides next n bits from local register.
* local register is not modified.
* On 32-bits, maxNbBits==24.
* On 64-bits, maxNbBits==56.
* @return : value extracted */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_lookBits(nuint bitD_bitContainer, uint bitD_bitsConsumed, uint nbBits)
{
return BIT_getMiddleBits(
bitD_bitContainer,
(uint)(sizeof(nuint) * 8) - bitD_bitsConsumed - nbBits,
nbBits
);
}
/*! BIT_lookBitsFast() :
* unsafe version; only works if nbBits >= 1 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_lookBitsFast(
nuint bitD_bitContainer,
uint bitD_bitsConsumed,
uint nbBits
)
{
uint regMask = (uint)(sizeof(nuint) * 8 - 1);
assert(nbBits >= 1);
return bitD_bitContainer
<< (int)(bitD_bitsConsumed & regMask)
>> (int)(regMask + 1 - nbBits & regMask);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void BIT_skipBits(ref uint bitD_bitsConsumed, uint nbBits)
{
bitD_bitsConsumed += nbBits;
}
/*! BIT_readBits() :
* Read (consume) next n bits from local register and update.
* Pay attention to not read more than nbBits contained into local register.
* @return : extracted value. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_readBits(
nuint bitD_bitContainer,
ref uint bitD_bitsConsumed,
uint nbBits
)
{
nuint value = BIT_lookBits(bitD_bitContainer, bitD_bitsConsumed, nbBits);
BIT_skipBits(ref bitD_bitsConsumed, nbBits);
return value;
}
/*! BIT_readBitsFast() :
* unsafe version; only works if nbBits >= 1 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint BIT_readBitsFast(
nuint bitD_bitContainer,
ref uint bitD_bitsConsumed,
uint nbBits
)
{
nuint value = BIT_lookBitsFast(bitD_bitContainer, bitD_bitsConsumed, nbBits);
assert(nbBits >= 1);
BIT_skipBits(ref bitD_bitsConsumed, nbBits);
return value;
}
/*! BIT_reloadDStreamFast() :
* Similar to BIT_reloadDStream(), but with two differences:
* 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
* 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
* point you must use BIT_reloadDStream() to reload.
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static BIT_DStream_status BIT_reloadDStreamFast(
ref nuint bitD_bitContainer,
ref uint bitD_bitsConsumed,
ref sbyte* bitD_ptr,
sbyte* bitD_start,
sbyte* bitD_limitPtr
)
{
if (bitD_ptr < bitD_limitPtr)
return BIT_DStream_status.BIT_DStream_overflow;
return BIT_reloadDStream_internal(
ref bitD_bitContainer,
ref bitD_bitsConsumed,
ref bitD_ptr,
bitD_start
);
}
/*! BIT_reloadDStream() :
* Refill `bitD` from buffer previously set in BIT_initDStream() .
* This function is safe: it guarantees it will never read beyond the src buffer.
* @return : status of `BIT_DStream_t` internal register.
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static BIT_DStream_status BIT_reloadDStream(
ref nuint bitD_bitContainer,
ref uint bitD_bitsConsumed,
ref sbyte* bitD_ptr,
sbyte* bitD_start,
sbyte* bitD_limitPtr
)
{
if (bitD_bitsConsumed > (uint)(sizeof(nuint) * 8))
{
bitD_ptr = (sbyte*)&static_zeroFilled[0];
return BIT_DStream_status.BIT_DStream_overflow;
}
assert(bitD_ptr >= bitD_start);
if (bitD_ptr >= bitD_limitPtr)
{
return BIT_reloadDStream_internal(
ref bitD_bitContainer,
ref bitD_bitsConsumed,
ref bitD_ptr,
bitD_start
);
}
if (bitD_ptr == bitD_start)
{
if (bitD_bitsConsumed < (uint)(sizeof(nuint) * 8))
return BIT_DStream_status.BIT_DStream_endOfBuffer;
return BIT_DStream_status.BIT_DStream_completed;
}
{
uint nbBytes = bitD_bitsConsumed >> 3;
BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished;
if (bitD_ptr - nbBytes < bitD_start)
{
nbBytes = (uint)(bitD_ptr - bitD_start);
result = BIT_DStream_status.BIT_DStream_endOfBuffer;
}
bitD_ptr -= nbBytes;
bitD_bitsConsumed -= nbBytes * 8;
bitD_bitContainer = MEM_readLEST(bitD_ptr);
return result;
}
}
/*! BIT_reloadDStream_internal() :
* Simple variant of BIT_reloadDStream(), with two conditions:
* 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
* 2. look window is valid after shifted down : bitD->ptr >= bitD->start
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static BIT_DStream_status BIT_reloadDStream_internal(
ref nuint bitD_bitContainer,
ref uint bitD_bitsConsumed,
ref sbyte* bitD_ptr,
sbyte* bitD_start
)
{
assert(bitD_bitsConsumed <= (uint)(sizeof(nuint) * 8));
bitD_ptr -= bitD_bitsConsumed >> 3;
assert(bitD_ptr >= bitD_start);
bitD_bitsConsumed &= 7;
bitD_bitContainer = MEM_readLEST(bitD_ptr);
return BIT_DStream_status.BIT_DStream_unfinished;
}
/*! BIT_endOfDStream() :
* @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint BIT_endOfDStream(
uint DStream_bitsConsumed,
sbyte* DStream_ptr,
sbyte* DStream_start
)
{
return DStream_ptr == DStream_start && DStream_bitsConsumed == (uint)(sizeof(nuint) * 8)
? 1U
: 0U;
}
}

View File

@@ -0,0 +1,8 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public struct BlockSummary
{
public nuint nbSequences;
public nuint blockSize;
public nuint litSize;
}

View File

@@ -0,0 +1,20 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/**
* COVER_best_t is used for two purposes:
* 1. Synchronizing threads.
* 2. Saving the best parameters and dictionary.
*
* All of the methods except COVER_best_init() are thread safe if zstd is
* compiled with multithreaded support.
*/
public unsafe struct COVER_best_s
{
public void* mutex;
public void* cond;
public nuint liveJobs;
public void* dict;
public nuint dictSize;
public ZDICT_cover_params_t parameters;
public nuint compressedSize;
}

View File

@@ -0,0 +1,19 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/*-*************************************
* Context
***************************************/
public unsafe struct COVER_ctx_t
{
public byte* samples;
public nuint* offsets;
public nuint* samplesSizes;
public nuint nbSamples;
public nuint nbTrainSamples;
public nuint nbTestSamples;
public uint* suffix;
public nuint suffixSize;
public uint* freqs;
public uint* dmerAt;
public uint d;
}

View File

@@ -0,0 +1,11 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/**
* Struct used for the dictionary selection function.
*/
public unsafe struct COVER_dictSelection
{
public byte* dictContent;
public nuint dictSize;
public nuint totalCompressedSize;
}

View File

@@ -0,0 +1,10 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/**
* Number of epochs and size of each epoch.
*/
public struct COVER_epoch_info_t
{
public uint num;
public uint size;
}

View File

@@ -0,0 +1,7 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public struct COVER_map_pair_t_s
{
public uint key;
public uint value;
}

View File

@@ -0,0 +1,9 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct COVER_map_s
{
public COVER_map_pair_t_s* data;
public uint sizeLog;
public uint size;
public uint sizeMask;
}

View File

@@ -0,0 +1,11 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/**
* A segment is a range in the source as well as the score of the segment.
*/
public struct COVER_segment_t
{
public uint begin;
public uint end;
public uint score;
}

View File

@@ -0,0 +1,12 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/**
* Parameters for COVER_tryParameters().
*/
public unsafe struct COVER_tryParameters_data_s
{
public COVER_ctx_t* ctx;
public COVER_best_s* best;
public nuint dictBufferCapacity;
public ZDICT_cover_params_t parameters;
}

View File

@@ -0,0 +1,849 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
private static readonly ZSTD_compressionParameters[][] ZSTD_defaultCParameters =
new ZSTD_compressionParameters[4][]
{
new ZSTD_compressionParameters[23]
{
new ZSTD_compressionParameters(
windowLog: 19,
chainLog: 12,
hashLog: 13,
searchLog: 1,
minMatch: 6,
targetLength: 1,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 19,
chainLog: 13,
hashLog: 14,
searchLog: 1,
minMatch: 7,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 20,
chainLog: 15,
hashLog: 16,
searchLog: 1,
minMatch: 6,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 21,
chainLog: 16,
hashLog: 17,
searchLog: 1,
minMatch: 5,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_dfast
),
new ZSTD_compressionParameters(
windowLog: 21,
chainLog: 18,
hashLog: 18,
searchLog: 1,
minMatch: 5,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_dfast
),
new ZSTD_compressionParameters(
windowLog: 21,
chainLog: 18,
hashLog: 19,
searchLog: 3,
minMatch: 5,
targetLength: 2,
strategy: ZSTD_strategy.ZSTD_greedy
),
new ZSTD_compressionParameters(
windowLog: 21,
chainLog: 18,
hashLog: 19,
searchLog: 3,
minMatch: 5,
targetLength: 4,
strategy: ZSTD_strategy.ZSTD_lazy
),
new ZSTD_compressionParameters(
windowLog: 21,
chainLog: 19,
hashLog: 20,
searchLog: 4,
minMatch: 5,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy
),
new ZSTD_compressionParameters(
windowLog: 21,
chainLog: 19,
hashLog: 20,
searchLog: 4,
minMatch: 5,
targetLength: 16,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 22,
chainLog: 20,
hashLog: 21,
searchLog: 4,
minMatch: 5,
targetLength: 16,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 22,
chainLog: 21,
hashLog: 22,
searchLog: 5,
minMatch: 5,
targetLength: 16,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 22,
chainLog: 21,
hashLog: 22,
searchLog: 6,
minMatch: 5,
targetLength: 16,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 22,
chainLog: 22,
hashLog: 23,
searchLog: 6,
minMatch: 5,
targetLength: 32,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 22,
chainLog: 22,
hashLog: 22,
searchLog: 4,
minMatch: 5,
targetLength: 32,
strategy: ZSTD_strategy.ZSTD_btlazy2
),
new ZSTD_compressionParameters(
windowLog: 22,
chainLog: 22,
hashLog: 23,
searchLog: 5,
minMatch: 5,
targetLength: 32,
strategy: ZSTD_strategy.ZSTD_btlazy2
),
new ZSTD_compressionParameters(
windowLog: 22,
chainLog: 23,
hashLog: 23,
searchLog: 6,
minMatch: 5,
targetLength: 32,
strategy: ZSTD_strategy.ZSTD_btlazy2
),
new ZSTD_compressionParameters(
windowLog: 22,
chainLog: 22,
hashLog: 22,
searchLog: 5,
minMatch: 5,
targetLength: 48,
strategy: ZSTD_strategy.ZSTD_btopt
),
new ZSTD_compressionParameters(
windowLog: 23,
chainLog: 23,
hashLog: 22,
searchLog: 5,
minMatch: 4,
targetLength: 64,
strategy: ZSTD_strategy.ZSTD_btopt
),
new ZSTD_compressionParameters(
windowLog: 23,
chainLog: 23,
hashLog: 22,
searchLog: 6,
minMatch: 3,
targetLength: 64,
strategy: ZSTD_strategy.ZSTD_btultra
),
new ZSTD_compressionParameters(
windowLog: 23,
chainLog: 24,
hashLog: 22,
searchLog: 7,
minMatch: 3,
targetLength: 256,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 25,
chainLog: 25,
hashLog: 23,
searchLog: 7,
minMatch: 3,
targetLength: 256,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 26,
chainLog: 26,
hashLog: 24,
searchLog: 7,
minMatch: 3,
targetLength: 512,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 27,
chainLog: 27,
hashLog: 25,
searchLog: 9,
minMatch: 3,
targetLength: 999,
strategy: ZSTD_strategy.ZSTD_btultra2
),
},
new ZSTD_compressionParameters[23]
{
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 12,
hashLog: 13,
searchLog: 1,
minMatch: 5,
targetLength: 1,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 13,
hashLog: 14,
searchLog: 1,
minMatch: 6,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 14,
hashLog: 14,
searchLog: 1,
minMatch: 5,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_dfast
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 16,
hashLog: 16,
searchLog: 1,
minMatch: 4,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_dfast
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 16,
hashLog: 17,
searchLog: 3,
minMatch: 5,
targetLength: 2,
strategy: ZSTD_strategy.ZSTD_greedy
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 17,
hashLog: 18,
searchLog: 5,
minMatch: 5,
targetLength: 2,
strategy: ZSTD_strategy.ZSTD_greedy
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 18,
hashLog: 19,
searchLog: 3,
minMatch: 5,
targetLength: 4,
strategy: ZSTD_strategy.ZSTD_lazy
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 18,
hashLog: 19,
searchLog: 4,
minMatch: 4,
targetLength: 4,
strategy: ZSTD_strategy.ZSTD_lazy
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 18,
hashLog: 19,
searchLog: 4,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 18,
hashLog: 19,
searchLog: 5,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 18,
hashLog: 19,
searchLog: 6,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 18,
hashLog: 19,
searchLog: 5,
minMatch: 4,
targetLength: 12,
strategy: ZSTD_strategy.ZSTD_btlazy2
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 19,
hashLog: 19,
searchLog: 7,
minMatch: 4,
targetLength: 12,
strategy: ZSTD_strategy.ZSTD_btlazy2
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 18,
hashLog: 19,
searchLog: 4,
minMatch: 4,
targetLength: 16,
strategy: ZSTD_strategy.ZSTD_btopt
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 18,
hashLog: 19,
searchLog: 4,
minMatch: 3,
targetLength: 32,
strategy: ZSTD_strategy.ZSTD_btopt
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 18,
hashLog: 19,
searchLog: 6,
minMatch: 3,
targetLength: 128,
strategy: ZSTD_strategy.ZSTD_btopt
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 19,
hashLog: 19,
searchLog: 6,
minMatch: 3,
targetLength: 128,
strategy: ZSTD_strategy.ZSTD_btultra
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 19,
hashLog: 19,
searchLog: 8,
minMatch: 3,
targetLength: 256,
strategy: ZSTD_strategy.ZSTD_btultra
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 19,
hashLog: 19,
searchLog: 6,
minMatch: 3,
targetLength: 128,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 19,
hashLog: 19,
searchLog: 8,
minMatch: 3,
targetLength: 256,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 19,
hashLog: 19,
searchLog: 10,
minMatch: 3,
targetLength: 512,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 19,
hashLog: 19,
searchLog: 12,
minMatch: 3,
targetLength: 512,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 18,
chainLog: 19,
hashLog: 19,
searchLog: 13,
minMatch: 3,
targetLength: 999,
strategy: ZSTD_strategy.ZSTD_btultra2
),
},
new ZSTD_compressionParameters[23]
{
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 12,
hashLog: 12,
searchLog: 1,
minMatch: 5,
targetLength: 1,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 12,
hashLog: 13,
searchLog: 1,
minMatch: 6,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 13,
hashLog: 15,
searchLog: 1,
minMatch: 5,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 15,
hashLog: 16,
searchLog: 2,
minMatch: 5,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_dfast
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 17,
hashLog: 17,
searchLog: 2,
minMatch: 4,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_dfast
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 16,
hashLog: 17,
searchLog: 3,
minMatch: 4,
targetLength: 2,
strategy: ZSTD_strategy.ZSTD_greedy
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 16,
hashLog: 17,
searchLog: 3,
minMatch: 4,
targetLength: 4,
strategy: ZSTD_strategy.ZSTD_lazy
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 16,
hashLog: 17,
searchLog: 3,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 16,
hashLog: 17,
searchLog: 4,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 16,
hashLog: 17,
searchLog: 5,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 16,
hashLog: 17,
searchLog: 6,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 17,
hashLog: 17,
searchLog: 5,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_btlazy2
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 7,
minMatch: 4,
targetLength: 12,
strategy: ZSTD_strategy.ZSTD_btlazy2
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 3,
minMatch: 4,
targetLength: 12,
strategy: ZSTD_strategy.ZSTD_btopt
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 4,
minMatch: 3,
targetLength: 32,
strategy: ZSTD_strategy.ZSTD_btopt
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 6,
minMatch: 3,
targetLength: 256,
strategy: ZSTD_strategy.ZSTD_btopt
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 6,
minMatch: 3,
targetLength: 128,
strategy: ZSTD_strategy.ZSTD_btultra
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 8,
minMatch: 3,
targetLength: 256,
strategy: ZSTD_strategy.ZSTD_btultra
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 10,
minMatch: 3,
targetLength: 512,
strategy: ZSTD_strategy.ZSTD_btultra
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 5,
minMatch: 3,
targetLength: 256,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 7,
minMatch: 3,
targetLength: 512,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 9,
minMatch: 3,
targetLength: 512,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 17,
chainLog: 18,
hashLog: 17,
searchLog: 11,
minMatch: 3,
targetLength: 999,
strategy: ZSTD_strategy.ZSTD_btultra2
),
},
new ZSTD_compressionParameters[23]
{
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 12,
hashLog: 13,
searchLog: 1,
minMatch: 5,
targetLength: 1,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 14,
hashLog: 15,
searchLog: 1,
minMatch: 5,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 14,
hashLog: 15,
searchLog: 1,
minMatch: 4,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_fast
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 14,
hashLog: 15,
searchLog: 2,
minMatch: 4,
targetLength: 0,
strategy: ZSTD_strategy.ZSTD_dfast
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 14,
hashLog: 14,
searchLog: 4,
minMatch: 4,
targetLength: 2,
strategy: ZSTD_strategy.ZSTD_greedy
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 14,
hashLog: 14,
searchLog: 3,
minMatch: 4,
targetLength: 4,
strategy: ZSTD_strategy.ZSTD_lazy
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 14,
hashLog: 14,
searchLog: 4,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 14,
hashLog: 14,
searchLog: 6,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 14,
hashLog: 14,
searchLog: 8,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_lazy2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 14,
searchLog: 5,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_btlazy2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 14,
searchLog: 9,
minMatch: 4,
targetLength: 8,
strategy: ZSTD_strategy.ZSTD_btlazy2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 14,
searchLog: 3,
minMatch: 4,
targetLength: 12,
strategy: ZSTD_strategy.ZSTD_btopt
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 14,
searchLog: 4,
minMatch: 3,
targetLength: 24,
strategy: ZSTD_strategy.ZSTD_btopt
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 14,
searchLog: 5,
minMatch: 3,
targetLength: 32,
strategy: ZSTD_strategy.ZSTD_btultra
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 15,
searchLog: 6,
minMatch: 3,
targetLength: 64,
strategy: ZSTD_strategy.ZSTD_btultra
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 15,
searchLog: 7,
minMatch: 3,
targetLength: 256,
strategy: ZSTD_strategy.ZSTD_btultra
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 15,
searchLog: 5,
minMatch: 3,
targetLength: 48,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 15,
searchLog: 6,
minMatch: 3,
targetLength: 128,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 15,
searchLog: 7,
minMatch: 3,
targetLength: 256,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 15,
searchLog: 8,
minMatch: 3,
targetLength: 256,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 15,
searchLog: 8,
minMatch: 3,
targetLength: 512,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 15,
searchLog: 9,
minMatch: 3,
targetLength: 512,
strategy: ZSTD_strategy.ZSTD_btultra2
),
new ZSTD_compressionParameters(
windowLog: 14,
chainLog: 15,
hashLog: 15,
searchLog: 10,
minMatch: 3,
targetLength: 999,
strategy: ZSTD_strategy.ZSTD_btultra2
),
},
};
}
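
The table above has four rows of 23 entries. In upstream zstd the rows correspond to source-size classes (unknown/large, up to 256 KB, up to 128 KB, up to 16 KB) and the columns to compression levels 0-22. A hedged sketch of the lookup, assuming it sits next to the table in the same class; the row thresholds are taken from upstream zstd, not from this diff:

// Hypothetical illustration of how a (level, srcSize) pair selects an entry.
static ZSTD_compressionParameters LookupDefaultCParams(int level, ulong srcSizeHint)
{
    int row =
        srcSizeHint == 0 || srcSizeHint > 256 * 1024 ? 0 // unknown or large
        : srcSizeHint > 128 * 1024 ? 1                   // up to 256 KB
        : srcSizeHint > 16 * 1024 ? 2                    // up to 128 KB
        : 3;                                             // up to 16 KB
    int col = level < 0 ? 0 : level > 22 ? 22 : level;   // clamp into [0, 22]
    return ZSTD_defaultCParameters[row][col];
}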

View File

@@ -0,0 +1,61 @@
using System.Runtime.CompilerServices;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
/* @return 1 if @u is a 2^n value, 0 otherwise
* useful to check a value is valid for alignment restrictions */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static int ZSTD_isPower2(nuint u)
{
return (u & (u - 1)) == 0 ? 1 : 0;
}
/**
* Helper function to perform a wrapped pointer difference without triggering
* UBSAN.
*
* @returns lhs - rhs with wrapping
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nint ZSTD_wrappedPtrDiff(byte* lhs, byte* rhs)
{
return (nint)(lhs - rhs);
}
/**
* Helper function to perform a wrapped pointer add without triggering UBSAN.
*
* @return ptr + add with wrapping
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static byte* ZSTD_wrappedPtrAdd(byte* ptr, nint add)
{
return ptr + add;
}
/**
* Helper function to perform a wrapped pointer subtraction without triggering
* UBSAN.
*
* @return ptr - sub with wrapping
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static byte* ZSTD_wrappedPtrSub(byte* ptr, nint sub)
{
return ptr - sub;
}
/**
* Helper function to add to a pointer that works around C's undefined behavior
* of adding 0 to NULL.
*
* @returns `ptr + add` except it defines `NULL + 0 == NULL`.
*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static byte* ZSTD_maybeNullPtrAdd(byte* ptr, nint add)
{
return add > 0 ? ptr + add : ptr;
}
}
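
ZSTD_isPower2 uses the classic bit trick: subtracting 1 from u flips the lowest set bit and everything below it, so the AND is zero exactly when u has at most one bit set (the expression also accepts u == 0, as upstream does). A standalone demonstration:

// Demonstrates the power-of-two test used by ZSTD_isPower2 above.
static bool IsPow2(nuint u) => (u & (u - 1)) == 0;
// IsPow2(64) == true, IsPow2(96) == false, IsPow2(0) == true (by convention)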

View File

@@ -0,0 +1,444 @@
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
private static int g_displayLevel = 0;
/**
* Returns the sum of the sample sizes.
*/
private static nuint COVER_sum(nuint* samplesSizes, uint nbSamples)
{
nuint sum = 0;
uint i;
for (i = 0; i < nbSamples; ++i)
{
sum += samplesSizes[i];
}
return sum;
}
/**
* Warns the user when their corpus is too small.
*/
private static void COVER_warnOnSmallCorpus(nuint maxDictSize, nuint nbDmers, int displayLevel)
{
double ratio = nbDmers / (double)maxDictSize;
if (ratio >= 10)
{
return;
}
}
/**
* Computes the number of epochs and the size of each epoch.
* We will make sure that each epoch gets at least 10 * k bytes.
*
* The COVER algorithms divide the data up into epochs of equal size and
* select one segment from each epoch.
*
* @param maxDictSize The maximum allowed dictionary size.
* @param nbDmers The number of dmers we are training on.
* @param k The parameter k (segment size).
* @param passes The target number of passes over the dmer corpus.
* More passes means a better dictionary.
*/
private static COVER_epoch_info_t COVER_computeEpochs(
uint maxDictSize,
uint nbDmers,
uint k,
uint passes
)
{
uint minEpochSize = k * 10;
COVER_epoch_info_t epochs;
epochs.num = 1 > maxDictSize / k / passes ? 1 : maxDictSize / k / passes;
epochs.size = nbDmers / epochs.num;
if (epochs.size >= minEpochSize)
{
assert(epochs.size * epochs.num <= nbDmers);
return epochs;
}
epochs.size = minEpochSize < nbDmers ? minEpochSize : nbDmers;
epochs.num = nbDmers / epochs.size;
assert(epochs.size * epochs.num <= nbDmers);
return epochs;
}
/**
* Checks total compressed size of a dictionary
*/
private static nuint COVER_checkTotalCompressedSize(
ZDICT_cover_params_t parameters,
nuint* samplesSizes,
byte* samples,
nuint* offsets,
nuint nbTrainSamples,
nuint nbSamples,
byte* dict,
nuint dictBufferCapacity
)
{
nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
/* Pointers */
ZSTD_CCtx_s* cctx;
ZSTD_CDict_s* cdict;
void* dst;
/* Local variables */
nuint dstCapacity;
nuint i;
{
nuint maxSampleSize = 0;
i = parameters.splitPoint < 1 ? nbTrainSamples : 0;
for (; i < nbSamples; ++i)
{
maxSampleSize = samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize;
}
dstCapacity = ZSTD_compressBound(maxSampleSize);
dst = malloc(dstCapacity);
}
cctx = ZSTD_createCCtx();
cdict = ZSTD_createCDict(dict, dictBufferCapacity, parameters.zParams.compressionLevel);
if (dst == null || cctx == null || cdict == null)
{
goto _compressCleanup;
}
totalCompressedSize = dictBufferCapacity;
i = parameters.splitPoint < 1 ? nbTrainSamples : 0;
for (; i < nbSamples; ++i)
{
nuint size = ZSTD_compress_usingCDict(
cctx,
dst,
dstCapacity,
samples + offsets[i],
samplesSizes[i],
cdict
);
if (ERR_isError(size))
{
totalCompressedSize = size;
goto _compressCleanup;
}
totalCompressedSize += size;
}
_compressCleanup:
ZSTD_freeCCtx(cctx);
ZSTD_freeCDict(cdict);
if (dst != null)
{
free(dst);
}
return totalCompressedSize;
}
/**
* Initialize the `COVER_best_t`.
*/
private static void COVER_best_init(COVER_best_s* best)
{
if (best == null)
return;
SynchronizationWrapper.Init(&best->mutex);
best->liveJobs = 0;
best->dict = null;
best->dictSize = 0;
best->compressedSize = unchecked((nuint)(-1));
best->parameters = new ZDICT_cover_params_t();
}
/**
* Wait until liveJobs == 0.
*/
private static void COVER_best_wait(COVER_best_s* best)
{
if (best == null)
{
return;
}
SynchronizationWrapper.Enter(&best->mutex);
while (best->liveJobs != 0)
{
SynchronizationWrapper.Wait(&best->mutex);
}
SynchronizationWrapper.Exit(&best->mutex);
}
/**
* Call COVER_best_wait() and then destroy the COVER_best_t.
*/
private static void COVER_best_destroy(COVER_best_s* best)
{
if (best == null)
{
return;
}
COVER_best_wait(best);
if (best->dict != null)
{
free(best->dict);
}
SynchronizationWrapper.Free(&best->mutex);
}
/**
* Called when a thread is about to be launched.
* Increments liveJobs.
*/
private static void COVER_best_start(COVER_best_s* best)
{
if (best == null)
{
return;
}
SynchronizationWrapper.Enter(&best->mutex);
++best->liveJobs;
SynchronizationWrapper.Exit(&best->mutex);
}
/**
* Called when a thread finishes executing, both on error or success.
* Decrements liveJobs and signals any waiting threads if liveJobs == 0.
* If this dictionary is the best so far save it and its parameters.
*/
private static void COVER_best_finish(
COVER_best_s* best,
ZDICT_cover_params_t parameters,
COVER_dictSelection selection
)
{
void* dict = selection.dictContent;
nuint compressedSize = selection.totalCompressedSize;
nuint dictSize = selection.dictSize;
if (best == null)
{
return;
}
{
nuint liveJobs;
SynchronizationWrapper.Enter(&best->mutex);
--best->liveJobs;
liveJobs = best->liveJobs;
if (compressedSize < best->compressedSize)
{
if (best->dict == null || best->dictSize < dictSize)
{
if (best->dict != null)
{
free(best->dict);
}
best->dict = malloc(dictSize);
if (best->dict == null)
{
best->compressedSize = unchecked(
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)
);
best->dictSize = 0;
SynchronizationWrapper.Pulse(&best->mutex);
SynchronizationWrapper.Exit(&best->mutex);
return;
}
}
if (dict != null)
{
memcpy(best->dict, dict, (uint)dictSize);
best->dictSize = dictSize;
best->parameters = parameters;
best->compressedSize = compressedSize;
}
}
if (liveJobs == 0)
{
SynchronizationWrapper.PulseAll(&best->mutex);
}
SynchronizationWrapper.Exit(&best->mutex);
}
}
private static COVER_dictSelection setDictSelection(byte* buf, nuint s, nuint csz)
{
COVER_dictSelection ds;
ds.dictContent = buf;
ds.dictSize = s;
ds.totalCompressedSize = csz;
return ds;
}
/**
* Error function for COVER_selectDict function. Returns a struct where
* return.totalCompressedSize is a ZSTD error.
*/
private static COVER_dictSelection COVER_dictSelectionError(nuint error)
{
return setDictSelection(null, 0, error);
}
/**
* Error function for COVER_selectDict function. Checks if the return
* value is an error.
*/
private static uint COVER_dictSelectionIsError(COVER_dictSelection selection)
{
return ERR_isError(selection.totalCompressedSize) || selection.dictContent == null
? 1U
: 0U;
}
/**
* Always call after selectDict is called to free up used memory from
* newly created dictionary.
*/
private static void COVER_dictSelectionFree(COVER_dictSelection selection)
{
free(selection.dictContent);
}
/**
* Called to finalize the dictionary and select one based on whether or not
* the shrink-dict flag was enabled. If enabled the dictionary used is the
* smallest dictionary within a specified regression of the compressed size
* from the largest dictionary.
*/
private static COVER_dictSelection COVER_selectDict(
byte* customDictContent,
nuint dictBufferCapacity,
nuint dictContentSize,
byte* samplesBuffer,
nuint* samplesSizes,
uint nbFinalizeSamples,
nuint nbCheckSamples,
nuint nbSamples,
ZDICT_cover_params_t @params,
nuint* offsets,
nuint totalCompressedSize
)
{
nuint largestDict = 0;
nuint largestCompressed = 0;
byte* customDictContentEnd = customDictContent + dictContentSize;
byte* largestDictbuffer = (byte*)malloc(dictBufferCapacity);
byte* candidateDictBuffer = (byte*)malloc(dictBufferCapacity);
double regressionTolerance = (double)@params.shrinkDictMaxRegression / 100 + 1;
if (largestDictbuffer == null || candidateDictBuffer == null)
{
free(largestDictbuffer);
free(candidateDictBuffer);
return COVER_dictSelectionError(dictContentSize);
}
memcpy(largestDictbuffer, customDictContent, (uint)dictContentSize);
dictContentSize = ZDICT_finalizeDictionary(
largestDictbuffer,
dictBufferCapacity,
customDictContent,
dictContentSize,
samplesBuffer,
samplesSizes,
nbFinalizeSamples,
@params.zParams
);
if (ZDICT_isError(dictContentSize))
{
free(largestDictbuffer);
free(candidateDictBuffer);
return COVER_dictSelectionError(dictContentSize);
}
totalCompressedSize = COVER_checkTotalCompressedSize(
@params,
samplesSizes,
samplesBuffer,
offsets,
nbCheckSamples,
nbSamples,
largestDictbuffer,
dictContentSize
);
if (ERR_isError(totalCompressedSize))
{
free(largestDictbuffer);
free(candidateDictBuffer);
return COVER_dictSelectionError(totalCompressedSize);
}
if (@params.shrinkDict == 0)
{
free(candidateDictBuffer);
return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
}
largestDict = dictContentSize;
largestCompressed = totalCompressedSize;
dictContentSize = 256;
while (dictContentSize < largestDict)
{
memcpy(candidateDictBuffer, largestDictbuffer, (uint)largestDict);
dictContentSize = ZDICT_finalizeDictionary(
candidateDictBuffer,
dictBufferCapacity,
customDictContentEnd - dictContentSize,
dictContentSize,
samplesBuffer,
samplesSizes,
nbFinalizeSamples,
@params.zParams
);
if (ZDICT_isError(dictContentSize))
{
free(largestDictbuffer);
free(candidateDictBuffer);
return COVER_dictSelectionError(dictContentSize);
}
totalCompressedSize = COVER_checkTotalCompressedSize(
@params,
samplesSizes,
samplesBuffer,
offsets,
nbCheckSamples,
nbSamples,
candidateDictBuffer,
dictContentSize
);
if (ERR_isError(totalCompressedSize))
{
free(largestDictbuffer);
free(candidateDictBuffer);
return COVER_dictSelectionError(totalCompressedSize);
}
if (totalCompressedSize <= largestCompressed * regressionTolerance)
{
free(largestDictbuffer);
return setDictSelection(candidateDictBuffer, dictContentSize, totalCompressedSize);
}
dictContentSize *= 2;
}
dictContentSize = largestDict;
totalCompressedSize = largestCompressed;
free(candidateDictBuffer);
return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
}
}
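
To make COVER_computeEpochs concrete, consider maxDictSize = 112640 (110 KB), k = 1024, passes = 10 and nbDmers = 1,000,000: the first guess is num = max(1, 112640 / 1024 / 10) = 11 and size = 1000000 / 11 = 90909, which clears minEpochSize = 10240, so the fallback branch never runs. A runnable sketch of that arithmetic (values are illustrative):

using System;

static class EpochExample
{
    static void Main()
    {
        // Worked example of the epoch split, mirroring COVER_computeEpochs.
        uint maxDictSize = 112640, nbDmers = 1_000_000, k = 1024, passes = 10;
        uint minEpochSize = k * 10;                        // 10240
        uint num = Math.Max(1u, maxDictSize / k / passes); // 110 / 10 = 11
        uint size = nbDmers / num;                         // 90909
        if (size < minEpochSize)                           // false here
        {
            size = Math.Min(minEpochSize, nbDmers);
            num = nbDmers / size;
        }
        Console.WriteLine($"{num} epochs x {size} dmers"); // 11 x 90909 = 999999 <= nbDmers
    }
}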

View File

@@ -0,0 +1,12 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/*-***************************/
/* generic DTableDesc */
/*-***************************/
public struct DTableDesc
{
public byte maxTableLog;
public byte tableType;
public byte tableLog;
public byte reserved;
}

View File

@@ -0,0 +1,13 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct EStats_ress_t
{
/* dictionary */
public ZSTD_CDict_s* dict;
/* working context */
public ZSTD_CCtx_s* zc;
/* must be ZSTD_BLOCKSIZE_MAX allocated */
public void* workPlace;
}

View File

@@ -0,0 +1,447 @@
using System.Runtime.CompilerServices;
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
/*=== Version ===*/
private static uint FSE_versionNumber()
{
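// FSE version 0.9.0, encoded as major * 10000 + minor * 100 + release (= 900)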
return 0 * 100 * 100 + 9 * 100 + 0;
}
/*=== Error Management ===*/
private static bool FSE_isError(nuint code)
{
return ERR_isError(code);
}
private static string FSE_getErrorName(nuint code)
{
return ERR_getErrorName(code);
}
/* Error Management */
private static bool HUF_isError(nuint code)
{
return ERR_isError(code);
}
private static string HUF_getErrorName(nuint code)
{
return ERR_getErrorName(code);
}
/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint FSE_readNCount_body(
short* normalizedCounter,
uint* maxSVPtr,
uint* tableLogPtr,
void* headerBuffer,
nuint hbSize
)
{
byte* istart = (byte*)headerBuffer;
byte* iend = istart + hbSize;
byte* ip = istart;
int nbBits;
int remaining;
int threshold;
uint bitStream;
int bitCount;
uint charnum = 0;
uint maxSV1 = *maxSVPtr + 1;
int previous0 = 0;
if (hbSize < 8)
{
sbyte* buffer = stackalloc sbyte[8];
/* This function only works when hbSize >= 8 */
memset(buffer, 0, sizeof(sbyte) * 8);
memcpy(buffer, headerBuffer, (uint)hbSize);
{
nuint countSize = FSE_readNCount(
normalizedCounter,
maxSVPtr,
tableLogPtr,
buffer,
sizeof(sbyte) * 8
);
if (FSE_isError(countSize))
return countSize;
if (countSize > hbSize)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
return countSize;
}
}
assert(hbSize >= 8);
memset(normalizedCounter, 0, (*maxSVPtr + 1) * sizeof(short));
bitStream = MEM_readLE32(ip);
nbBits = (int)((bitStream & 0xF) + 5);
if (nbBits > 15)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
bitStream >>= 4;
bitCount = 4;
*tableLogPtr = (uint)nbBits;
remaining = (1 << nbBits) + 1;
threshold = 1 << nbBits;
nbBits++;
for (; ; )
{
if (previous0 != 0)
{
/* Count the number of repeats. Each time the
* 2-bit repeat code is 0b11 there is another
* repeat.
* Avoid UB by setting the high bit to 1.
*/
int repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1);
while (repeats >= 12)
{
charnum += 3 * 12;
if (ip <= iend - 7)
{
ip += 3;
}
else
{
bitCount -= (int)(8 * (iend - 7 - ip));
bitCount &= 31;
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> bitCount;
repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1);
}
charnum += (uint)(3 * repeats);
bitStream >>= 2 * repeats;
bitCount += 2 * repeats;
assert((bitStream & 3) < 3);
charnum += bitStream & 3;
bitCount += 2;
if (charnum >= maxSV1)
break;
if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4)
{
assert(bitCount >> 3 <= 3);
ip += bitCount >> 3;
bitCount &= 7;
}
else
{
bitCount -= (int)(8 * (iend - 4 - ip));
bitCount &= 31;
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> bitCount;
}
{
int max = 2 * threshold - 1 - remaining;
int count;
if ((bitStream & (uint)(threshold - 1)) < (uint)max)
{
count = (int)(bitStream & (uint)(threshold - 1));
bitCount += nbBits - 1;
}
else
{
count = (int)(bitStream & (uint)(2 * threshold - 1));
if (count >= threshold)
count -= max;
bitCount += nbBits;
}
count--;
if (count >= 0)
{
remaining -= count;
}
else
{
assert(count == -1);
remaining += count;
}
normalizedCounter[charnum++] = (short)count;
previous0 = count == 0 ? 1 : 0;
assert(threshold > 1);
if (remaining < threshold)
{
if (remaining <= 1)
break;
nbBits = (int)(ZSTD_highbit32((uint)remaining) + 1);
threshold = 1 << (nbBits - 1);
}
if (charnum >= maxSV1)
break;
if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4)
{
ip += bitCount >> 3;
bitCount &= 7;
}
else
{
bitCount -= (int)(8 * (iend - 4 - ip));
bitCount &= 31;
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> bitCount;
}
}
if (remaining != 1)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
if (charnum > maxSV1)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall));
if (bitCount > 32)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
*maxSVPtr = charnum - 1;
ip += (bitCount + 7) >> 3;
return (nuint)(ip - istart);
}
/* Avoids the FORCE_INLINE of the _body() function. */
private static nuint FSE_readNCount_body_default(
short* normalizedCounter,
uint* maxSVPtr,
uint* tableLogPtr,
void* headerBuffer,
nuint hbSize
)
{
return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
/*! FSE_readNCount_bmi2():
* Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
*/
private static nuint FSE_readNCount_bmi2(
short* normalizedCounter,
uint* maxSVPtr,
uint* tableLogPtr,
void* headerBuffer,
nuint hbSize,
int bmi2
)
{
return FSE_readNCount_body_default(
normalizedCounter,
maxSVPtr,
tableLogPtr,
headerBuffer,
hbSize
);
}
/*! FSE_readNCount():
Read compactly saved 'normalizedCounter' from 'rBuffer'.
@return : size read from 'rBuffer',
or an errorCode, which can be tested using FSE_isError().
maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
private static nuint FSE_readNCount(
short* normalizedCounter,
uint* maxSVPtr,
uint* tableLogPtr,
void* headerBuffer,
nuint hbSize
)
{
return FSE_readNCount_bmi2(
normalizedCounter,
maxSVPtr,
tableLogPtr,
headerBuffer,
hbSize,
0
);
}
/*! HUF_readStats() :
Read compact Huffman tree, saved by HUF_writeCTable().
`huffWeight` is destination buffer.
`rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
@return : size read from `src`, or an error code.
Note : Needed by HUF_readCTable() and HUF_readDTableX?().
*/
private static nuint HUF_readStats(
byte* huffWeight,
nuint hwSize,
uint* rankStats,
uint* nbSymbolsPtr,
uint* tableLogPtr,
void* src,
nuint srcSize
)
{
uint* wksp = stackalloc uint[219];
return HUF_readStats_wksp(
huffWeight,
hwSize,
rankStats,
nbSymbolsPtr,
tableLogPtr,
src,
srcSize,
wksp,
sizeof(uint) * 219,
0
);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint HUF_readStats_body(
byte* huffWeight,
nuint hwSize,
uint* rankStats,
uint* nbSymbolsPtr,
uint* tableLogPtr,
void* src,
nuint srcSize,
void* workSpace,
nuint wkspSize,
int bmi2
)
{
uint weightTotal;
byte* ip = (byte*)src;
nuint iSize;
nuint oSize;
if (srcSize == 0)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
iSize = ip[0];
if (iSize >= 128)
{
oSize = iSize - 127;
iSize = (oSize + 1) / 2;
if (iSize + 1 > srcSize)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
if (oSize >= hwSize)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
ip += 1;
{
uint n;
for (n = 0; n < oSize; n += 2)
{
huffWeight[n] = (byte)(ip[n / 2] >> 4);
huffWeight[n + 1] = (byte)(ip[n / 2] & 15);
}
}
}
else
{
if (iSize + 1 > srcSize)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
oSize = FSE_decompress_wksp_bmi2(
huffWeight,
hwSize - 1,
ip + 1,
iSize,
6,
workSpace,
wkspSize,
bmi2
);
if (FSE_isError(oSize))
return oSize;
}
memset(rankStats, 0, (12 + 1) * sizeof(uint));
weightTotal = 0;
{
uint n;
for (n = 0; n < oSize; n++)
{
if (huffWeight[n] > 12)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
rankStats[huffWeight[n]]++;
weightTotal += (uint)((1 << huffWeight[n]) >> 1);
}
}
if (weightTotal == 0)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
{
uint tableLog = ZSTD_highbit32(weightTotal) + 1;
if (tableLog > 12)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
*tableLogPtr = tableLog;
{
uint total = (uint)(1 << (int)tableLog);
uint rest = total - weightTotal;
uint verif = (uint)(1 << (int)ZSTD_highbit32(rest));
uint lastWeight = ZSTD_highbit32(rest) + 1;
if (verif != rest)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
huffWeight[oSize] = (byte)lastWeight;
rankStats[lastWeight]++;
}
}
if (rankStats[1] < 2 || (rankStats[1] & 1) != 0)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
*nbSymbolsPtr = (uint)(oSize + 1);
return iSize + 1;
}
/* Avoids the FORCE_INLINE of the _body() function. */
private static nuint HUF_readStats_body_default(
byte* huffWeight,
nuint hwSize,
uint* rankStats,
uint* nbSymbolsPtr,
uint* tableLogPtr,
void* src,
nuint srcSize,
void* workSpace,
nuint wkspSize
)
{
return HUF_readStats_body(
huffWeight,
hwSize,
rankStats,
nbSymbolsPtr,
tableLogPtr,
src,
srcSize,
workSpace,
wkspSize,
0
);
}
private static nuint HUF_readStats_wksp(
byte* huffWeight,
nuint hwSize,
uint* rankStats,
uint* nbSymbolsPtr,
uint* tableLogPtr,
void* src,
nuint srcSize,
void* workSpace,
nuint wkspSize,
int flags
)
{
return HUF_readStats_body_default(
huffWeight,
hwSize,
rankStats,
nbSymbolsPtr,
tableLogPtr,
src,
srcSize,
workSpace,
wkspSize
);
}
}
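
In HUF_readStats' direct path (first byte >= 128), oSize = iSize - 127 weights arrive packed two per byte, high nibble first, and the final weight is then reconstructed from the leftover probability mass. A worked example of the unpacking, with illustrative values:

static class HuffWeightExample
{
    static void Main()
    {
        byte header = 0x83;             // 131 => oSize = 131 - 127 = 4 weights
        byte[] packed = { 0x21, 0x13 }; // iSize = (4 + 1) / 2 = 2 bytes
        int oSize = header - 127;
        var weights = new byte[oSize];
        for (int n = 0; n < oSize; n += 2)
        {
            weights[n] = (byte)(packed[n / 2] >> 4);         // high nibble
            if (n + 1 < oSize)
                weights[n + 1] = (byte)(packed[n / 2] & 15); // low nibble
        }
        // weights == { 2, 1, 1, 3 }; HUF_readStats then derives the implicit
        // last weight from the remaining probability mass.
    }
}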

View File

@@ -0,0 +1,110 @@
using System.Runtime.CompilerServices;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static bool ERR_isError(nuint code)
{
return code > unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode));
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ZSTD_ErrorCode ERR_getErrorCode(nuint code)
{
if (!ERR_isError(code))
return 0;
return (ZSTD_ErrorCode)(0 - code);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static string ERR_getErrorName(nuint code)
{
return ERR_getErrorString(ERR_getErrorCode(code));
}
/*-****************************************
* Error Strings
******************************************/
private static string ERR_getErrorString(ZSTD_ErrorCode code)
{
const string notErrorCode = "Unspecified error code";
switch (code)
{
case ZSTD_ErrorCode.ZSTD_error_no_error:
return "No error detected";
case ZSTD_ErrorCode.ZSTD_error_GENERIC:
return "Error (generic)";
case ZSTD_ErrorCode.ZSTD_error_prefix_unknown:
return "Unknown frame descriptor";
case ZSTD_ErrorCode.ZSTD_error_version_unsupported:
return "Version not supported";
case ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported:
return "Unsupported frame parameter";
case ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge:
return "Frame requires too much memory for decoding";
case ZSTD_ErrorCode.ZSTD_error_corruption_detected:
return "Data corruption detected";
case ZSTD_ErrorCode.ZSTD_error_checksum_wrong:
return "Restored data doesn't match checksum";
case ZSTD_ErrorCode.ZSTD_error_literals_headerWrong:
return "Header of Literals' block doesn't respect format specification";
case ZSTD_ErrorCode.ZSTD_error_parameter_unsupported:
return "Unsupported parameter";
case ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported:
return "Unsupported combination of parameters";
case ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound:
return "Parameter is out of bound";
case ZSTD_ErrorCode.ZSTD_error_init_missing:
return "Context should be init first";
case ZSTD_ErrorCode.ZSTD_error_memory_allocation:
return "Allocation error : not enough memory";
case ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall:
return "workSpace buffer is not large enough";
case ZSTD_ErrorCode.ZSTD_error_stage_wrong:
return "Operation not authorized at current processing stage";
case ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge:
return "tableLog requires too much memory : unsupported";
case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge:
return "Unsupported max Symbol Value : too large";
case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall:
return "Specified maxSymbolValue is too small";
case ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock:
return "This mode cannot generate an uncompressed block";
case ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected:
return "pledged buffer stability condition is not respected";
case ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted:
return "Dictionary is corrupted";
case ZSTD_ErrorCode.ZSTD_error_dictionary_wrong:
return "Dictionary mismatch";
case ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed:
return "Cannot create Dictionary from provided samples";
case ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall:
return "Destination buffer is too small";
case ZSTD_ErrorCode.ZSTD_error_srcSize_wrong:
return "Src size is incorrect";
case ZSTD_ErrorCode.ZSTD_error_dstBuffer_null:
return "Operation on NULL destination buffer";
case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull:
return "Operation made no progress over multiple calls, due to output buffer being full";
case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty:
return "Operation made no progress over multiple calls, due to input being empty";
case ZSTD_ErrorCode.ZSTD_error_frameIndex_tooLarge:
return "Frame index is too large";
case ZSTD_ErrorCode.ZSTD_error_seekableIO:
return "An I/O error occurred when reading/seeking";
case ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong:
return "Destination buffer is wrong";
case ZSTD_ErrorCode.ZSTD_error_srcBuffer_wrong:
return "Source buffer is wrong";
case ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed:
return "Block-level external sequence producer returned an error code";
case ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid:
return "External sequences are not valid";
case ZSTD_ErrorCode.ZSTD_error_maxCode:
default:
return notErrorCode;
}
}
}
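
These helpers implement zstd's in-band error convention: an error code e is returned as the nuint value unchecked((nuint)(-(int)e)), which lands near nuint.MaxValue, and ERR_isError simply compares against the negated maxCode. A hedged round-trip sketch, assuming ZSTD_error_maxCode is the largest enum member as in upstream:

static class ErrorConventionExample
{
    // Mirrors the negated-error-code idiom used throughout this port.
    static nuint Encode(ZSTD_ErrorCode e) => unchecked((nuint)(-(int)e));

    static bool IsError(nuint code) =>
        code > unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode));

    static void Main()
    {
        nuint ok = 42; // an ordinary size result
        nuint bad = Encode(ZSTD_ErrorCode.ZSTD_error_srcSize_wrong);
        // IsError(ok) == false, IsError(bad) == true; matches ERR_isError above.
    }
}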

View File

@@ -0,0 +1,7 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public struct EstimatedBlockSize
{
public nuint estLitSize;
public nuint estBlockSize;
}

View File

@@ -0,0 +1,19 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/*-*************************************
* Acceleration
***************************************/
public struct FASTCOVER_accel_t
{
/* Percentage of training samples used for ZDICT_finalizeDictionary */
public uint finalize;
/* Number of dmer skipped between each dmer counted in computeFrequency */
public uint skip;
public FASTCOVER_accel_t(uint finalize, uint skip)
{
this.finalize = finalize;
this.skip = skip;
}
}

View File

@@ -0,0 +1,19 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/*-*************************************
* Context
***************************************/
public unsafe struct FASTCOVER_ctx_t
{
public byte* samples;
public nuint* offsets;
public nuint* samplesSizes;
public nuint nbSamples;
public nuint nbTrainSamples;
public nuint nbTestSamples;
public nuint nbDmers;
public uint* freqs;
public uint d;
public uint f;
public FASTCOVER_accel_t accelParams;
}

View File

@@ -0,0 +1,12 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/**
* Parameters for FASTCOVER_tryParameters().
*/
public unsafe struct FASTCOVER_tryParameters_data_s
{
public FASTCOVER_ctx_t* ctx;
public COVER_best_s* best;
public nuint dictBufferCapacity;
public ZDICT_cover_params_t parameters;
}

View File

@@ -0,0 +1,7 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public struct FPStats
{
public Fingerprint pastEvents;
public Fingerprint newEvents;
}

View File

@@ -0,0 +1,16 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/* *****************************************
* FSE symbol compression API
*******************************************/
/*!
This API consists of small unitary functions, which highly benefit from being inlined.
Hence their bodies are included in the next section.
*/
public unsafe struct FSE_CState_t
{
public nint value;
public void* stateTable;
public void* symbolTT;
public uint stateLog;
}

View File

@@ -0,0 +1,12 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/* *****************************************
* FSE symbol decompression API
*******************************************/
public unsafe struct FSE_DState_t
{
public nuint state;
/* precise table may vary, depending on U16 */
public void* table;
}

View File

@@ -0,0 +1,8 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/* ====== Decompression ====== */
public struct FSE_DTableHeader
{
public ushort tableLog;
public ushort fastMode;
}

View File

@@ -0,0 +1,6 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct FSE_DecompressWksp
{
public fixed short ncount[256];
}

View File

@@ -0,0 +1,8 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public struct FSE_decode_t
{
public ushort newState;
public byte symbol;
public byte nbBits;
}

View File

@@ -0,0 +1,13 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public enum FSE_repeat
{
/**< Cannot use the previous table */
FSE_repeat_none,
/**< Can use the previous table but it must be checked */
FSE_repeat_check,
/**< Can use the previous table and it is assumed to be valid */
FSE_repeat_valid,
}

View File

@@ -0,0 +1,10 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/* *****************************************
* Implementation of inlined functions
*******************************************/
public struct FSE_symbolCompressionTransform
{
public int deltaFindState;
public uint deltaNbBits;
}

View File

@@ -0,0 +1,761 @@
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
/*-*************************************
* Hash Functions
***************************************/
/**
* Hash the d-byte value pointed to by p and mod 2^f into the frequency vector
*/
private static nuint FASTCOVER_hashPtrToIndex(void* p, uint f, uint d)
{
if (d == 6)
{
return ZSTD_hash6Ptr(p, f);
}
return ZSTD_hash8Ptr(p, f);
}
private static readonly FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = GetArrayPointer(
new FASTCOVER_accel_t[11]
{
new FASTCOVER_accel_t(finalize: 100, skip: 0),
new FASTCOVER_accel_t(finalize: 100, skip: 0),
new FASTCOVER_accel_t(finalize: 50, skip: 1),
new FASTCOVER_accel_t(finalize: 34, skip: 2),
new FASTCOVER_accel_t(finalize: 25, skip: 3),
new FASTCOVER_accel_t(finalize: 20, skip: 4),
new FASTCOVER_accel_t(finalize: 17, skip: 5),
new FASTCOVER_accel_t(finalize: 14, skip: 6),
new FASTCOVER_accel_t(finalize: 13, skip: 7),
new FASTCOVER_accel_t(finalize: 11, skip: 8),
new FASTCOVER_accel_t(finalize: 10, skip: 9),
}
);
/*-*************************************
* Helper functions
***************************************/
/**
* Selects the best segment in an epoch.
* Segments are scored according to the function:
*
* Let F(d) be the frequency of all dmers with hash value d.
* Let S_i be hash value of the dmer at position i of segment S which has length k.
*
* Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
*
* Once the dmer with hash value d is in the dictionary we set F(d) = 0.
*/
private static COVER_segment_t FASTCOVER_selectSegment(
FASTCOVER_ctx_t* ctx,
uint* freqs,
uint begin,
uint end,
ZDICT_cover_params_t parameters,
ushort* segmentFreqs
)
{
/* Constants */
uint k = parameters.k;
uint d = parameters.d;
uint f = ctx->f;
uint dmersInK = k - d + 1;
/* Try each segment (activeSegment) and save the best (bestSegment) */
COVER_segment_t bestSegment = new COVER_segment_t
{
begin = 0,
end = 0,
score = 0,
};
COVER_segment_t activeSegment;
activeSegment.begin = begin;
activeSegment.end = begin;
activeSegment.score = 0;
while (activeSegment.end < end)
{
/* Get hash value of current dmer */
nuint idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);
if (segmentFreqs[idx] == 0)
{
activeSegment.score += freqs[idx];
}
activeSegment.end += 1;
segmentFreqs[idx] += 1;
if (activeSegment.end - activeSegment.begin == dmersInK + 1)
{
/* Get hash value of the dmer to be eliminated from active segment */
nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
segmentFreqs[delIndex] -= 1;
if (segmentFreqs[delIndex] == 0)
{
activeSegment.score -= freqs[delIndex];
}
activeSegment.begin += 1;
}
if (activeSegment.score > bestSegment.score)
{
bestSegment = activeSegment;
}
}
while (activeSegment.begin < end)
{
nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
segmentFreqs[delIndex] -= 1;
activeSegment.begin += 1;
}
{
/* Zero the frequency of hash value of each dmer covered by the chosen segment. */
uint pos;
for (pos = bestSegment.begin; pos != bestSegment.end; ++pos)
{
nuint i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d);
freqs[i] = 0;
}
}
return bestSegment;
}
private static int FASTCOVER_checkParameters(
ZDICT_cover_params_t parameters,
nuint maxDictSize,
uint f,
uint accel
)
{
if (parameters.d == 0 || parameters.k == 0)
{
return 0;
}
if (parameters.d != 6 && parameters.d != 8)
{
return 0;
}
if (parameters.k > maxDictSize)
{
return 0;
}
if (parameters.d > parameters.k)
{
return 0;
}
if (f > 31 || f == 0)
{
return 0;
}
if (parameters.splitPoint <= 0 || parameters.splitPoint > 1)
{
return 0;
}
if (accel > 10 || accel == 0)
{
return 0;
}
return 1;
}
/**
* Clean up a context initialized with `FASTCOVER_ctx_init()`.
*/
private static void FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
{
if (ctx == null)
return;
free(ctx->freqs);
ctx->freqs = null;
free(ctx->offsets);
ctx->offsets = null;
}
/**
* Calculate the frequency of each dmer's hash value in ctx->samples
*/
private static void FASTCOVER_computeFrequency(uint* freqs, FASTCOVER_ctx_t* ctx)
{
uint f = ctx->f;
uint d = ctx->d;
uint skip = ctx->accelParams.skip;
uint readLength = d > 8 ? d : 8;
nuint i;
assert(ctx->nbTrainSamples >= 5);
assert(ctx->nbTrainSamples <= ctx->nbSamples);
for (i = 0; i < ctx->nbTrainSamples; i++)
{
/* start of current dmer */
nuint start = ctx->offsets[i];
nuint currSampleEnd = ctx->offsets[i + 1];
while (start + readLength <= currSampleEnd)
{
nuint dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d);
freqs[dmerIndex]++;
start = start + skip + 1;
}
}
}
/**
* Prepare a context for dictionary building.
* The context is only dependent on the parameter `d` and can be used multiple
* times.
* Returns 0 on success or error code on error.
* The context must be destroyed with `FASTCOVER_ctx_destroy()`.
*/
private static nuint FASTCOVER_ctx_init(
FASTCOVER_ctx_t* ctx,
void* samplesBuffer,
nuint* samplesSizes,
uint nbSamples,
uint d,
double splitPoint,
uint f,
FASTCOVER_accel_t accelParams
)
{
byte* samples = (byte*)samplesBuffer;
nuint totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
/* Split samples into testing and training sets */
uint nbTrainSamples = splitPoint < 1 ? (uint)(nbSamples * splitPoint) : nbSamples;
uint nbTestSamples = splitPoint < 1 ? nbSamples - nbTrainSamples : nbSamples;
nuint trainingSamplesSize =
splitPoint < 1 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
nuint testSamplesSize =
splitPoint < 1
? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples)
: totalSamplesSize;
if (
totalSamplesSize < (d > sizeof(ulong) ? d : sizeof(ulong))
|| totalSamplesSize >= (sizeof(nuint) == 8 ? unchecked((uint)-1) : 1U << 30)
)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
}
if (nbTrainSamples < 5)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
}
if (nbTestSamples < 1)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
}
*ctx = new FASTCOVER_ctx_t
{
samples = samples,
samplesSizes = samplesSizes,
nbSamples = nbSamples,
nbTrainSamples = nbTrainSamples,
nbTestSamples = nbTestSamples,
nbDmers = trainingSamplesSize - (d > sizeof(ulong) ? d : sizeof(ulong)) + 1,
d = d,
f = f,
accelParams = accelParams,
offsets = (nuint*)calloc(nbSamples + 1, (ulong)sizeof(nuint)),
};
if (ctx->offsets == null)
{
FASTCOVER_ctx_destroy(ctx);
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
}
{
uint i;
ctx->offsets[0] = 0;
assert(nbSamples >= 5);
for (i = 1; i <= nbSamples; ++i)
{
ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
}
}
ctx->freqs = (uint*)calloc((ulong)1 << (int)f, sizeof(uint));
if (ctx->freqs == null)
{
FASTCOVER_ctx_destroy(ctx);
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
}
FASTCOVER_computeFrequency(ctx->freqs, ctx);
return 0;
}
/**
* Given the prepared context build the dictionary.
*/
private static nuint FASTCOVER_buildDictionary(
FASTCOVER_ctx_t* ctx,
uint* freqs,
void* dictBuffer,
nuint dictBufferCapacity,
ZDICT_cover_params_t parameters,
ushort* segmentFreqs
)
{
byte* dict = (byte*)dictBuffer;
nuint tail = dictBufferCapacity;
/* Divide the data into epochs. We will select one segment from each epoch. */
COVER_epoch_info_t epochs = COVER_computeEpochs(
(uint)dictBufferCapacity,
(uint)ctx->nbDmers,
parameters.k,
1
);
const nuint maxZeroScoreRun = 10;
nuint zeroScoreRun = 0;
nuint epoch;
for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num)
{
uint epochBegin = (uint)(epoch * epochs.size);
uint epochEnd = epochBegin + epochs.size;
nuint segmentSize;
/* Select a segment */
COVER_segment_t segment = FASTCOVER_selectSegment(
ctx,
freqs,
epochBegin,
epochEnd,
parameters,
segmentFreqs
);
if (segment.score == 0)
{
if (++zeroScoreRun >= maxZeroScoreRun)
{
break;
}
continue;
}
zeroScoreRun = 0;
segmentSize =
segment.end - segment.begin + parameters.d - 1 < tail
? segment.end - segment.begin + parameters.d - 1
: tail;
if (segmentSize < parameters.d)
{
break;
}
tail -= segmentSize;
memcpy(dict + tail, ctx->samples + segment.begin, (uint)segmentSize);
}
return tail;
}
/**
* Tries a set of parameters and updates the COVER_best_t with the results.
* This function is thread safe if zstd is compiled with multithreaded support.
* It takes its parameters as an *OWNING* opaque pointer to support threading.
*/
private static void FASTCOVER_tryParameters(void* opaque)
{
/* Save parameters as local variables */
FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)opaque;
FASTCOVER_ctx_t* ctx = data->ctx;
ZDICT_cover_params_t parameters = data->parameters;
nuint dictBufferCapacity = data->dictBufferCapacity;
nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
/* Initialize array to keep track of frequency of dmer within activeSegment */
ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)ctx->f, sizeof(ushort));
/* Allocate space for hash table, dict, and freqs */
byte* dict = (byte*)malloc(dictBufferCapacity);
COVER_dictSelection selection = COVER_dictSelectionError(
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC))
);
uint* freqs = (uint*)malloc(((ulong)1 << (int)ctx->f) * sizeof(uint));
if (segmentFreqs == null || dict == null || freqs == null)
{
goto _cleanup;
}
memcpy(freqs, ctx->freqs, (uint)(((ulong)1 << (int)ctx->f) * sizeof(uint)));
{
nuint tail = FASTCOVER_buildDictionary(
ctx,
freqs,
dict,
dictBufferCapacity,
parameters,
segmentFreqs
);
uint nbFinalizeSamples = (uint)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
selection = COVER_selectDict(
dict + tail,
dictBufferCapacity,
dictBufferCapacity - tail,
ctx->samples,
ctx->samplesSizes,
nbFinalizeSamples,
ctx->nbTrainSamples,
ctx->nbSamples,
parameters,
ctx->offsets,
totalCompressedSize
);
if (COVER_dictSelectionIsError(selection) != 0)
{
goto _cleanup;
}
}
_cleanup:
free(dict);
COVER_best_finish(data->best, parameters, selection);
free(data);
free(segmentFreqs);
COVER_dictSelectionFree(selection);
free(freqs);
}
private static void FASTCOVER_convertToCoverParams(
ZDICT_fastCover_params_t fastCoverParams,
ZDICT_cover_params_t* coverParams
)
{
coverParams->k = fastCoverParams.k;
coverParams->d = fastCoverParams.d;
coverParams->steps = fastCoverParams.steps;
coverParams->nbThreads = fastCoverParams.nbThreads;
coverParams->splitPoint = fastCoverParams.splitPoint;
coverParams->zParams = fastCoverParams.zParams;
coverParams->shrinkDict = fastCoverParams.shrinkDict;
}
private static void FASTCOVER_convertToFastCoverParams(
ZDICT_cover_params_t coverParams,
ZDICT_fastCover_params_t* fastCoverParams,
uint f,
uint accel
)
{
fastCoverParams->k = coverParams.k;
fastCoverParams->d = coverParams.d;
fastCoverParams->steps = coverParams.steps;
fastCoverParams->nbThreads = coverParams.nbThreads;
fastCoverParams->splitPoint = coverParams.splitPoint;
fastCoverParams->f = f;
fastCoverParams->accel = accel;
fastCoverParams->zParams = coverParams.zParams;
fastCoverParams->shrinkDict = coverParams.shrinkDict;
}
/*! ZDICT_trainFromBuffer_fastCover():
* Train a dictionary from an array of samples using a modified version of COVER algorithm.
* Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
* supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
* d and k are required.
* All other parameters are optional and will use default values if not provided.
* The resulting dictionary will be saved into `dictBuffer`.
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
* or an error code, which can be tested with ZDICT_isError().
* See ZDICT_trainFromBuffer() for details on failure modes.
* Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.
* Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
* It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
* In general, it's recommended to provide a few thousands samples, though this can vary a lot.
* It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
*/
public static nuint ZDICT_trainFromBuffer_fastCover(
void* dictBuffer,
nuint dictBufferCapacity,
void* samplesBuffer,
nuint* samplesSizes,
uint nbSamples,
ZDICT_fastCover_params_t parameters
)
{
byte* dict = (byte*)dictBuffer;
FASTCOVER_ctx_t ctx;
ZDICT_cover_params_t coverParams;
FASTCOVER_accel_t accelParams;
g_displayLevel = (int)parameters.zParams.notificationLevel;
parameters.splitPoint = 1;
parameters.f = parameters.f == 0 ? 20 : parameters.f;
parameters.accel = parameters.accel == 0 ? 1 : parameters.accel;
coverParams = new ZDICT_cover_params_t();
FASTCOVER_convertToCoverParams(parameters, &coverParams);
if (
FASTCOVER_checkParameters(
coverParams,
dictBufferCapacity,
parameters.f,
parameters.accel
) == 0
)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
}
if (nbSamples == 0)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
}
if (dictBufferCapacity < 256)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
}
accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
{
nuint initVal = FASTCOVER_ctx_init(
&ctx,
samplesBuffer,
samplesSizes,
nbSamples,
coverParams.d,
parameters.splitPoint,
parameters.f,
accelParams
);
if (ERR_isError(initVal))
{
return initVal;
}
}
COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel);
{
/* Initialize array to keep track of frequency of dmer within activeSegment */
ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)parameters.f, sizeof(ushort));
nuint tail = FASTCOVER_buildDictionary(
&ctx,
ctx.freqs,
dictBuffer,
dictBufferCapacity,
coverParams,
segmentFreqs
);
uint nbFinalizeSamples = (uint)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
nuint dictionarySize = ZDICT_finalizeDictionary(
dict,
dictBufferCapacity,
dict + tail,
dictBufferCapacity - tail,
samplesBuffer,
samplesSizes,
nbFinalizeSamples,
coverParams.zParams
);
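        /* Presumably a notification message in the reference C implementation;
           in this port the success check is intentionally a no-op. */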
if (!ERR_isError(dictionarySize)) { }
FASTCOVER_ctx_destroy(&ctx);
free(segmentFreqs);
return dictionarySize;
}
}
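    // Usage sketch (illustrative, not part of this change): training a ~100 KB
    // dictionary from concatenated samples. `dictBuffer`, `samplesBuffer`,
    // `samplesSizes` and `nbSamples` are hypothetical caller-side values; d and k
    // are the only required parameters.
    //
    //   var fastParams = new ZDICT_fastCover_params_t { d = 8, k = 200 };
    //   nuint dictSize = ZDICT_trainFromBuffer_fastCover(
    //       dictBuffer, (nuint)(100 * 1024),
    //       samplesBuffer, samplesSizes, nbSamples, fastParams);
    //   if (ERR_isError(dictSize)) { /* capacity must be >= 256, nbSamples > 0 */ }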
/*! ZDICT_optimizeTrainFromBuffer_fastCover():
* The same requirements as above hold for all the parameters except `parameters`.
* This function tries many parameter combinations (specifically, k and d combinations)
* and picks the best parameters. `*parameters` is filled with the best parameters found,
* dictionary constructed with those parameters is stored in `dictBuffer`.
* All of the parameters d, k, steps, f, and accel are optional.
* If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
     * If steps is zero, it defaults to 40.
     * If k is non-zero then we don't check multiple values of k; otherwise we try `steps` values of k in [50, 2000].
     * If f is zero, the default value of 20 is used.
     * If accel is zero, the default value of 1 is used.
*
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
* or an error code, which can be tested with ZDICT_isError().
* On success `*parameters` contains the parameters selected.
* See ZDICT_trainFromBuffer() for details on failure modes.
* Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
*/
public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(
void* dictBuffer,
nuint dictBufferCapacity,
void* samplesBuffer,
nuint* samplesSizes,
uint nbSamples,
ZDICT_fastCover_params_t* parameters
)
{
ZDICT_cover_params_t coverParams;
FASTCOVER_accel_t accelParams;
/* constants */
uint nbThreads = parameters->nbThreads;
double splitPoint = parameters->splitPoint <= 0 ? 0.75 : parameters->splitPoint;
uint kMinD = parameters->d == 0 ? 6 : parameters->d;
uint kMaxD = parameters->d == 0 ? 8 : parameters->d;
uint kMinK = parameters->k == 0 ? 50 : parameters->k;
uint kMaxK = parameters->k == 0 ? 2000 : parameters->k;
uint kSteps = parameters->steps == 0 ? 40 : parameters->steps;
uint kStepSize = (kMaxK - kMinK) / kSteps > 1 ? (kMaxK - kMinK) / kSteps : 1;
uint kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
uint f = parameters->f == 0 ? 20 : parameters->f;
uint accel = parameters->accel == 0 ? 1 : parameters->accel;
const uint shrinkDict = 0;
/* Local variables */
int displayLevel = (int)parameters->zParams.notificationLevel;
uint iteration = 1;
uint d;
uint k;
COVER_best_s best;
void* pool = null;
int warned = 0;
if (splitPoint <= 0 || splitPoint > 1)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
}
if (accel == 0 || accel > 10)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
}
if (kMinK < kMaxD || kMaxK < kMinK)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
}
if (nbSamples == 0)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
}
if (dictBufferCapacity < 256)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
}
if (nbThreads > 1)
{
pool = POOL_create(nbThreads, 1);
if (pool == null)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
}
}
COVER_best_init(&best);
coverParams = new ZDICT_cover_params_t();
FASTCOVER_convertToCoverParams(*parameters, &coverParams);
accelParams = FASTCOVER_defaultAccelParameters[accel];
g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
for (d = kMinD; d <= kMaxD; d += 2)
{
/* Initialize the context for this value of d */
FASTCOVER_ctx_t ctx;
{
nuint initVal = FASTCOVER_ctx_init(
&ctx,
samplesBuffer,
samplesSizes,
nbSamples,
d,
splitPoint,
f,
accelParams
);
if (ERR_isError(initVal))
{
COVER_best_destroy(&best);
POOL_free(pool);
return initVal;
}
}
if (warned == 0)
{
COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);
warned = 1;
}
for (k = kMinK; k <= kMaxK; k += kStepSize)
{
/* Prepare the arguments */
FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)malloc(
(ulong)sizeof(FASTCOVER_tryParameters_data_s)
);
if (data == null)
{
COVER_best_destroy(&best);
FASTCOVER_ctx_destroy(&ctx);
POOL_free(pool);
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
}
data->ctx = &ctx;
data->best = &best;
data->dictBufferCapacity = dictBufferCapacity;
data->parameters = coverParams;
data->parameters.k = k;
data->parameters.d = d;
data->parameters.splitPoint = splitPoint;
data->parameters.steps = kSteps;
data->parameters.shrinkDict = shrinkDict;
data->parameters.zParams.notificationLevel = (uint)g_displayLevel;
if (
FASTCOVER_checkParameters(
data->parameters,
dictBufferCapacity,
data->ctx->f,
accel
) == 0
)
{
free(data);
continue;
}
COVER_best_start(&best);
if (pool != null)
{
POOL_add(
pool,
(delegate* managed<void*, void>)(&FASTCOVER_tryParameters),
data
);
}
else
{
FASTCOVER_tryParameters(data);
}
++iteration;
}
COVER_best_wait(&best);
FASTCOVER_ctx_destroy(&ctx);
}
{
nuint dictSize = best.dictSize;
if (ERR_isError(best.compressedSize))
{
nuint compressedSize = best.compressedSize;
COVER_best_destroy(&best);
POOL_free(pool);
return compressedSize;
}
FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
memcpy(dictBuffer, best.dict, (uint)dictSize);
COVER_best_destroy(&best);
POOL_free(pool);
return dictSize;
}
}
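    // With all-default inputs, the grid above works out to d in {6, 8}, kSteps = 40
    // and kStepSize = (2000 - 50) / 40 = 48, so k takes 41 values per d and
    // kIterations = 2 * 41 = 82 candidate dictionaries are evaluated.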
}

View File

@@ -0,0 +1,7 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct Fingerprint
{
public fixed uint events[1024];
public nuint nbEvents;
}

View File

@@ -0,0 +1,198 @@
using System.Runtime.CompilerServices;
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void FSE_initCState(FSE_CState_t* statePtr, uint* ct)
{
void* ptr = ct;
ushort* u16ptr = (ushort*)ptr;
uint tableLog = MEM_read16(ptr);
statePtr->value = (nint)1 << (int)tableLog;
statePtr->stateTable = u16ptr + 2;
statePtr->symbolTT = ct + 1 + (tableLog != 0 ? 1 << (int)(tableLog - 1) : 1);
statePtr->stateLog = tableLog;
}
/*! FSE_initCState2() :
* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
* uses the smallest state value possible, saving the cost of this symbol */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void FSE_initCState2(ref FSE_CState_t statePtr, uint* ct, uint symbol)
{
FSE_initCState(ref statePtr, ct);
{
FSE_symbolCompressionTransform symbolTT = (
(FSE_symbolCompressionTransform*)statePtr.symbolTT
)[symbol];
ushort* stateTable = (ushort*)statePtr.stateTable;
uint nbBitsOut = symbolTT.deltaNbBits + (1 << 15) >> 16;
statePtr.value = (nint)((nbBitsOut << 16) - symbolTT.deltaNbBits);
statePtr.value = stateTable[
(statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState
];
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void FSE_encodeSymbol(
ref nuint bitC_bitContainer,
ref uint bitC_bitPos,
ref FSE_CState_t statePtr,
uint symbol
)
{
FSE_symbolCompressionTransform symbolTT = (
(FSE_symbolCompressionTransform*)statePtr.symbolTT
)[symbol];
ushort* stateTable = (ushort*)statePtr.stateTable;
uint nbBitsOut = (uint)statePtr.value + symbolTT.deltaNbBits >> 16;
BIT_addBits(ref bitC_bitContainer, ref bitC_bitPos, (nuint)statePtr.value, nbBitsOut);
statePtr.value = stateTable[(statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState];
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void FSE_flushCState(
ref nuint bitC_bitContainer,
ref uint bitC_bitPos,
ref sbyte* bitC_ptr,
sbyte* bitC_endPtr,
ref FSE_CState_t statePtr
)
{
BIT_addBits(
ref bitC_bitContainer,
ref bitC_bitPos,
(nuint)statePtr.value,
statePtr.stateLog
);
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
}
/* FSE_getMaxNbBits() :
* Approximate maximum cost of a symbol, in bits.
     * Fractional values get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
* note 1 : assume symbolValue is valid (<= maxSymbolValue)
* note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint FSE_getMaxNbBits(void* symbolTTPtr, uint symbolValue)
{
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr;
return symbolTT[symbolValue].deltaNbBits + ((1 << 16) - 1) >> 16;
}
/* FSE_bitCost() :
* Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
* note 1 : assume symbolValue is valid (<= maxSymbolValue)
* note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint FSE_bitCost(
void* symbolTTPtr,
uint tableLog,
uint symbolValue,
uint accuracyLog
)
{
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr;
uint minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
uint threshold = minNbBits + 1 << 16;
assert(tableLog < 16);
assert(accuracyLog < 31 - tableLog);
{
uint tableSize = (uint)(1 << (int)tableLog);
uint deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
/* linear interpolation (very approximate) */
uint normalizedDeltaFromThreshold =
deltaFromThreshold << (int)accuracyLog >> (int)tableLog;
uint bitMultiplier = (uint)(1 << (int)accuracyLog);
assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
assert(normalizedDeltaFromThreshold <= bitMultiplier);
return (minNbBits + 1) * bitMultiplier - normalizedDeltaFromThreshold;
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void FSE_initDState(ref FSE_DState_t DStatePtr, ref BIT_DStream_t bitD, uint* dt)
{
void* ptr = dt;
FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr;
DStatePtr.state = BIT_readBits(bitD.bitContainer, ref bitD.bitsConsumed, DTableH->tableLog);
BIT_reloadDStream(
ref bitD.bitContainer,
ref bitD.bitsConsumed,
ref bitD.ptr,
bitD.start,
bitD.limitPtr
);
DStatePtr.table = dt + 1;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static byte FSE_peekSymbol(FSE_DState_t* DStatePtr)
{
FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr->table)[DStatePtr->state];
return DInfo.symbol;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr->table)[DStatePtr->state];
uint nbBits = DInfo.nbBits;
nuint lowBits = BIT_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static byte FSE_decodeSymbol(
ref FSE_DState_t DStatePtr,
nuint bitD_bitContainer,
ref uint bitD_bitsConsumed
)
{
FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state];
uint nbBits = DInfo.nbBits;
byte symbol = DInfo.symbol;
nuint lowBits = BIT_readBits(bitD_bitContainer, ref bitD_bitsConsumed, nbBits);
DStatePtr.state = DInfo.newState + lowBits;
return symbol;
}
/*! FSE_decodeSymbolFast() :
unsafe, only works if no symbol has a probability > 50% */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static byte FSE_decodeSymbolFast(
ref FSE_DState_t DStatePtr,
nuint bitD_bitContainer,
ref uint bitD_bitsConsumed
)
{
FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state];
uint nbBits = DInfo.nbBits;
byte symbol = DInfo.symbol;
nuint lowBits = BIT_readBitsFast(bitD_bitContainer, ref bitD_bitsConsumed, nbBits);
DStatePtr.state = DInfo.newState + lowBits;
return symbol;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint FSE_endOfDState(FSE_DState_t* DStatePtr)
{
return DStatePtr->state == 0 ? 1U : 0U;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void FSE_initCState(ref FSE_CState_t statePtr, uint* ct)
{
void* ptr = ct;
ushort* u16ptr = (ushort*)ptr;
uint tableLog = MEM_read16(ptr);
statePtr.value = (nint)1 << (int)tableLog;
statePtr.stateTable = u16ptr + 2;
statePtr.symbolTT = ct + 1 + (tableLog != 0 ? 1 << (int)(tableLog - 1) : 1);
statePtr.stateLog = tableLog;
}
}

View File

@@ -0,0 +1,782 @@
using System;
using System.Runtime.InteropServices;
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
/* FSE_buildCTable_wksp() :
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
* wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
* workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
*/
private static nuint FSE_buildCTable_wksp(
uint* ct,
short* normalizedCounter,
uint maxSymbolValue,
uint tableLog,
void* workSpace,
nuint wkspSize
)
{
uint tableSize = (uint)(1 << (int)tableLog);
uint tableMask = tableSize - 1;
void* ptr = ct;
ushort* tableU16 = (ushort*)ptr + 2;
/* header */
void* FSCT = (uint*)ptr + 1 + (tableLog != 0 ? tableSize >> 1 : 1);
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)FSCT;
uint step = (tableSize >> 1) + (tableSize >> 3) + 3;
uint maxSV1 = maxSymbolValue + 1;
/* size = maxSV1 */
ushort* cumul = (ushort*)workSpace;
/* size = tableSize */
byte* tableSymbol = (byte*)(cumul + (maxSV1 + 1));
uint highThreshold = tableSize - 1;
assert(((nuint)workSpace & 1) == 0);
if (
sizeof(uint)
* ((maxSymbolValue + 2 + (1UL << (int)tableLog)) / 2 + sizeof(ulong) / sizeof(uint))
> wkspSize
)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
tableU16[-2] = (ushort)tableLog;
tableU16[-1] = (ushort)maxSymbolValue;
assert(tableLog < 16);
{
uint u;
cumul[0] = 0;
for (u = 1; u <= maxSV1; u++)
{
if (normalizedCounter[u - 1] == -1)
{
cumul[u] = (ushort)(cumul[u - 1] + 1);
tableSymbol[highThreshold--] = (byte)(u - 1);
}
else
{
assert(normalizedCounter[u - 1] >= 0);
cumul[u] = (ushort)(cumul[u - 1] + (ushort)normalizedCounter[u - 1]);
assert(cumul[u] >= cumul[u - 1]);
}
}
cumul[maxSV1] = (ushort)(tableSize + 1);
}
if (highThreshold == tableSize - 1)
{
/* size = tableSize + 8 (may write beyond tableSize) */
byte* spread = tableSymbol + tableSize;
{
const ulong add = 0x0101010101010101UL;
nuint pos = 0;
ulong sv = 0;
uint s;
for (s = 0; s < maxSV1; ++s, sv += add)
{
int i;
int n = normalizedCounter[s];
MEM_write64(spread + pos, sv);
for (i = 8; i < n; i += 8)
{
MEM_write64(spread + pos + i, sv);
}
assert(n >= 0);
pos += (nuint)n;
}
}
{
nuint position = 0;
nuint s;
/* Experimentally determined optimal unroll */
const nuint unroll = 2;
assert(tableSize % unroll == 0);
for (s = 0; s < tableSize; s += unroll)
{
nuint u;
for (u = 0; u < unroll; ++u)
{
nuint uPosition = position + u * step & tableMask;
tableSymbol[uPosition] = spread[s + u];
}
position = position + unroll * step & tableMask;
}
assert(position == 0);
}
}
else
{
uint position = 0;
uint symbol;
for (symbol = 0; symbol < maxSV1; symbol++)
{
int nbOccurrences;
int freq = normalizedCounter[symbol];
for (nbOccurrences = 0; nbOccurrences < freq; nbOccurrences++)
{
tableSymbol[position] = (byte)symbol;
position = position + step & tableMask;
while (position > highThreshold)
position = position + step & tableMask;
}
}
assert(position == 0);
}
{
uint u;
for (u = 0; u < tableSize; u++)
{
/* note : static analyzer may not understand tableSymbol is properly initialized */
byte s = tableSymbol[u];
tableU16[cumul[s]++] = (ushort)(tableSize + u);
}
}
{
uint total = 0;
uint s;
for (s = 0; s <= maxSymbolValue; s++)
{
switch (normalizedCounter[s])
{
case 0:
symbolTT[s].deltaNbBits = (tableLog + 1 << 16) - (uint)(1 << (int)tableLog);
break;
case -1:
case 1:
symbolTT[s].deltaNbBits = (tableLog << 16) - (uint)(1 << (int)tableLog);
assert(total <= 2147483647);
symbolTT[s].deltaFindState = (int)(total - 1);
total++;
break;
default:
assert(normalizedCounter[s] > 1);
{
uint maxBitsOut =
tableLog - ZSTD_highbit32((uint)normalizedCounter[s] - 1);
uint minStatePlus = (uint)normalizedCounter[s] << (int)maxBitsOut;
symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
symbolTT[s].deltaFindState = (int)(total - (uint)normalizedCounter[s]);
total += (uint)normalizedCounter[s];
}
break;
}
}
}
return 0;
}
/*-**************************************************************
* FSE NCount encoding
****************************************************************/
private static nuint FSE_NCountWriteBound(uint maxSymbolValue, uint tableLog)
{
nuint maxHeaderSize = ((maxSymbolValue + 1) * tableLog + 4 + 2) / 8 + 1 + 2;
return maxSymbolValue != 0 ? maxHeaderSize : 512;
}
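    // Worked example: maxSymbolValue = 255 and tableLog = 12 bound the header at
    // ((256 * 12 + 4 + 2) / 8) + 1 + 2 = 384 + 3 = 387 bytes; when maxSymbolValue
    // is 0, the conservative fixed bound of 512 bytes is returned instead.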
private static nuint FSE_writeNCount_generic(
void* header,
nuint headerBufferSize,
short* normalizedCounter,
uint maxSymbolValue,
uint tableLog,
uint writeIsSafe
)
{
byte* ostart = (byte*)header;
byte* @out = ostart;
byte* oend = ostart + headerBufferSize;
int nbBits;
int tableSize = 1 << (int)tableLog;
int remaining;
int threshold;
uint bitStream = 0;
int bitCount = 0;
uint symbol = 0;
uint alphabetSize = maxSymbolValue + 1;
int previousIs0 = 0;
bitStream += tableLog - 5 << bitCount;
bitCount += 4;
remaining = tableSize + 1;
threshold = tableSize;
nbBits = (int)tableLog + 1;
while (symbol < alphabetSize && remaining > 1)
{
if (previousIs0 != 0)
{
uint start = symbol;
while (symbol < alphabetSize && normalizedCounter[symbol] == 0)
symbol++;
if (symbol == alphabetSize)
break;
while (symbol >= start + 24)
{
start += 24;
bitStream += 0xFFFFU << bitCount;
if (writeIsSafe == 0 && @out > oend - 2)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
@out[0] = (byte)bitStream;
@out[1] = (byte)(bitStream >> 8);
@out += 2;
bitStream >>= 16;
}
while (symbol >= start + 3)
{
start += 3;
bitStream += 3U << bitCount;
bitCount += 2;
}
bitStream += symbol - start << bitCount;
bitCount += 2;
if (bitCount > 16)
{
if (writeIsSafe == 0 && @out > oend - 2)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
@out[0] = (byte)bitStream;
@out[1] = (byte)(bitStream >> 8);
@out += 2;
bitStream >>= 16;
bitCount -= 16;
}
}
{
int count = normalizedCounter[symbol++];
int max = 2 * threshold - 1 - remaining;
remaining -= count < 0 ? -count : count;
count++;
if (count >= threshold)
count += max;
bitStream += (uint)count << bitCount;
bitCount += nbBits;
bitCount -= count < max ? 1 : 0;
previousIs0 = count == 1 ? 1 : 0;
if (remaining < 1)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
while (remaining < threshold)
{
nbBits--;
threshold >>= 1;
}
}
if (bitCount > 16)
{
if (writeIsSafe == 0 && @out > oend - 2)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
@out[0] = (byte)bitStream;
@out[1] = (byte)(bitStream >> 8);
@out += 2;
bitStream >>= 16;
bitCount -= 16;
}
}
if (remaining != 1)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
assert(symbol <= alphabetSize);
if (writeIsSafe == 0 && @out > oend - 2)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
@out[0] = (byte)bitStream;
@out[1] = (byte)(bitStream >> 8);
@out += (bitCount + 7) / 8;
assert(@out >= ostart);
return (nuint)(@out - ostart);
}
/*! FSE_writeNCount():
Compactly save 'normalizedCounter' into 'buffer'.
@return : size of the compressed table,
or an errorCode, which can be tested using FSE_isError(). */
private static nuint FSE_writeNCount(
void* buffer,
nuint bufferSize,
short* normalizedCounter,
uint maxSymbolValue,
uint tableLog
)
{
if (tableLog > 14 - 2)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
if (tableLog < 5)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
return FSE_writeNCount_generic(
buffer,
bufferSize,
normalizedCounter,
maxSymbolValue,
tableLog,
0
);
return FSE_writeNCount_generic(
buffer,
bufferSize,
normalizedCounter,
maxSymbolValue,
tableLog,
1
);
}
/* provides the minimum logSize to safely represent a distribution */
private static uint FSE_minTableLog(nuint srcSize, uint maxSymbolValue)
{
uint minBitsSrc = ZSTD_highbit32((uint)srcSize) + 1;
uint minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2;
uint minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
assert(srcSize > 1);
return minBits;
}
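    // Worked example: srcSize = 4096 and maxSymbolValue = 255 give
    // minBitsSrc = ZSTD_highbit32(4096) + 1 = 13 and minBitsSymbols =
    // ZSTD_highbit32(255) + 2 = 9, so the minimum safe table log is 9.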
/* *****************************************
* FSE advanced API
***************************************** */
private static uint FSE_optimalTableLog_internal(
uint maxTableLog,
nuint srcSize,
uint maxSymbolValue,
uint minus
)
{
uint maxBitsSrc = ZSTD_highbit32((uint)(srcSize - 1)) - minus;
uint tableLog = maxTableLog;
uint minBits = FSE_minTableLog(srcSize, maxSymbolValue);
assert(srcSize > 1);
if (tableLog == 0)
tableLog = 13 - 2;
if (maxBitsSrc < tableLog)
tableLog = maxBitsSrc;
if (minBits > tableLog)
tableLog = minBits;
if (tableLog < 5)
tableLog = 5;
if (tableLog > 14 - 2)
tableLog = 14 - 2;
return tableLog;
}
/*! FSE_optimalTableLog():
dynamically downsize 'tableLog' when conditions are met.
        It saves CPU time by using smaller tables, while preserving or even improving the compression ratio.
@return : recommended tableLog (necessarily <= 'maxTableLog') */
private static uint FSE_optimalTableLog(uint maxTableLog, nuint srcSize, uint maxSymbolValue)
{
return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
}
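    // Worked example: FSE_optimalTableLog(0, 4096, 255) starts from the default
    // of 13 - 2 = 11, is capped by maxBitsSrc = ZSTD_highbit32(4095) - 2 = 9, and
    // the FSE_minTableLog floor of 9 is already met, so it returns 9.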
/* Secondary normalization method.
To be used when primary method fails. */
private static nuint FSE_normalizeM2(
short* norm,
uint tableLog,
uint* count,
nuint total,
uint maxSymbolValue,
short lowProbCount
)
{
const short NOT_YET_ASSIGNED = -2;
uint s;
uint distributed = 0;
uint ToDistribute;
/* Init */
uint lowThreshold = (uint)(total >> (int)tableLog);
uint lowOne = (uint)(total * 3 >> (int)(tableLog + 1));
for (s = 0; s <= maxSymbolValue; s++)
{
if (count[s] == 0)
{
norm[s] = 0;
continue;
}
if (count[s] <= lowThreshold)
{
norm[s] = lowProbCount;
distributed++;
total -= count[s];
continue;
}
if (count[s] <= lowOne)
{
norm[s] = 1;
distributed++;
total -= count[s];
continue;
}
norm[s] = NOT_YET_ASSIGNED;
}
ToDistribute = (uint)(1 << (int)tableLog) - distributed;
if (ToDistribute == 0)
return 0;
if (total / ToDistribute > lowOne)
{
lowOne = (uint)(total * 3 / (ToDistribute * 2));
for (s = 0; s <= maxSymbolValue; s++)
{
if (norm[s] == NOT_YET_ASSIGNED && count[s] <= lowOne)
{
norm[s] = 1;
distributed++;
total -= count[s];
continue;
}
}
ToDistribute = (uint)(1 << (int)tableLog) - distributed;
}
if (distributed == maxSymbolValue + 1)
{
/* all values are pretty poor;
probably incompressible data (should have already been detected);
find max, then give all remaining points to max */
uint maxV = 0,
maxC = 0;
for (s = 0; s <= maxSymbolValue; s++)
if (count[s] > maxC)
{
maxV = s;
maxC = count[s];
}
norm[maxV] += (short)ToDistribute;
return 0;
}
if (total == 0)
{
for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
if (norm[s] > 0)
{
ToDistribute--;
norm[s]++;
}
return 0;
}
{
ulong vStepLog = 62 - tableLog;
ulong mid = (1UL << (int)(vStepLog - 1)) - 1;
/* scale on remaining */
ulong rStep = (((ulong)1 << (int)vStepLog) * ToDistribute + mid) / (uint)total;
ulong tmpTotal = mid;
for (s = 0; s <= maxSymbolValue; s++)
{
if (norm[s] == NOT_YET_ASSIGNED)
{
ulong end = tmpTotal + count[s] * rStep;
uint sStart = (uint)(tmpTotal >> (int)vStepLog);
uint sEnd = (uint)(end >> (int)vStepLog);
uint weight = sEnd - sStart;
if (weight < 1)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
norm[s] = (short)weight;
tmpTotal = end;
}
}
}
return 0;
}
#if NET7_0_OR_GREATER
private static ReadOnlySpan<uint> Span_rtbTable =>
new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
private static uint* rtbTable =>
(uint*)
System.Runtime.CompilerServices.Unsafe.AsPointer(
ref MemoryMarshal.GetReference(Span_rtbTable)
);
#else
private static readonly uint* rtbTable = GetArrayPointer(
new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }
);
#endif
/*! FSE_normalizeCount():
normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
useLowProbCount is a boolean parameter which trades off compressed size for
faster header decoding. When it is set to 1, the compressed data will be slightly
smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
is a good default, since header deserialization makes a big speed difference.
Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
@return : tableLog,
or an errorCode, which can be tested using FSE_isError() */
private static nuint FSE_normalizeCount(
short* normalizedCounter,
uint tableLog,
uint* count,
nuint total,
uint maxSymbolValue,
uint useLowProbCount
)
{
if (tableLog == 0)
tableLog = 13 - 2;
if (tableLog < 5)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
if (tableLog > 14 - 2)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
if (tableLog < FSE_minTableLog(total, maxSymbolValue))
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
{
short lowProbCount = (short)(useLowProbCount != 0 ? -1 : 1);
ulong scale = 62 - tableLog;
/* <== here, one division ! */
ulong step = ((ulong)1 << 62) / (uint)total;
ulong vStep = 1UL << (int)(scale - 20);
int stillToDistribute = 1 << (int)tableLog;
uint s;
uint largest = 0;
short largestP = 0;
uint lowThreshold = (uint)(total >> (int)tableLog);
for (s = 0; s <= maxSymbolValue; s++)
{
if (count[s] == total)
return 0;
if (count[s] == 0)
{
normalizedCounter[s] = 0;
continue;
}
if (count[s] <= lowThreshold)
{
normalizedCounter[s] = lowProbCount;
stillToDistribute--;
}
else
{
short proba = (short)(count[s] * step >> (int)scale);
if (proba < 8)
{
ulong restToBeat = vStep * rtbTable[proba];
proba += (short)(
count[s] * step - ((ulong)proba << (int)scale) > restToBeat ? 1 : 0
);
}
if (proba > largestP)
{
largestP = proba;
largest = s;
}
normalizedCounter[s] = proba;
stillToDistribute -= proba;
}
}
if (-stillToDistribute >= normalizedCounter[largest] >> 1)
{
/* corner case, need another normalization method */
nuint errorCode = FSE_normalizeM2(
normalizedCounter,
tableLog,
count,
total,
maxSymbolValue,
lowProbCount
);
if (ERR_isError(errorCode))
return errorCode;
}
else
normalizedCounter[largest] += (short)stillToDistribute;
}
return tableLog;
}
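    // Note on rounding: counts are scaled by step = 2^62 / total, and any residual
    // difference from the 2^tableLog target is absorbed by the most frequent
    // symbol; if that correction would cost it half of its slots or more, the
    // slower FSE_normalizeM2() pass redistributes instead.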
/* fake FSE_CTable, for rle input (always same symbol) */
private static nuint FSE_buildCTable_rle(uint* ct, byte symbolValue)
{
void* ptr = ct;
ushort* tableU16 = (ushort*)ptr + 2;
void* FSCTptr = (uint*)ptr + 2;
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)FSCTptr;
tableU16[-2] = 0;
tableU16[-1] = symbolValue;
tableU16[0] = 0;
tableU16[1] = 0;
symbolTT[symbolValue].deltaNbBits = 0;
symbolTT[symbolValue].deltaFindState = 0;
return 0;
}
private static nuint FSE_compress_usingCTable_generic(
void* dst,
nuint dstSize,
void* src,
nuint srcSize,
uint* ct,
uint fast
)
{
byte* istart = (byte*)src;
byte* iend = istart + srcSize;
byte* ip = iend;
BIT_CStream_t bitC;
System.Runtime.CompilerServices.Unsafe.SkipInit(out bitC);
FSE_CState_t CState1,
CState2;
System.Runtime.CompilerServices.Unsafe.SkipInit(out CState1);
System.Runtime.CompilerServices.Unsafe.SkipInit(out CState2);
if (srcSize <= 2)
return 0;
{
nuint initError = BIT_initCStream(ref bitC, dst, dstSize);
if (ERR_isError(initError))
return 0;
}
nuint bitC_bitContainer = bitC.bitContainer;
uint bitC_bitPos = bitC.bitPos;
sbyte* bitC_ptr = bitC.ptr;
sbyte* bitC_endPtr = bitC.endPtr;
if ((srcSize & 1) != 0)
{
FSE_initCState2(ref CState1, ct, *--ip);
FSE_initCState2(ref CState2, ct, *--ip);
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
if (fast != 0)
BIT_flushBitsFast(
ref bitC_bitContainer,
ref bitC_bitPos,
ref bitC_ptr,
bitC_endPtr
);
else
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
}
else
{
FSE_initCState2(ref CState2, ct, *--ip);
FSE_initCState2(ref CState1, ct, *--ip);
}
srcSize -= 2;
if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7 && (srcSize & 2) != 0)
{
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip);
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
if (fast != 0)
BIT_flushBitsFast(
ref bitC_bitContainer,
ref bitC_bitPos,
ref bitC_ptr,
bitC_endPtr
);
else
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
}
while (ip > istart)
{
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip);
if (sizeof(nuint) * 8 < (14 - 2) * 2 + 7)
if (fast != 0)
BIT_flushBitsFast(
ref bitC_bitContainer,
ref bitC_bitPos,
ref bitC_ptr,
bitC_endPtr
);
else
BIT_flushBits(
ref bitC_bitContainer,
ref bitC_bitPos,
ref bitC_ptr,
bitC_endPtr
);
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7)
{
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip);
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
}
if (fast != 0)
BIT_flushBitsFast(
ref bitC_bitContainer,
ref bitC_bitPos,
ref bitC_ptr,
bitC_endPtr
);
else
BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
}
FSE_flushCState(
ref bitC_bitContainer,
ref bitC_bitPos,
ref bitC_ptr,
bitC_endPtr,
ref CState2
);
FSE_flushCState(
ref bitC_bitContainer,
ref bitC_bitPos,
ref bitC_ptr,
bitC_endPtr,
ref CState1
);
return BIT_closeCStream(
ref bitC_bitContainer,
ref bitC_bitPos,
bitC_ptr,
bitC_endPtr,
bitC.startPtr
);
}
/*! FSE_compress_usingCTable():
Compress `src` using `ct` into `dst` which must be already allocated.
@return : size of compressed data (<= `dstCapacity`),
or 0 if compressed data could not fit into `dst`,
or an errorCode, which can be tested using FSE_isError() */
private static nuint FSE_compress_usingCTable(
void* dst,
nuint dstSize,
void* src,
nuint srcSize,
uint* ct
)
{
uint fast = dstSize >= srcSize + (srcSize >> 7) + 4 + (nuint)sizeof(nuint) ? 1U : 0U;
if (fast != 0)
return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
else
return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
}
/*-*****************************************
* Tool functions
******************************************/
private static nuint FSE_compressBound(nuint size)
{
return 512 + (size + (size >> 7) + 4 + (nuint)sizeof(nuint));
}
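    // Worked example: on a 64-bit runtime, FSE_compressBound(1024) =
    // 512 + (1024 + 1024/128 + 4 + 8) = 512 + 1044 = 1556 bytes. The same margin,
    // minus the 512-byte slack, is what FSE_compress_usingCTable() checks when
    // deciding whether the unguarded fast bit-flush path is safe.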
}

View File

@@ -0,0 +1,462 @@
using System.Runtime.CompilerServices;
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
private static nuint FSE_buildDTable_internal(
uint* dt,
short* normalizedCounter,
uint maxSymbolValue,
uint tableLog,
void* workSpace,
nuint wkspSize
)
{
/* because *dt is unsigned, 32-bits aligned on 32-bits */
void* tdPtr = dt + 1;
FSE_decode_t* tableDecode = (FSE_decode_t*)tdPtr;
ushort* symbolNext = (ushort*)workSpace;
byte* spread = (byte*)(symbolNext + maxSymbolValue + 1);
uint maxSV1 = maxSymbolValue + 1;
uint tableSize = (uint)(1 << (int)tableLog);
uint highThreshold = tableSize - 1;
if (sizeof(short) * (maxSymbolValue + 1) + (1UL << (int)tableLog) + 8 > wkspSize)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge));
if (maxSymbolValue > 255)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge));
if (tableLog > 14 - 2)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
{
FSE_DTableHeader DTableH;
DTableH.tableLog = (ushort)tableLog;
DTableH.fastMode = 1;
{
short largeLimit = (short)(1 << (int)(tableLog - 1));
uint s;
for (s = 0; s < maxSV1; s++)
{
if (normalizedCounter[s] == -1)
{
tableDecode[highThreshold--].symbol = (byte)s;
symbolNext[s] = 1;
}
else
{
if (normalizedCounter[s] >= largeLimit)
DTableH.fastMode = 0;
symbolNext[s] = (ushort)normalizedCounter[s];
}
}
}
memcpy(dt, &DTableH, (uint)sizeof(FSE_DTableHeader));
}
if (highThreshold == tableSize - 1)
{
nuint tableMask = tableSize - 1;
nuint step = (tableSize >> 1) + (tableSize >> 3) + 3;
{
const ulong add = 0x0101010101010101UL;
nuint pos = 0;
ulong sv = 0;
uint s;
for (s = 0; s < maxSV1; ++s, sv += add)
{
int i;
int n = normalizedCounter[s];
MEM_write64(spread + pos, sv);
for (i = 8; i < n; i += 8)
{
MEM_write64(spread + pos + i, sv);
}
pos += (nuint)n;
}
}
{
nuint position = 0;
nuint s;
const nuint unroll = 2;
assert(tableSize % unroll == 0);
for (s = 0; s < tableSize; s += unroll)
{
nuint u;
for (u = 0; u < unroll; ++u)
{
nuint uPosition = position + u * step & tableMask;
tableDecode[uPosition].symbol = spread[s + u];
}
position = position + unroll * step & tableMask;
}
assert(position == 0);
}
}
else
{
uint tableMask = tableSize - 1;
uint step = (tableSize >> 1) + (tableSize >> 3) + 3;
uint s,
position = 0;
for (s = 0; s < maxSV1; s++)
{
int i;
for (i = 0; i < normalizedCounter[s]; i++)
{
tableDecode[position].symbol = (byte)s;
position = position + step & tableMask;
while (position > highThreshold)
position = position + step & tableMask;
}
}
if (position != 0)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
}
{
uint u;
for (u = 0; u < tableSize; u++)
{
byte symbol = tableDecode[u].symbol;
uint nextState = symbolNext[symbol]++;
tableDecode[u].nbBits = (byte)(tableLog - ZSTD_highbit32(nextState));
tableDecode[u].newState = (ushort)(
(nextState << tableDecode[u].nbBits) - tableSize
);
}
}
return 0;
}
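    // fastMode is cleared above whenever some symbol owns at least half the table
    // (probability > 50%), which is exactly the precondition documented on
    // FSE_decodeSymbolFast(); FSE_decompress_wksp_body() reads the flag back from
    // the DTable header to choose between the fast and safe decode loops.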
private static nuint FSE_buildDTable_wksp(
uint* dt,
short* normalizedCounter,
uint maxSymbolValue,
uint tableLog,
void* workSpace,
nuint wkspSize
)
{
return FSE_buildDTable_internal(
dt,
normalizedCounter,
maxSymbolValue,
tableLog,
workSpace,
wkspSize
);
}
/*-*******************************************************
* Decompression (Byte symbols)
*********************************************************/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint FSE_decompress_usingDTable_generic(
void* dst,
nuint maxDstSize,
void* cSrc,
nuint cSrcSize,
uint* dt,
uint fast
)
{
byte* ostart = (byte*)dst;
byte* op = ostart;
byte* omax = op + maxDstSize;
byte* olimit = omax - 3;
BIT_DStream_t bitD;
System.Runtime.CompilerServices.Unsafe.SkipInit(out bitD);
FSE_DState_t state1;
System.Runtime.CompilerServices.Unsafe.SkipInit(out state1);
FSE_DState_t state2;
System.Runtime.CompilerServices.Unsafe.SkipInit(out state2);
{
/* Init */
nuint _var_err__ = BIT_initDStream(ref bitD, cSrc, cSrcSize);
if (ERR_isError(_var_err__))
return _var_err__;
}
FSE_initDState(ref state1, ref bitD, dt);
FSE_initDState(ref state2, ref bitD, dt);
nuint bitD_bitContainer = bitD.bitContainer;
uint bitD_bitsConsumed = bitD.bitsConsumed;
sbyte* bitD_ptr = bitD.ptr;
sbyte* bitD_start = bitD.start;
sbyte* bitD_limitPtr = bitD.limitPtr;
if (
BIT_reloadDStream(
ref bitD_bitContainer,
ref bitD_bitsConsumed,
ref bitD_ptr,
bitD_start,
bitD_limitPtr
) == BIT_DStream_status.BIT_DStream_overflow
)
{
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
}
for (
;
BIT_reloadDStream(
ref bitD_bitContainer,
ref bitD_bitsConsumed,
ref bitD_ptr,
bitD_start,
bitD_limitPtr
) == BIT_DStream_status.BIT_DStream_unfinished
&& op < olimit;
op += 4
)
{
op[0] =
fast != 0
? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed)
: FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed);
if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8)
BIT_reloadDStream(
ref bitD_bitContainer,
ref bitD_bitsConsumed,
ref bitD_ptr,
bitD_start,
bitD_limitPtr
);
op[1] =
fast != 0
? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed)
: FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed);
if ((14 - 2) * 4 + 7 > sizeof(nuint) * 8)
{
if (
BIT_reloadDStream(
ref bitD_bitContainer,
ref bitD_bitsConsumed,
ref bitD_ptr,
bitD_start,
bitD_limitPtr
) > BIT_DStream_status.BIT_DStream_unfinished
)
{
op += 2;
break;
}
}
op[2] =
fast != 0
? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed)
: FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed);
if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8)
BIT_reloadDStream(
ref bitD_bitContainer,
ref bitD_bitsConsumed,
ref bitD_ptr,
bitD_start,
bitD_limitPtr
);
op[3] =
fast != 0
? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed)
: FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed);
}
while (true)
{
if (op > omax - 2)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
*op++ =
fast != 0
? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed)
: FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed);
if (
BIT_reloadDStream(
ref bitD_bitContainer,
ref bitD_bitsConsumed,
ref bitD_ptr,
bitD_start,
bitD_limitPtr
) == BIT_DStream_status.BIT_DStream_overflow
)
{
*op++ =
fast != 0
? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed)
: FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed);
break;
}
if (op > omax - 2)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
*op++ =
fast != 0
? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed)
: FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed);
if (
BIT_reloadDStream(
ref bitD_bitContainer,
ref bitD_bitsConsumed,
ref bitD_ptr,
bitD_start,
bitD_limitPtr
) == BIT_DStream_status.BIT_DStream_overflow
)
{
*op++ =
fast != 0
? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed)
: FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed);
break;
}
}
assert(op >= ostart);
return (nuint)(op - ostart);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint FSE_decompress_wksp_body(
void* dst,
nuint dstCapacity,
void* cSrc,
nuint cSrcSize,
uint maxLog,
void* workSpace,
nuint wkspSize,
int bmi2
)
{
byte* istart = (byte*)cSrc;
byte* ip = istart;
uint tableLog;
uint maxSymbolValue = 255;
FSE_DecompressWksp* wksp = (FSE_DecompressWksp*)workSpace;
nuint dtablePos = (nuint)(sizeof(FSE_DecompressWksp) / sizeof(uint));
uint* dtable = (uint*)workSpace + dtablePos;
if (wkspSize < (nuint)sizeof(FSE_DecompressWksp))
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
{
nuint NCountLength = FSE_readNCount_bmi2(
wksp->ncount,
&maxSymbolValue,
&tableLog,
istart,
cSrcSize,
bmi2
);
if (ERR_isError(NCountLength))
return NCountLength;
if (tableLog > maxLog)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
assert(NCountLength <= cSrcSize);
ip += NCountLength;
cSrcSize -= NCountLength;
}
if (
(
(ulong)(1 + (1 << (int)tableLog) + 1)
+ (
sizeof(short) * (maxSymbolValue + 1)
+ (1UL << (int)tableLog)
+ 8
+ sizeof(uint)
- 1
) / sizeof(uint)
+ (255 + 1) / 2
+ 1
) * sizeof(uint)
> wkspSize
)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
assert(
(nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint))
<= wkspSize
);
workSpace =
(byte*)workSpace
+ sizeof(FSE_DecompressWksp)
+ (1 + (1 << (int)tableLog)) * sizeof(uint);
wkspSize -= (nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint));
{
nuint _var_err__ = FSE_buildDTable_internal(
dtable,
wksp->ncount,
maxSymbolValue,
tableLog,
workSpace,
wkspSize
);
if (ERR_isError(_var_err__))
return _var_err__;
}
{
void* ptr = dtable;
FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr;
uint fastMode = DTableH->fastMode;
if (fastMode != 0)
return FSE_decompress_usingDTable_generic(
dst,
dstCapacity,
ip,
cSrcSize,
dtable,
1
);
return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0);
}
}
/* Avoids the FORCE_INLINE of the _body() function. */
private static nuint FSE_decompress_wksp_body_default(
void* dst,
nuint dstCapacity,
void* cSrc,
nuint cSrcSize,
uint maxLog,
void* workSpace,
nuint wkspSize
)
{
return FSE_decompress_wksp_body(
dst,
dstCapacity,
cSrc,
cSrcSize,
maxLog,
workSpace,
wkspSize,
0
);
}
private static nuint FSE_decompress_wksp_bmi2(
void* dst,
nuint dstCapacity,
void* cSrc,
nuint cSrcSize,
uint maxLog,
void* workSpace,
nuint wkspSize,
int bmi2
)
{
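        // Note: the bmi2-specialized body is not ported; the parameter is kept for
        // signature compatibility and the default body is always used.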
return FSE_decompress_wksp_body_default(
dst,
dstCapacity,
cSrc,
cSrcSize,
maxLog,
workSpace,
wkspSize
);
}
}

View File

@@ -0,0 +1,7 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public enum HIST_checkInput_e
{
trustInput,
checkMaxSymbolValue,
}

View File

@@ -0,0 +1,22 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct HUF_CStream_t
{
public _bitContainer_e__FixedBuffer bitContainer;
public _bitPos_e__FixedBuffer bitPos;
public byte* startPtr;
public byte* ptr;
public byte* endPtr;
public unsafe struct _bitContainer_e__FixedBuffer
{
public nuint e0;
public nuint e1;
}
public unsafe struct _bitPos_e__FixedBuffer
{
public nuint e0;
public nuint e1;
}
}

View File

@@ -0,0 +1,8 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct HUF_CTableHeader
{
public byte tableLog;
public byte maxSymbolValue;
public fixed byte unused[6];
}

View File

@@ -0,0 +1,9 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct HUF_CompressWeightsWksp
{
public fixed uint CTable[59];
public fixed uint scratchBuffer[41];
public fixed uint count[13];
public fixed short norm[13];
}

View File

@@ -0,0 +1,11 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/*-***************************/
/* single-symbol decoding */
/*-***************************/
public struct HUF_DEltX1
{
/* single-symbol decoding */
public byte nbBits;
public byte @byte;
}

View File

@@ -0,0 +1,12 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/* *************************/
/* double-symbols decoding */
/* *************************/
public struct HUF_DEltX2
{
/* double-symbols decoding */
public ushort sequence;
public byte nbBits;
public byte length;
}

View File

@@ -0,0 +1,49 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/**
* The input/output arguments to the Huffman fast decoding loop:
*
* ip [in/out] - The input pointers, must be updated to reflect what is consumed.
* op [in/out] - The output pointers, must be updated to reflect what is written.
* bits [in/out] - The bitstream containers, must be updated to reflect the current state.
* dt [in] - The decoding table.
* ilowest [in] - The beginning of the valid range of the input. Decoders may read
* down to this pointer. It may be below iend[0].
* oend [in] - The end of the output stream. op[3] must not cross oend.
* iend [in] - The end of each input stream. ip[i] may cross iend[i],
* as long as it is above ilowest, but that indicates corruption.
*/
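// The fixed length of 4 below matches zstd's four-stream Huffman block layout:
// the fast decoding loop advances all four input streams and their output
// segments in lockstep.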
public unsafe struct HUF_DecompressFastArgs
{
public _ip_e__FixedBuffer ip;
public _op_e__FixedBuffer op;
public fixed ulong bits[4];
public void* dt;
public byte* ilowest;
public byte* oend;
public _iend_e__FixedBuffer iend;
public unsafe struct _ip_e__FixedBuffer
{
public byte* e0;
public byte* e1;
public byte* e2;
public byte* e3;
}
public unsafe struct _op_e__FixedBuffer
{
public byte* e0;
public byte* e1;
public byte* e2;
public byte* e3;
}
public unsafe struct _iend_e__FixedBuffer
{
public byte* e0;
public byte* e1;
public byte* e2;
public byte* e3;
}
}

View File

@@ -0,0 +1,10 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct HUF_ReadDTableX1_Workspace
{
public fixed uint rankVal[13];
public fixed uint rankStart[13];
public fixed uint statsWksp[219];
public fixed byte symbols[256];
public fixed byte huffWeight[256];
}

View File

@@ -0,0 +1,307 @@
using System.Runtime.CompilerServices;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct HUF_ReadDTableX2_Workspace
{
public _rankVal_e__FixedBuffer rankVal;
public fixed uint rankStats[13];
public fixed uint rankStart0[15];
public _sortedSymbol_e__FixedBuffer sortedSymbol;
public fixed byte weightList[256];
public fixed uint calleeWksp[219];
#if NET8_0_OR_GREATER
[InlineArray(12)]
public unsafe struct _rankVal_e__FixedBuffer
{
public rankValCol_t e0;
}
#else
public unsafe struct _rankVal_e__FixedBuffer
{
public rankValCol_t e0;
public rankValCol_t e1;
public rankValCol_t e2;
public rankValCol_t e3;
public rankValCol_t e4;
public rankValCol_t e5;
public rankValCol_t e6;
public rankValCol_t e7;
public rankValCol_t e8;
public rankValCol_t e9;
public rankValCol_t e10;
public rankValCol_t e11;
}
#endif
#if NET8_0_OR_GREATER
[InlineArray(256)]
public unsafe struct _sortedSymbol_e__FixedBuffer
{
public sortedSymbol_t e0;
}
#else
public unsafe struct _sortedSymbol_e__FixedBuffer
{
public sortedSymbol_t e0;
public sortedSymbol_t e1;
public sortedSymbol_t e2;
public sortedSymbol_t e3;
public sortedSymbol_t e4;
public sortedSymbol_t e5;
public sortedSymbol_t e6;
public sortedSymbol_t e7;
public sortedSymbol_t e8;
public sortedSymbol_t e9;
public sortedSymbol_t e10;
public sortedSymbol_t e11;
public sortedSymbol_t e12;
public sortedSymbol_t e13;
public sortedSymbol_t e14;
public sortedSymbol_t e15;
public sortedSymbol_t e16;
public sortedSymbol_t e17;
public sortedSymbol_t e18;
public sortedSymbol_t e19;
public sortedSymbol_t e20;
public sortedSymbol_t e21;
public sortedSymbol_t e22;
public sortedSymbol_t e23;
public sortedSymbol_t e24;
public sortedSymbol_t e25;
public sortedSymbol_t e26;
public sortedSymbol_t e27;
public sortedSymbol_t e28;
public sortedSymbol_t e29;
public sortedSymbol_t e30;
public sortedSymbol_t e31;
public sortedSymbol_t e32;
public sortedSymbol_t e33;
public sortedSymbol_t e34;
public sortedSymbol_t e35;
public sortedSymbol_t e36;
public sortedSymbol_t e37;
public sortedSymbol_t e38;
public sortedSymbol_t e39;
public sortedSymbol_t e40;
public sortedSymbol_t e41;
public sortedSymbol_t e42;
public sortedSymbol_t e43;
public sortedSymbol_t e44;
public sortedSymbol_t e45;
public sortedSymbol_t e46;
public sortedSymbol_t e47;
public sortedSymbol_t e48;
public sortedSymbol_t e49;
public sortedSymbol_t e50;
public sortedSymbol_t e51;
public sortedSymbol_t e52;
public sortedSymbol_t e53;
public sortedSymbol_t e54;
public sortedSymbol_t e55;
public sortedSymbol_t e56;
public sortedSymbol_t e57;
public sortedSymbol_t e58;
public sortedSymbol_t e59;
public sortedSymbol_t e60;
public sortedSymbol_t e61;
public sortedSymbol_t e62;
public sortedSymbol_t e63;
public sortedSymbol_t e64;
public sortedSymbol_t e65;
public sortedSymbol_t e66;
public sortedSymbol_t e67;
public sortedSymbol_t e68;
public sortedSymbol_t e69;
public sortedSymbol_t e70;
public sortedSymbol_t e71;
public sortedSymbol_t e72;
public sortedSymbol_t e73;
public sortedSymbol_t e74;
public sortedSymbol_t e75;
public sortedSymbol_t e76;
public sortedSymbol_t e77;
public sortedSymbol_t e78;
public sortedSymbol_t e79;
public sortedSymbol_t e80;
public sortedSymbol_t e81;
public sortedSymbol_t e82;
public sortedSymbol_t e83;
public sortedSymbol_t e84;
public sortedSymbol_t e85;
public sortedSymbol_t e86;
public sortedSymbol_t e87;
public sortedSymbol_t e88;
public sortedSymbol_t e89;
public sortedSymbol_t e90;
public sortedSymbol_t e91;
public sortedSymbol_t e92;
public sortedSymbol_t e93;
public sortedSymbol_t e94;
public sortedSymbol_t e95;
public sortedSymbol_t e96;
public sortedSymbol_t e97;
public sortedSymbol_t e98;
public sortedSymbol_t e99;
public sortedSymbol_t e100;
public sortedSymbol_t e101;
public sortedSymbol_t e102;
public sortedSymbol_t e103;
public sortedSymbol_t e104;
public sortedSymbol_t e105;
public sortedSymbol_t e106;
public sortedSymbol_t e107;
public sortedSymbol_t e108;
public sortedSymbol_t e109;
public sortedSymbol_t e110;
public sortedSymbol_t e111;
public sortedSymbol_t e112;
public sortedSymbol_t e113;
public sortedSymbol_t e114;
public sortedSymbol_t e115;
public sortedSymbol_t e116;
public sortedSymbol_t e117;
public sortedSymbol_t e118;
public sortedSymbol_t e119;
public sortedSymbol_t e120;
public sortedSymbol_t e121;
public sortedSymbol_t e122;
public sortedSymbol_t e123;
public sortedSymbol_t e124;
public sortedSymbol_t e125;
public sortedSymbol_t e126;
public sortedSymbol_t e127;
public sortedSymbol_t e128;
public sortedSymbol_t e129;
public sortedSymbol_t e130;
public sortedSymbol_t e131;
public sortedSymbol_t e132;
public sortedSymbol_t e133;
public sortedSymbol_t e134;
public sortedSymbol_t e135;
public sortedSymbol_t e136;
public sortedSymbol_t e137;
public sortedSymbol_t e138;
public sortedSymbol_t e139;
public sortedSymbol_t e140;
public sortedSymbol_t e141;
public sortedSymbol_t e142;
public sortedSymbol_t e143;
public sortedSymbol_t e144;
public sortedSymbol_t e145;
public sortedSymbol_t e146;
public sortedSymbol_t e147;
public sortedSymbol_t e148;
public sortedSymbol_t e149;
public sortedSymbol_t e150;
public sortedSymbol_t e151;
public sortedSymbol_t e152;
public sortedSymbol_t e153;
public sortedSymbol_t e154;
public sortedSymbol_t e155;
public sortedSymbol_t e156;
public sortedSymbol_t e157;
public sortedSymbol_t e158;
public sortedSymbol_t e159;
public sortedSymbol_t e160;
public sortedSymbol_t e161;
public sortedSymbol_t e162;
public sortedSymbol_t e163;
public sortedSymbol_t e164;
public sortedSymbol_t e165;
public sortedSymbol_t e166;
public sortedSymbol_t e167;
public sortedSymbol_t e168;
public sortedSymbol_t e169;
public sortedSymbol_t e170;
public sortedSymbol_t e171;
public sortedSymbol_t e172;
public sortedSymbol_t e173;
public sortedSymbol_t e174;
public sortedSymbol_t e175;
public sortedSymbol_t e176;
public sortedSymbol_t e177;
public sortedSymbol_t e178;
public sortedSymbol_t e179;
public sortedSymbol_t e180;
public sortedSymbol_t e181;
public sortedSymbol_t e182;
public sortedSymbol_t e183;
public sortedSymbol_t e184;
public sortedSymbol_t e185;
public sortedSymbol_t e186;
public sortedSymbol_t e187;
public sortedSymbol_t e188;
public sortedSymbol_t e189;
public sortedSymbol_t e190;
public sortedSymbol_t e191;
public sortedSymbol_t e192;
public sortedSymbol_t e193;
public sortedSymbol_t e194;
public sortedSymbol_t e195;
public sortedSymbol_t e196;
public sortedSymbol_t e197;
public sortedSymbol_t e198;
public sortedSymbol_t e199;
public sortedSymbol_t e200;
public sortedSymbol_t e201;
public sortedSymbol_t e202;
public sortedSymbol_t e203;
public sortedSymbol_t e204;
public sortedSymbol_t e205;
public sortedSymbol_t e206;
public sortedSymbol_t e207;
public sortedSymbol_t e208;
public sortedSymbol_t e209;
public sortedSymbol_t e210;
public sortedSymbol_t e211;
public sortedSymbol_t e212;
public sortedSymbol_t e213;
public sortedSymbol_t e214;
public sortedSymbol_t e215;
public sortedSymbol_t e216;
public sortedSymbol_t e217;
public sortedSymbol_t e218;
public sortedSymbol_t e219;
public sortedSymbol_t e220;
public sortedSymbol_t e221;
public sortedSymbol_t e222;
public sortedSymbol_t e223;
public sortedSymbol_t e224;
public sortedSymbol_t e225;
public sortedSymbol_t e226;
public sortedSymbol_t e227;
public sortedSymbol_t e228;
public sortedSymbol_t e229;
public sortedSymbol_t e230;
public sortedSymbol_t e231;
public sortedSymbol_t e232;
public sortedSymbol_t e233;
public sortedSymbol_t e234;
public sortedSymbol_t e235;
public sortedSymbol_t e236;
public sortedSymbol_t e237;
public sortedSymbol_t e238;
public sortedSymbol_t e239;
public sortedSymbol_t e240;
public sortedSymbol_t e241;
public sortedSymbol_t e242;
public sortedSymbol_t e243;
public sortedSymbol_t e244;
public sortedSymbol_t e245;
public sortedSymbol_t e246;
public sortedSymbol_t e247;
public sortedSymbol_t e248;
public sortedSymbol_t e249;
public sortedSymbol_t e250;
public sortedSymbol_t e251;
public sortedSymbol_t e252;
public sortedSymbol_t e253;
public sortedSymbol_t e254;
public sortedSymbol_t e255;
}
#endif
}

View File

@@ -0,0 +1,10 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct HUF_WriteCTableWksp
{
public HUF_CompressWeightsWksp wksp;
/* precomputed conversion table */
public fixed byte bitsToWeight[13];
public fixed byte huffWeight[255];
}

View File

@@ -0,0 +1,739 @@
using System.Runtime.CompilerServices;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public struct HUF_buildCTable_wksp_tables
{
public _huffNodeTbl_e__FixedBuffer huffNodeTbl;
public _rankPosition_e__FixedBuffer rankPosition;
#if NET8_0_OR_GREATER
[InlineArray(512)]
public unsafe struct _huffNodeTbl_e__FixedBuffer
{
public nodeElt_s e0;
}
#else
public unsafe struct _huffNodeTbl_e__FixedBuffer
{
public nodeElt_s e0;
public nodeElt_s e1;
public nodeElt_s e2;
public nodeElt_s e3;
public nodeElt_s e4;
public nodeElt_s e5;
public nodeElt_s e6;
public nodeElt_s e7;
public nodeElt_s e8;
public nodeElt_s e9;
public nodeElt_s e10;
public nodeElt_s e11;
public nodeElt_s e12;
public nodeElt_s e13;
public nodeElt_s e14;
public nodeElt_s e15;
public nodeElt_s e16;
public nodeElt_s e17;
public nodeElt_s e18;
public nodeElt_s e19;
public nodeElt_s e20;
public nodeElt_s e21;
public nodeElt_s e22;
public nodeElt_s e23;
public nodeElt_s e24;
public nodeElt_s e25;
public nodeElt_s e26;
public nodeElt_s e27;
public nodeElt_s e28;
public nodeElt_s e29;
public nodeElt_s e30;
public nodeElt_s e31;
public nodeElt_s e32;
public nodeElt_s e33;
public nodeElt_s e34;
public nodeElt_s e35;
public nodeElt_s e36;
public nodeElt_s e37;
public nodeElt_s e38;
public nodeElt_s e39;
public nodeElt_s e40;
public nodeElt_s e41;
public nodeElt_s e42;
public nodeElt_s e43;
public nodeElt_s e44;
public nodeElt_s e45;
public nodeElt_s e46;
public nodeElt_s e47;
public nodeElt_s e48;
public nodeElt_s e49;
public nodeElt_s e50;
public nodeElt_s e51;
public nodeElt_s e52;
public nodeElt_s e53;
public nodeElt_s e54;
public nodeElt_s e55;
public nodeElt_s e56;
public nodeElt_s e57;
public nodeElt_s e58;
public nodeElt_s e59;
public nodeElt_s e60;
public nodeElt_s e61;
public nodeElt_s e62;
public nodeElt_s e63;
public nodeElt_s e64;
public nodeElt_s e65;
public nodeElt_s e66;
public nodeElt_s e67;
public nodeElt_s e68;
public nodeElt_s e69;
public nodeElt_s e70;
public nodeElt_s e71;
public nodeElt_s e72;
public nodeElt_s e73;
public nodeElt_s e74;
public nodeElt_s e75;
public nodeElt_s e76;
public nodeElt_s e77;
public nodeElt_s e78;
public nodeElt_s e79;
public nodeElt_s e80;
public nodeElt_s e81;
public nodeElt_s e82;
public nodeElt_s e83;
public nodeElt_s e84;
public nodeElt_s e85;
public nodeElt_s e86;
public nodeElt_s e87;
public nodeElt_s e88;
public nodeElt_s e89;
public nodeElt_s e90;
public nodeElt_s e91;
public nodeElt_s e92;
public nodeElt_s e93;
public nodeElt_s e94;
public nodeElt_s e95;
public nodeElt_s e96;
public nodeElt_s e97;
public nodeElt_s e98;
public nodeElt_s e99;
public nodeElt_s e100;
public nodeElt_s e101;
public nodeElt_s e102;
public nodeElt_s e103;
public nodeElt_s e104;
public nodeElt_s e105;
public nodeElt_s e106;
public nodeElt_s e107;
public nodeElt_s e108;
public nodeElt_s e109;
public nodeElt_s e110;
public nodeElt_s e111;
public nodeElt_s e112;
public nodeElt_s e113;
public nodeElt_s e114;
public nodeElt_s e115;
public nodeElt_s e116;
public nodeElt_s e117;
public nodeElt_s e118;
public nodeElt_s e119;
public nodeElt_s e120;
public nodeElt_s e121;
public nodeElt_s e122;
public nodeElt_s e123;
public nodeElt_s e124;
public nodeElt_s e125;
public nodeElt_s e126;
public nodeElt_s e127;
public nodeElt_s e128;
public nodeElt_s e129;
public nodeElt_s e130;
public nodeElt_s e131;
public nodeElt_s e132;
public nodeElt_s e133;
public nodeElt_s e134;
public nodeElt_s e135;
public nodeElt_s e136;
public nodeElt_s e137;
public nodeElt_s e138;
public nodeElt_s e139;
public nodeElt_s e140;
public nodeElt_s e141;
public nodeElt_s e142;
public nodeElt_s e143;
public nodeElt_s e144;
public nodeElt_s e145;
public nodeElt_s e146;
public nodeElt_s e147;
public nodeElt_s e148;
public nodeElt_s e149;
public nodeElt_s e150;
public nodeElt_s e151;
public nodeElt_s e152;
public nodeElt_s e153;
public nodeElt_s e154;
public nodeElt_s e155;
public nodeElt_s e156;
public nodeElt_s e157;
public nodeElt_s e158;
public nodeElt_s e159;
public nodeElt_s e160;
public nodeElt_s e161;
public nodeElt_s e162;
public nodeElt_s e163;
public nodeElt_s e164;
public nodeElt_s e165;
public nodeElt_s e166;
public nodeElt_s e167;
public nodeElt_s e168;
public nodeElt_s e169;
public nodeElt_s e170;
public nodeElt_s e171;
public nodeElt_s e172;
public nodeElt_s e173;
public nodeElt_s e174;
public nodeElt_s e175;
public nodeElt_s e176;
public nodeElt_s e177;
public nodeElt_s e178;
public nodeElt_s e179;
public nodeElt_s e180;
public nodeElt_s e181;
public nodeElt_s e182;
public nodeElt_s e183;
public nodeElt_s e184;
public nodeElt_s e185;
public nodeElt_s e186;
public nodeElt_s e187;
public nodeElt_s e188;
public nodeElt_s e189;
public nodeElt_s e190;
public nodeElt_s e191;
public nodeElt_s e192;
public nodeElt_s e193;
public nodeElt_s e194;
public nodeElt_s e195;
public nodeElt_s e196;
public nodeElt_s e197;
public nodeElt_s e198;
public nodeElt_s e199;
public nodeElt_s e200;
public nodeElt_s e201;
public nodeElt_s e202;
public nodeElt_s e203;
public nodeElt_s e204;
public nodeElt_s e205;
public nodeElt_s e206;
public nodeElt_s e207;
public nodeElt_s e208;
public nodeElt_s e209;
public nodeElt_s e210;
public nodeElt_s e211;
public nodeElt_s e212;
public nodeElt_s e213;
public nodeElt_s e214;
public nodeElt_s e215;
public nodeElt_s e216;
public nodeElt_s e217;
public nodeElt_s e218;
public nodeElt_s e219;
public nodeElt_s e220;
public nodeElt_s e221;
public nodeElt_s e222;
public nodeElt_s e223;
public nodeElt_s e224;
public nodeElt_s e225;
public nodeElt_s e226;
public nodeElt_s e227;
public nodeElt_s e228;
public nodeElt_s e229;
public nodeElt_s e230;
public nodeElt_s e231;
public nodeElt_s e232;
public nodeElt_s e233;
public nodeElt_s e234;
public nodeElt_s e235;
public nodeElt_s e236;
public nodeElt_s e237;
public nodeElt_s e238;
public nodeElt_s e239;
public nodeElt_s e240;
public nodeElt_s e241;
public nodeElt_s e242;
public nodeElt_s e243;
public nodeElt_s e244;
public nodeElt_s e245;
public nodeElt_s e246;
public nodeElt_s e247;
public nodeElt_s e248;
public nodeElt_s e249;
public nodeElt_s e250;
public nodeElt_s e251;
public nodeElt_s e252;
public nodeElt_s e253;
public nodeElt_s e254;
public nodeElt_s e255;
public nodeElt_s e256;
public nodeElt_s e257;
public nodeElt_s e258;
public nodeElt_s e259;
public nodeElt_s e260;
public nodeElt_s e261;
public nodeElt_s e262;
public nodeElt_s e263;
public nodeElt_s e264;
public nodeElt_s e265;
public nodeElt_s e266;
public nodeElt_s e267;
public nodeElt_s e268;
public nodeElt_s e269;
public nodeElt_s e270;
public nodeElt_s e271;
public nodeElt_s e272;
public nodeElt_s e273;
public nodeElt_s e274;
public nodeElt_s e275;
public nodeElt_s e276;
public nodeElt_s e277;
public nodeElt_s e278;
public nodeElt_s e279;
public nodeElt_s e280;
public nodeElt_s e281;
public nodeElt_s e282;
public nodeElt_s e283;
public nodeElt_s e284;
public nodeElt_s e285;
public nodeElt_s e286;
public nodeElt_s e287;
public nodeElt_s e288;
public nodeElt_s e289;
public nodeElt_s e290;
public nodeElt_s e291;
public nodeElt_s e292;
public nodeElt_s e293;
public nodeElt_s e294;
public nodeElt_s e295;
public nodeElt_s e296;
public nodeElt_s e297;
public nodeElt_s e298;
public nodeElt_s e299;
public nodeElt_s e300;
public nodeElt_s e301;
public nodeElt_s e302;
public nodeElt_s e303;
public nodeElt_s e304;
public nodeElt_s e305;
public nodeElt_s e306;
public nodeElt_s e307;
public nodeElt_s e308;
public nodeElt_s e309;
public nodeElt_s e310;
public nodeElt_s e311;
public nodeElt_s e312;
public nodeElt_s e313;
public nodeElt_s e314;
public nodeElt_s e315;
public nodeElt_s e316;
public nodeElt_s e317;
public nodeElt_s e318;
public nodeElt_s e319;
public nodeElt_s e320;
public nodeElt_s e321;
public nodeElt_s e322;
public nodeElt_s e323;
public nodeElt_s e324;
public nodeElt_s e325;
public nodeElt_s e326;
public nodeElt_s e327;
public nodeElt_s e328;
public nodeElt_s e329;
public nodeElt_s e330;
public nodeElt_s e331;
public nodeElt_s e332;
public nodeElt_s e333;
public nodeElt_s e334;
public nodeElt_s e335;
public nodeElt_s e336;
public nodeElt_s e337;
public nodeElt_s e338;
public nodeElt_s e339;
public nodeElt_s e340;
public nodeElt_s e341;
public nodeElt_s e342;
public nodeElt_s e343;
public nodeElt_s e344;
public nodeElt_s e345;
public nodeElt_s e346;
public nodeElt_s e347;
public nodeElt_s e348;
public nodeElt_s e349;
public nodeElt_s e350;
public nodeElt_s e351;
public nodeElt_s e352;
public nodeElt_s e353;
public nodeElt_s e354;
public nodeElt_s e355;
public nodeElt_s e356;
public nodeElt_s e357;
public nodeElt_s e358;
public nodeElt_s e359;
public nodeElt_s e360;
public nodeElt_s e361;
public nodeElt_s e362;
public nodeElt_s e363;
public nodeElt_s e364;
public nodeElt_s e365;
public nodeElt_s e366;
public nodeElt_s e367;
public nodeElt_s e368;
public nodeElt_s e369;
public nodeElt_s e370;
public nodeElt_s e371;
public nodeElt_s e372;
public nodeElt_s e373;
public nodeElt_s e374;
public nodeElt_s e375;
public nodeElt_s e376;
public nodeElt_s e377;
public nodeElt_s e378;
public nodeElt_s e379;
public nodeElt_s e380;
public nodeElt_s e381;
public nodeElt_s e382;
public nodeElt_s e383;
public nodeElt_s e384;
public nodeElt_s e385;
public nodeElt_s e386;
public nodeElt_s e387;
public nodeElt_s e388;
public nodeElt_s e389;
public nodeElt_s e390;
public nodeElt_s e391;
public nodeElt_s e392;
public nodeElt_s e393;
public nodeElt_s e394;
public nodeElt_s e395;
public nodeElt_s e396;
public nodeElt_s e397;
public nodeElt_s e398;
public nodeElt_s e399;
public nodeElt_s e400;
public nodeElt_s e401;
public nodeElt_s e402;
public nodeElt_s e403;
public nodeElt_s e404;
public nodeElt_s e405;
public nodeElt_s e406;
public nodeElt_s e407;
public nodeElt_s e408;
public nodeElt_s e409;
public nodeElt_s e410;
public nodeElt_s e411;
public nodeElt_s e412;
public nodeElt_s e413;
public nodeElt_s e414;
public nodeElt_s e415;
public nodeElt_s e416;
public nodeElt_s e417;
public nodeElt_s e418;
public nodeElt_s e419;
public nodeElt_s e420;
public nodeElt_s e421;
public nodeElt_s e422;
public nodeElt_s e423;
public nodeElt_s e424;
public nodeElt_s e425;
public nodeElt_s e426;
public nodeElt_s e427;
public nodeElt_s e428;
public nodeElt_s e429;
public nodeElt_s e430;
public nodeElt_s e431;
public nodeElt_s e432;
public nodeElt_s e433;
public nodeElt_s e434;
public nodeElt_s e435;
public nodeElt_s e436;
public nodeElt_s e437;
public nodeElt_s e438;
public nodeElt_s e439;
public nodeElt_s e440;
public nodeElt_s e441;
public nodeElt_s e442;
public nodeElt_s e443;
public nodeElt_s e444;
public nodeElt_s e445;
public nodeElt_s e446;
public nodeElt_s e447;
public nodeElt_s e448;
public nodeElt_s e449;
public nodeElt_s e450;
public nodeElt_s e451;
public nodeElt_s e452;
public nodeElt_s e453;
public nodeElt_s e454;
public nodeElt_s e455;
public nodeElt_s e456;
public nodeElt_s e457;
public nodeElt_s e458;
public nodeElt_s e459;
public nodeElt_s e460;
public nodeElt_s e461;
public nodeElt_s e462;
public nodeElt_s e463;
public nodeElt_s e464;
public nodeElt_s e465;
public nodeElt_s e466;
public nodeElt_s e467;
public nodeElt_s e468;
public nodeElt_s e469;
public nodeElt_s e470;
public nodeElt_s e471;
public nodeElt_s e472;
public nodeElt_s e473;
public nodeElt_s e474;
public nodeElt_s e475;
public nodeElt_s e476;
public nodeElt_s e477;
public nodeElt_s e478;
public nodeElt_s e479;
public nodeElt_s e480;
public nodeElt_s e481;
public nodeElt_s e482;
public nodeElt_s e483;
public nodeElt_s e484;
public nodeElt_s e485;
public nodeElt_s e486;
public nodeElt_s e487;
public nodeElt_s e488;
public nodeElt_s e489;
public nodeElt_s e490;
public nodeElt_s e491;
public nodeElt_s e492;
public nodeElt_s e493;
public nodeElt_s e494;
public nodeElt_s e495;
public nodeElt_s e496;
public nodeElt_s e497;
public nodeElt_s e498;
public nodeElt_s e499;
public nodeElt_s e500;
public nodeElt_s e501;
public nodeElt_s e502;
public nodeElt_s e503;
public nodeElt_s e504;
public nodeElt_s e505;
public nodeElt_s e506;
public nodeElt_s e507;
public nodeElt_s e508;
public nodeElt_s e509;
public nodeElt_s e510;
public nodeElt_s e511;
}
#endif
#if NET8_0_OR_GREATER
[InlineArray(192)]
public unsafe struct _rankPosition_e__FixedBuffer
{
public rankPos e0;
}
#else
public unsafe struct _rankPosition_e__FixedBuffer
{
public rankPos e0;
public rankPos e1;
public rankPos e2;
public rankPos e3;
public rankPos e4;
public rankPos e5;
public rankPos e6;
public rankPos e7;
public rankPos e8;
public rankPos e9;
public rankPos e10;
public rankPos e11;
public rankPos e12;
public rankPos e13;
public rankPos e14;
public rankPos e15;
public rankPos e16;
public rankPos e17;
public rankPos e18;
public rankPos e19;
public rankPos e20;
public rankPos e21;
public rankPos e22;
public rankPos e23;
public rankPos e24;
public rankPos e25;
public rankPos e26;
public rankPos e27;
public rankPos e28;
public rankPos e29;
public rankPos e30;
public rankPos e31;
public rankPos e32;
public rankPos e33;
public rankPos e34;
public rankPos e35;
public rankPos e36;
public rankPos e37;
public rankPos e38;
public rankPos e39;
public rankPos e40;
public rankPos e41;
public rankPos e42;
public rankPos e43;
public rankPos e44;
public rankPos e45;
public rankPos e46;
public rankPos e47;
public rankPos e48;
public rankPos e49;
public rankPos e50;
public rankPos e51;
public rankPos e52;
public rankPos e53;
public rankPos e54;
public rankPos e55;
public rankPos e56;
public rankPos e57;
public rankPos e58;
public rankPos e59;
public rankPos e60;
public rankPos e61;
public rankPos e62;
public rankPos e63;
public rankPos e64;
public rankPos e65;
public rankPos e66;
public rankPos e67;
public rankPos e68;
public rankPos e69;
public rankPos e70;
public rankPos e71;
public rankPos e72;
public rankPos e73;
public rankPos e74;
public rankPos e75;
public rankPos e76;
public rankPos e77;
public rankPos e78;
public rankPos e79;
public rankPos e80;
public rankPos e81;
public rankPos e82;
public rankPos e83;
public rankPos e84;
public rankPos e85;
public rankPos e86;
public rankPos e87;
public rankPos e88;
public rankPos e89;
public rankPos e90;
public rankPos e91;
public rankPos e92;
public rankPos e93;
public rankPos e94;
public rankPos e95;
public rankPos e96;
public rankPos e97;
public rankPos e98;
public rankPos e99;
public rankPos e100;
public rankPos e101;
public rankPos e102;
public rankPos e103;
public rankPos e104;
public rankPos e105;
public rankPos e106;
public rankPos e107;
public rankPos e108;
public rankPos e109;
public rankPos e110;
public rankPos e111;
public rankPos e112;
public rankPos e113;
public rankPos e114;
public rankPos e115;
public rankPos e116;
public rankPos e117;
public rankPos e118;
public rankPos e119;
public rankPos e120;
public rankPos e121;
public rankPos e122;
public rankPos e123;
public rankPos e124;
public rankPos e125;
public rankPos e126;
public rankPos e127;
public rankPos e128;
public rankPos e129;
public rankPos e130;
public rankPos e131;
public rankPos e132;
public rankPos e133;
public rankPos e134;
public rankPos e135;
public rankPos e136;
public rankPos e137;
public rankPos e138;
public rankPos e139;
public rankPos e140;
public rankPos e141;
public rankPos e142;
public rankPos e143;
public rankPos e144;
public rankPos e145;
public rankPos e146;
public rankPos e147;
public rankPos e148;
public rankPos e149;
public rankPos e150;
public rankPos e151;
public rankPos e152;
public rankPos e153;
public rankPos e154;
public rankPos e155;
public rankPos e156;
public rankPos e157;
public rankPos e158;
public rankPos e159;
public rankPos e160;
public rankPos e161;
public rankPos e162;
public rankPos e163;
public rankPos e164;
public rankPos e165;
public rankPos e166;
public rankPos e167;
public rankPos e168;
public rankPos e169;
public rankPos e170;
public rankPos e171;
public rankPos e172;
public rankPos e173;
public rankPos e174;
public rankPos e175;
public rankPos e176;
public rankPos e177;
public rankPos e178;
public rankPos e179;
public rankPos e180;
public rankPos e181;
public rankPos e182;
public rankPos e183;
public rankPos e184;
public rankPos e185;
public rankPos e186;
public rankPos e187;
public rankPos e188;
public rankPos e189;
public rankPos e190;
public rankPos e191;
}
#endif
}
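
The #if split above is worth a note: on .NET 8+, [InlineArray(512)] makes the single e0 field stand for 512 consecutive nodeElt_s elements, while older targets fall back to the hand-expanded e0..e511 fields. Both layouts are contiguous in memory, so pointer and span indexing behave identically. A minimal sketch of the pattern, using a hypothetical FourInts type (not part of this diff):

using System.Runtime.CompilerServices;

[InlineArray(4)]
public struct FourInts
{
    // The runtime lays out 4 consecutive ints starting at e0.
    public int e0;
}

public static class InlineArrayDemo
{
    public static int Sum()
    {
        var buf = new FourInts();
        for (var i = 0; i < 4; i++)
            buf[i] = i + 1; // inline arrays support direct element access
        return buf[0] + buf[3]; // 1 + 4 == 5
    }
}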

View File

@@ -0,0 +1,280 @@
using System.Runtime.CompilerServices;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct HUF_compress_tables_t
{
public fixed uint count[256];
public _CTable_e__FixedBuffer CTable;
public _wksps_e__Union wksps;
#if NET8_0_OR_GREATER
[InlineArray(257)]
public unsafe struct _CTable_e__FixedBuffer
{
public nuint e0;
}
#else
public unsafe struct _CTable_e__FixedBuffer
{
public nuint e0;
public nuint e1;
public nuint e2;
public nuint e3;
public nuint e4;
public nuint e5;
public nuint e6;
public nuint e7;
public nuint e8;
public nuint e9;
public nuint e10;
public nuint e11;
public nuint e12;
public nuint e13;
public nuint e14;
public nuint e15;
public nuint e16;
public nuint e17;
public nuint e18;
public nuint e19;
public nuint e20;
public nuint e21;
public nuint e22;
public nuint e23;
public nuint e24;
public nuint e25;
public nuint e26;
public nuint e27;
public nuint e28;
public nuint e29;
public nuint e30;
public nuint e31;
public nuint e32;
public nuint e33;
public nuint e34;
public nuint e35;
public nuint e36;
public nuint e37;
public nuint e38;
public nuint e39;
public nuint e40;
public nuint e41;
public nuint e42;
public nuint e43;
public nuint e44;
public nuint e45;
public nuint e46;
public nuint e47;
public nuint e48;
public nuint e49;
public nuint e50;
public nuint e51;
public nuint e52;
public nuint e53;
public nuint e54;
public nuint e55;
public nuint e56;
public nuint e57;
public nuint e58;
public nuint e59;
public nuint e60;
public nuint e61;
public nuint e62;
public nuint e63;
public nuint e64;
public nuint e65;
public nuint e66;
public nuint e67;
public nuint e68;
public nuint e69;
public nuint e70;
public nuint e71;
public nuint e72;
public nuint e73;
public nuint e74;
public nuint e75;
public nuint e76;
public nuint e77;
public nuint e78;
public nuint e79;
public nuint e80;
public nuint e81;
public nuint e82;
public nuint e83;
public nuint e84;
public nuint e85;
public nuint e86;
public nuint e87;
public nuint e88;
public nuint e89;
public nuint e90;
public nuint e91;
public nuint e92;
public nuint e93;
public nuint e94;
public nuint e95;
public nuint e96;
public nuint e97;
public nuint e98;
public nuint e99;
public nuint e100;
public nuint e101;
public nuint e102;
public nuint e103;
public nuint e104;
public nuint e105;
public nuint e106;
public nuint e107;
public nuint e108;
public nuint e109;
public nuint e110;
public nuint e111;
public nuint e112;
public nuint e113;
public nuint e114;
public nuint e115;
public nuint e116;
public nuint e117;
public nuint e118;
public nuint e119;
public nuint e120;
public nuint e121;
public nuint e122;
public nuint e123;
public nuint e124;
public nuint e125;
public nuint e126;
public nuint e127;
public nuint e128;
public nuint e129;
public nuint e130;
public nuint e131;
public nuint e132;
public nuint e133;
public nuint e134;
public nuint e135;
public nuint e136;
public nuint e137;
public nuint e138;
public nuint e139;
public nuint e140;
public nuint e141;
public nuint e142;
public nuint e143;
public nuint e144;
public nuint e145;
public nuint e146;
public nuint e147;
public nuint e148;
public nuint e149;
public nuint e150;
public nuint e151;
public nuint e152;
public nuint e153;
public nuint e154;
public nuint e155;
public nuint e156;
public nuint e157;
public nuint e158;
public nuint e159;
public nuint e160;
public nuint e161;
public nuint e162;
public nuint e163;
public nuint e164;
public nuint e165;
public nuint e166;
public nuint e167;
public nuint e168;
public nuint e169;
public nuint e170;
public nuint e171;
public nuint e172;
public nuint e173;
public nuint e174;
public nuint e175;
public nuint e176;
public nuint e177;
public nuint e178;
public nuint e179;
public nuint e180;
public nuint e181;
public nuint e182;
public nuint e183;
public nuint e184;
public nuint e185;
public nuint e186;
public nuint e187;
public nuint e188;
public nuint e189;
public nuint e190;
public nuint e191;
public nuint e192;
public nuint e193;
public nuint e194;
public nuint e195;
public nuint e196;
public nuint e197;
public nuint e198;
public nuint e199;
public nuint e200;
public nuint e201;
public nuint e202;
public nuint e203;
public nuint e204;
public nuint e205;
public nuint e206;
public nuint e207;
public nuint e208;
public nuint e209;
public nuint e210;
public nuint e211;
public nuint e212;
public nuint e213;
public nuint e214;
public nuint e215;
public nuint e216;
public nuint e217;
public nuint e218;
public nuint e219;
public nuint e220;
public nuint e221;
public nuint e222;
public nuint e223;
public nuint e224;
public nuint e225;
public nuint e226;
public nuint e227;
public nuint e228;
public nuint e229;
public nuint e230;
public nuint e231;
public nuint e232;
public nuint e233;
public nuint e234;
public nuint e235;
public nuint e236;
public nuint e237;
public nuint e238;
public nuint e239;
public nuint e240;
public nuint e241;
public nuint e242;
public nuint e243;
public nuint e244;
public nuint e245;
public nuint e246;
public nuint e247;
public nuint e248;
public nuint e249;
public nuint e250;
public nuint e251;
public nuint e252;
public nuint e253;
public nuint e254;
public nuint e255;
public nuint e256;
}
#endif
}

View File

@@ -0,0 +1,44 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/**
* Huffman flags bitset.
* For all flags, 0 is the default value.
*/
public enum HUF_flags_e
{
/**
* If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime.
* Otherwise: Ignored.
*/
HUF_flags_bmi2 = 1 << 0,
/**
* If set: Test possible table depths to find the one that produces the smallest header + encoded size.
* If unset: Use heuristic to find the table depth.
*/
HUF_flags_optimalDepth = 1 << 1,
/**
* If set: If the previous table can encode the input, always reuse the previous table.
* If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output.
*/
HUF_flags_preferRepeat = 1 << 2,
/**
* If set: Sample the input and check if the sample is uncompressible; if it is, don't attempt to compress.
* If unset: Always histogram the entire input.
*/
HUF_flags_suspectUncompressible = 1 << 3,
/**
* If set: Don't use assembly implementations
* If unset: Allow using assembly implementations
*/
HUF_flags_disableAsm = 1 << 4,
/**
* If set: Don't use the fast decoding loop, always use the fallback decoding loop.
* If unset: Use the fast decoding loop when possible.
*/
HUF_flags_disableFast = 1 << 5,
}
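
Because these values are single bits, callers combine them with bitwise OR and test them with AND. A hypothetical combination (not part of this diff):

using SharpCompress.Compressors.ZStandard.Unsafe;

public static class HufFlagsDemo
{
    public static bool Demo()
    {
        // Request optimal table depth and disable assembly implementations.
        var flags = HUF_flags_e.HUF_flags_optimalDepth | HUF_flags_e.HUF_flags_disableAsm;
        // BMI2 was not requested, so this test is false.
        return (flags & HUF_flags_e.HUF_flags_bmi2) != 0;
    }
}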

View File

@@ -0,0 +1,7 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public enum HUF_nbStreams_e
{
HUF_singleStream,
HUF_fourStreams,
}

View File

@@ -0,0 +1,13 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public enum HUF_repeat
{
/**< Cannot use the previous table */
HUF_repeat_none,
/**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
HUF_repeat_check,
/**< Can use the previous table and it is assumed to be valid */
HUF_repeat_valid,
}

View File

@@ -0,0 +1,273 @@
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
/* --- Error management --- */
private static bool HIST_isError(nuint code)
{
return ERR_isError(code);
}
/*-**************************************************************
* Histogram functions
****************************************************************/
private static void HIST_add(uint* count, void* src, nuint srcSize)
{
byte* ip = (byte*)src;
byte* end = ip + srcSize;
while (ip < end)
{
count[*ip++]++;
}
}
/*! HIST_count_simple() :
* Like HIST_countFast(), this function is unsafe,
* and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
* It is also a bit slower for large inputs.
* However, it does not need any additional memory (not even on stack).
* @return : count of the most frequent symbol.
* Note this function doesn't produce any error (i.e. it must succeed).
*/
private static uint HIST_count_simple(
uint* count,
uint* maxSymbolValuePtr,
void* src,
nuint srcSize
)
{
byte* ip = (byte*)src;
byte* end = ip + srcSize;
uint maxSymbolValue = *maxSymbolValuePtr;
uint largestCount = 0;
memset(count, 0, (maxSymbolValue + 1) * sizeof(uint));
if (srcSize == 0)
{
*maxSymbolValuePtr = 0;
return 0;
}
while (ip < end)
{
assert(*ip <= maxSymbolValue);
count[*ip++]++;
}
while (count[maxSymbolValue] == 0)
maxSymbolValue--;
*maxSymbolValuePtr = maxSymbolValue;
{
uint s;
for (s = 0; s <= maxSymbolValue; s++)
if (count[s] > largestCount)
largestCount = count[s];
}
return largestCount;
}
/* HIST_count_parallel_wksp() :
* store histogram into 4 intermediate tables, recombined at the end.
* this design makes better use of OoO cpus,
* and is noticeably faster when some values are heavily repeated.
* But it needs some additional workspace for intermediate tables.
* `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
* @return : largest histogram frequency,
* or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
private static nuint HIST_count_parallel_wksp(
uint* count,
uint* maxSymbolValuePtr,
void* source,
nuint sourceSize,
HIST_checkInput_e check,
uint* workSpace
)
{
byte* ip = (byte*)source;
byte* iend = ip + sourceSize;
nuint countSize = (*maxSymbolValuePtr + 1) * sizeof(uint);
uint max = 0;
uint* Counting1 = workSpace;
uint* Counting2 = Counting1 + 256;
uint* Counting3 = Counting2 + 256;
uint* Counting4 = Counting3 + 256;
assert(*maxSymbolValuePtr <= 255);
if (sourceSize == 0)
{
memset(count, 0, (uint)countSize);
*maxSymbolValuePtr = 0;
return 0;
}
memset(workSpace, 0, 4 * 256 * sizeof(uint));
{
uint cached = MEM_read32(ip);
ip += 4;
while (ip < iend - 15)
{
uint c = cached;
cached = MEM_read32(ip);
ip += 4;
Counting1[(byte)c]++;
Counting2[(byte)(c >> 8)]++;
Counting3[(byte)(c >> 16)]++;
Counting4[c >> 24]++;
c = cached;
cached = MEM_read32(ip);
ip += 4;
Counting1[(byte)c]++;
Counting2[(byte)(c >> 8)]++;
Counting3[(byte)(c >> 16)]++;
Counting4[c >> 24]++;
c = cached;
cached = MEM_read32(ip);
ip += 4;
Counting1[(byte)c]++;
Counting2[(byte)(c >> 8)]++;
Counting3[(byte)(c >> 16)]++;
Counting4[c >> 24]++;
c = cached;
cached = MEM_read32(ip);
ip += 4;
Counting1[(byte)c]++;
Counting2[(byte)(c >> 8)]++;
Counting3[(byte)(c >> 16)]++;
Counting4[c >> 24]++;
}
ip -= 4;
}
while (ip < iend)
Counting1[*ip++]++;
{
uint s;
for (s = 0; s < 256; s++)
{
Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
if (Counting1[s] > max)
max = Counting1[s];
}
}
{
uint maxSymbolValue = 255;
while (Counting1[maxSymbolValue] == 0)
maxSymbolValue--;
if (check != default && maxSymbolValue > *maxSymbolValuePtr)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall));
*maxSymbolValuePtr = maxSymbolValue;
memmove(count, Counting1, countSize);
}
return max;
}
/* HIST_countFast_wksp() :
* Same as HIST_countFast(), but using an externally provided scratch buffer.
* `workSpace` is a writable buffer which must be 4-bytes aligned,
* `workSpaceSize` must be >= HIST_WKSP_SIZE
*/
private static nuint HIST_countFast_wksp(
uint* count,
uint* maxSymbolValuePtr,
void* source,
nuint sourceSize,
void* workSpace,
nuint workSpaceSize
)
{
if (sourceSize < 1500)
return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
if (((nuint)workSpace & 3) != 0)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
if (workSpaceSize < 1024 * sizeof(uint))
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall));
return HIST_count_parallel_wksp(
count,
maxSymbolValuePtr,
source,
sourceSize,
HIST_checkInput_e.trustInput,
(uint*)workSpace
);
}
/* HIST_count_wksp() :
* Same as HIST_count(), but using an externally provided scratch buffer.
* `workSpace` must be a table of >= HIST_WKSP_SIZE_U32 unsigned ints */
private static nuint HIST_count_wksp(
uint* count,
uint* maxSymbolValuePtr,
void* source,
nuint sourceSize,
void* workSpace,
nuint workSpaceSize
)
{
if (((nuint)workSpace & 3) != 0)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
if (workSpaceSize < 1024 * sizeof(uint))
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall));
if (*maxSymbolValuePtr < 255)
return HIST_count_parallel_wksp(
count,
maxSymbolValuePtr,
source,
sourceSize,
HIST_checkInput_e.checkMaxSymbolValue,
(uint*)workSpace
);
*maxSymbolValuePtr = 255;
return HIST_countFast_wksp(
count,
maxSymbolValuePtr,
source,
sourceSize,
workSpace,
workSpaceSize
);
}
/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
private static nuint HIST_countFast(
uint* count,
uint* maxSymbolValuePtr,
void* source,
nuint sourceSize
)
{
uint* tmpCounters = stackalloc uint[1024];
return HIST_countFast_wksp(
count,
maxSymbolValuePtr,
source,
sourceSize,
tmpCounters,
sizeof(uint) * 1024
);
}
/*! HIST_count():
* Provides the precise count of each byte within a table 'count'.
* 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
* Updates *maxSymbolValuePtr with actual largest symbol value detected.
* @return : count of the most frequent symbol (which isn't identified).
* or an error code, which can be tested using HIST_isError().
* note : if return == srcSize, there is only one symbol.
*/
private static nuint HIST_count(uint* count, uint* maxSymbolValuePtr, void* src, nuint srcSize)
{
uint* tmpCounters = stackalloc uint[1024];
return HIST_count_wksp(
count,
maxSymbolValuePtr,
src,
srcSize,
tmpCounters,
sizeof(uint) * 1024
);
}
}
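
The four Counting tables in HIST_count_parallel_wksp exist to break the store-to-load dependency on a single count array: consecutive bytes increment independent tables, which an out-of-order CPU can overlap. A safe-C# sketch of the same idea, simplified (no 4-byte caching; hypothetical, not part of this diff):

public static class HistogramDemo
{
    public static uint[] Count(byte[] src)
    {
        // Four independent tables keep the increments off each other's critical path.
        var c1 = new uint[256];
        var c2 = new uint[256];
        var c3 = new uint[256];
        var c4 = new uint[256];
        var i = 0;
        for (; i + 4 <= src.Length; i += 4)
        {
            c1[src[i]]++;
            c2[src[i + 1]]++;
            c3[src[i + 2]]++;
            c4[src[i + 3]]++;
        }
        for (; i < src.Length; i++)
            c1[src[i]]++; // tail bytes
        for (var s = 0; s < 256; s++)
            c1[s] += c2[s] + c3[s] + c4[s]; // recombine into one histogram
        return c1;
    }
}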

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,162 @@
using System;
using System.Buffers.Binary;
using System.Runtime.CompilerServices;
using BclUnsafe = System.Runtime.CompilerServices.Unsafe;
// ReSharper disable InconsistentNaming
// ReSharper disable IdentifierTypo
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
/*-**************************************************************
* Memory I/O API
*****************************************************************/
/*=== Static platform detection ===*/
private static bool MEM_32bits
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
get => sizeof(nint) == 4;
}
private static bool MEM_64bits
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
get => sizeof(nint) == 8;
}
/* default method, safe and standard.
   can sometimes prove slower */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ushort MEM_read16(void* memPtr) => BclUnsafe.ReadUnaligned<ushort>(memPtr);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint MEM_read32(void* memPtr) => BclUnsafe.ReadUnaligned<uint>(memPtr);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ulong MEM_read64(void* memPtr) => BclUnsafe.ReadUnaligned<ulong>(memPtr);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint MEM_readST(void* memPtr) => BclUnsafe.ReadUnaligned<nuint>(memPtr);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_write16(void* memPtr, ushort value) =>
BclUnsafe.WriteUnaligned(memPtr, value);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_write64(void* memPtr, ulong value) =>
BclUnsafe.WriteUnaligned(memPtr, value);
/*=== Little endian r/w ===*/
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ushort MEM_readLE16(void* memPtr)
{
var val = BclUnsafe.ReadUnaligned<ushort>(memPtr);
if (!BitConverter.IsLittleEndian)
{
val = BinaryPrimitives.ReverseEndianness(val);
}
return val;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_writeLE16(void* memPtr, ushort val)
{
if (!BitConverter.IsLittleEndian)
{
val = BinaryPrimitives.ReverseEndianness(val);
}
BclUnsafe.WriteUnaligned(memPtr, val);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint MEM_readLE24(void* memPtr) =>
(uint)(MEM_readLE16(memPtr) + (((byte*)memPtr)[2] << 16));
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_writeLE24(void* memPtr, uint val)
{
MEM_writeLE16(memPtr, (ushort)val);
((byte*)memPtr)[2] = (byte)(val >> 16);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint MEM_readLE32(void* memPtr)
{
var val = BclUnsafe.ReadUnaligned<uint>(memPtr);
if (!BitConverter.IsLittleEndian)
{
val = BinaryPrimitives.ReverseEndianness(val);
}
return val;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_writeLE32(void* memPtr, uint val32)
{
if (!BitConverter.IsLittleEndian)
{
val32 = BinaryPrimitives.ReverseEndianness(val32);
}
BclUnsafe.WriteUnaligned(memPtr, val32);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ulong MEM_readLE64(void* memPtr)
{
var val = BclUnsafe.ReadUnaligned<ulong>(memPtr);
if (!BitConverter.IsLittleEndian)
{
val = BinaryPrimitives.ReverseEndianness(val);
}
return val;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_writeLE64(void* memPtr, ulong val64)
{
if (!BitConverter.IsLittleEndian)
{
val64 = BinaryPrimitives.ReverseEndianness(val64);
}
BclUnsafe.WriteUnaligned(memPtr, val64);
}
#if !NET8_0_OR_GREATER
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint ReverseEndiannessNative(nuint val) =>
MEM_32bits
? BinaryPrimitives.ReverseEndianness((uint)val)
: (nuint)BinaryPrimitives.ReverseEndianness(val);
#endif
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint MEM_readLEST(void* memPtr)
{
var val = BclUnsafe.ReadUnaligned<nuint>(memPtr);
if (!BitConverter.IsLittleEndian)
{
#if NET8_0_OR_GREATER
val = BinaryPrimitives.ReverseEndianness(val);
#else
val = ReverseEndiannessNative(val);
#endif
}
return val;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_writeLEST(void* memPtr, nuint val)
{
if (!BitConverter.IsLittleEndian)
{
#if NET8_0_OR_GREATER
val = BinaryPrimitives.ReverseEndianness(val);
#else
val = ReverseEndiannessNative(val);
#endif
}
BclUnsafe.WriteUnaligned(memPtr, val);
}
}
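
All MEM_readLE*/MEM_writeLE* pairs above normalize to little-endian storage regardless of host byte order, reversing only on big-endian machines. The same contract can be observed with the BCL directly (a sketch, not part of this diff):

using System;
using System.Buffers.Binary;

public static class LittleEndianDemo
{
    public static void Demo()
    {
        Span<byte> buf = stackalloc byte[4];
        // Bytes become 78 56 34 12 on every host, mirroring MEM_writeLE32.
        BinaryPrimitives.WriteUInt32LittleEndian(buf, 0x12345678u);
        var back = BinaryPrimitives.ReadUInt32LittleEndian(buf);
        Console.WriteLine(back == 0x12345678u); // True
    }
}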

View File

@@ -0,0 +1,122 @@
using SharpCompress.Compressors.ZStandard.Unsafe;
using static SharpCompress.Compressors.ZStandard.UnsafeHelper;
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public static unsafe partial class Methods
{
private static JobThreadPool GetThreadPool(void* ctx) =>
UnmanagedObject.Unwrap<JobThreadPool>(ctx);
/* ZSTD_createThreadPool() : public access point */
public static void* ZSTD_createThreadPool(nuint numThreads)
{
return POOL_create(numThreads, 0);
}
/*! POOL_create() :
* Create a thread pool with at most `numThreads` threads.
* `numThreads` must be at least 1.
* The maximum number of queued jobs before blocking is `queueSize`.
* @return : POOL_ctx pointer on success, else NULL.
*/
private static void* POOL_create(nuint numThreads, nuint queueSize)
{
return POOL_create_advanced(numThreads, queueSize, Unsafe.Methods.ZSTD_defaultCMem);
}
private static void* POOL_create_advanced(
nuint numThreads,
nuint queueSize,
ZSTD_customMem customMem
)
{
var jobThreadPool = new JobThreadPool((int)numThreads, (int)queueSize);
return UnmanagedObject.Wrap(jobThreadPool);
}
/*! POOL_join() :
Shutdown the queue, wake any sleeping threads, and join all of the threads.
*/
private static void POOL_join(void* ctx)
{
GetThreadPool(ctx).Join();
}
/*! POOL_free() :
* Free a thread pool returned by POOL_create().
*/
private static void POOL_free(void* ctx)
{
if (ctx == null)
{
return;
}
var jobThreadPool = GetThreadPool(ctx);
jobThreadPool.Join();
jobThreadPool.Dispose();
UnmanagedObject.Free(ctx);
}
/*! POOL_joinJobs() :
* Waits for all queued jobs to finish executing.
*/
private static void POOL_joinJobs(void* ctx)
{
var jobThreadPool = GetThreadPool(ctx);
jobThreadPool.Join(false);
}
public static void ZSTD_freeThreadPool(void* pool)
{
POOL_free(pool);
}
/*! POOL_sizeof() :
* @return threadpool memory usage
* note : compatible with NULL (returns 0 in this case)
*/
private static nuint POOL_sizeof(void* ctx)
{
if (ctx == null)
return 0;
var jobThreadPool = GetThreadPool(ctx);
return (nuint)jobThreadPool.Size();
}
/* @return : 0 on success, 1 on error */
private static int POOL_resize(void* ctx, nuint numThreads)
{
if (ctx == null)
return 1;
var jobThreadPool = GetThreadPool(ctx);
jobThreadPool.Resize((int)numThreads);
return 0;
}
/*! POOL_add() :
* Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
* Possibly blocks until there is room in the queue.
* Note : The function may be executed asynchronously,
* therefore, `opaque` must live until the function has completed.
*/
private static void POOL_add(void* ctx, void* function, void* opaque)
{
assert(ctx != null);
var jobThreadPool = GetThreadPool(ctx);
jobThreadPool.Add(function, opaque);
}
/*! POOL_tryAdd() :
* Add the job `function(opaque)` to thread pool _if_ a queue slot is available.
* Returns immediately even if not (does not block).
* @return : 1 if successful, 0 if not.
*/
private static int POOL_tryAdd(void* ctx, void* function, void* opaque)
{
assert(ctx != null);
var jobThreadPool = GetThreadPool(ctx);
return jobThreadPool.TryAdd(function, opaque) ? 1 : 0;
}
}
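
ZSTD_createThreadPool and ZSTD_freeThreadPool are the only public entry points here; everything else is internal plumbing over JobThreadPool. A minimal lifecycle sketch, assuming the semantics above (hypothetical, not part of this diff):

using SharpCompress.Compressors.ZStandard.Unsafe;

public static unsafe class PoolDemo
{
    public static void Demo()
    {
        // Two worker threads; POOL_create passes queueSize 0.
        var pool = Methods.ZSTD_createThreadPool((nuint)2);
        try
        {
            // ... hand `pool` to a compression context that supports it ...
        }
        finally
        {
            // Joins outstanding jobs, disposes the pool, and frees the wrapper.
            Methods.ZSTD_freeThreadPool(pool);
        }
    }
}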

View File

@@ -0,0 +1,8 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public struct RSyncState_t
{
public ulong hash;
public ulong hitMask;
public ulong primePower;
}

View File

@@ -0,0 +1,14 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/* ==== Serial State ==== */
public unsafe struct Range
{
public void* start;
public nuint size;
public Range(void* start, nuint size)
{
this.start = start;
this.size = size;
}
}

View File

@@ -0,0 +1,29 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct RawSeqStore_t
{
/* The start of the sequences */
public rawSeq* seq;
/* The index in seq where reading stopped. pos <= size. */
public nuint pos;
/* The position within the sequence at seq[pos] where reading
stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
public nuint posInSequence;
/* The number of sequences. <= capacity. */
public nuint size;
/* The capacity starting from `seq` pointer */
public nuint capacity;
public RawSeqStore_t(rawSeq* seq, nuint pos, nuint posInSequence, nuint size, nuint capacity)
{
this.seq = seq;
this.pos = pos;
this.posInSequence = posInSequence;
this.size = size;
this.capacity = capacity;
}
}

View File

@@ -0,0 +1,28 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct RoundBuff_t
{
/* The round input buffer. All jobs get references
* to pieces of the buffer. ZSTDMT_tryGetInputRange()
* handles handing out job input buffers, and makes
* sure it doesn't overlap with any pieces still in use.
*/
public byte* buffer;
/* The capacity of buffer. */
public nuint capacity;
/* The position of the current inBuff in the round
* buffer. Updated past the end of the inBuff once
* the inBuff is sent to the worker thread.
* pos <= capacity.
*/
public nuint pos;
public RoundBuff_t(byte* buffer, nuint capacity, nuint pos)
{
this.buffer = buffer;
this.capacity = capacity;
this.pos = pos;
}
}

View File

@@ -0,0 +1,9 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct SeqCollector
{
public int collectSequences;
public ZSTD_Sequence* seqStart;
public nuint seqIndex;
public nuint maxSequences;
}

View File

@@ -0,0 +1,14 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/***********************************************
* Sequences *
***********************************************/
public struct SeqDef_s
{
/* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
public uint offBase;
public ushort litLength;
/* mlBase == matchLength - MINMATCH */
public ushort mlBase;
}
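
A worked example of these encodings, assuming zstd's usual constants ZSTD_REP_NUM = 3 and MINMATCH = 3: a fresh match at offset 100 with length 7 is stored as offBase = 100 + 3 = 103 and mlBase = 7 - 3 = 4, while offBase values 1..3 denote repcodes 1..3. In code (a sketch, not part of this diff):

public static class SeqDefDemo
{
    private const uint ZSTD_REP_NUM = 3; // assumed, per zstd convention
    private const uint MINMATCH = 3;     // assumed, per zstd convention

    public static (uint offBase, ushort mlBase) Encode(uint offset, uint matchLength)
    {
        // (103, 4) for a match at offset 100 with length 7.
        return (offset + ZSTD_REP_NUM, (ushort)(matchLength - MINMATCH));
    }
}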

View File

@@ -0,0 +1,27 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct SeqStore_t
{
public SeqDef_s* sequencesStart;
/* ptr to end of sequences */
public SeqDef_s* sequences;
public byte* litStart;
/* ptr to end of literals */
public byte* lit;
public byte* llCode;
public byte* mlCode;
public byte* ofCode;
public nuint maxNbSeq;
public nuint maxNbLit;
/* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
* in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
* the existing value of the litLength or matchLength by 0x10000.
*/
public ZSTD_longLengthType_e longLengthType;
/* Index of the sequence to apply long length modification to */
public uint longLengthPos;
}
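
The longLength mechanism works because at most one sequence per block may exceed the u16 range. A hedged worked example: a literal run of 70000 is truncated to (ushort)70000 = 4464 at store time, longLengthType/longLengthPos mark that sequence, and the reader adds the 0x10000 back:

public static class LongLengthDemo
{
    public static uint Demo()
    {
        var stored = unchecked((ushort)70000); // 4464 after u16 truncation
        return stored + 0x10000u;              // 70000 recovered by the reader
    }
}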

View File

@@ -0,0 +1,23 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public unsafe struct SerialState
{
/* All variables in the struct are protected by mutex. */
public void* mutex;
public void* cond;
public ZSTD_CCtx_params_s @params;
public ldmState_t ldmState;
public XXH64_state_s xxhState;
public uint nextJobID;
/* Protects ldmWindow.
* Must be acquired after the main mutex when acquiring both.
*/
public void* ldmWindowMutex;
/* Signaled when ldmWindow is updated */
public void* ldmWindowCond;
/* A thread-safe copy of ldmState.window */
public ZSTD_window_t ldmWindow;
}

View File

@@ -0,0 +1,9 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public enum SymbolEncodingType_e
{
set_basic,
set_rle,
set_compressed,
set_repeat,
}

View File

@@ -0,0 +1,10 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
public struct SyncPoint
{
/* The number of bytes to load from the input. */
public nuint toLoad;
/* Boolean declaring if we must flush because we found a synchronization point. */
public int flush;
}

View File

@@ -0,0 +1,10 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/*!
* @brief Canonical (big endian) representation of @ref XXH32_hash_t.
*/
public unsafe struct XXH32_canonical_t
{
/*!< Hash bytes, big endian */
public fixed byte digest[4];
}

View File

@@ -0,0 +1,34 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/*!
* @internal
* @brief Structure for XXH32 streaming API.
*
* @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
* @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
* an opaque type. This allows fields to safely be changed.
*
* Typedef'd to @ref XXH32_state_t.
* Do not access the members of this struct directly.
* @see XXH64_state_s, XXH3_state_s
*/
public unsafe struct XXH32_state_s
{
/*!< Total length hashed, modulo 2^32 */
public uint total_len_32;
/*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
public uint large_len;
/*!< Accumulator lanes */
public fixed uint v[4];
/*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
public fixed uint mem32[4];
/*!< Amount of data in @ref mem32 */
public uint memsize;
/*!< Reserved field. Do not read nor write to it. */
public uint reserved;
}

View File

@@ -0,0 +1,9 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/*!
* @brief Canonical (big endian) representation of @ref XXH64_hash_t.
*/
public unsafe struct XXH64_canonical_t
{
public fixed byte digest[8];
}

View File

@@ -0,0 +1,34 @@
namespace SharpCompress.Compressors.ZStandard.Unsafe;
/*!
* @internal
* @brief Structure for XXH64 streaming API.
*
* @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
* @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
* an opaque type. This allows fields to safely be changed.
*
* Typedef'd to @ref XXH64_state_t.
* Do not access the members of this struct directly.
* @see XXH32_state_s, XXH3_state_s
*/
public unsafe struct XXH64_state_s
{
/*!< Total length hashed. This is always 64-bit. */
public ulong total_len;
/*!< Accumulator lanes */
public fixed ulong v[4];
/*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
public fixed ulong mem64[4];
/*!< Amount of data in @ref mem64 */
public uint memsize;
/*!< Reserved field, needed for padding anyway */
public uint reserved32;
/*!< Reserved field. Do not read or write to it. */
public ulong reserved64;
}

Some files were not shown because too many files have changed in this diff