71 Commits

Author SHA1 Message Date
Matt Nadareski
d239d9f09b Bump version 2024-04-18 12:08:56 -04:00
Matt Nadareski
0cf3e3e816 Update SabreTools.IO 2024-04-18 12:05:30 -04:00
Matt Nadareski
fe319b71f1 Bump version 2024-04-16 22:10:41 -04:00
Matt Nadareski
cd5bf99f21 File formatting changes 2024-04-16 22:09:11 -04:00
Matt Nadareski
f79b5353f7 Add empty test project 2024-04-16 22:06:54 -04:00
Matt Nadareski
5e184b03c5 Move project files to subfolder 2024-04-16 22:02:51 -04:00
Matt Nadareski
a1417d2f8a Minor syntax cleanup 2024-04-16 22:00:58 -04:00
Matt Nadareski
7580d49830 Migrate to ReadOnlyBitStream 2024-04-16 21:57:31 -04:00
Matt Nadareski
c04c7d438d Update SabreTools.IO 2024-04-16 21:55:02 -04:00
Matt Nadareski
a0f602ed6f Bump version the correct way 2024-04-16 12:07:15 -04:00
Matt Nadareski
4bc7f53c9f Bump version 2024-04-16 12:05:46 -04:00
Matt Nadareski
049a8cf499 Minor fixes and tweaks 2024-04-16 12:04:58 -04:00
Matt Nadareski
9a8875f1e0 Update SabreTools.IO 2024-04-16 12:03:05 -04:00
Matt Nadareski
a916cb9954 Bump version 2024-04-03 22:51:17 -04:00
Matt Nadareski
a6ef762a73 Update SabreTools.Models 2024-04-03 22:49:32 -04:00
Matt Nadareski
7aac1e0bed Bump version 2024-04-02 16:08:42 -04:00
Matt Nadareski
bc569964d8 Update packages 2024-04-02 16:08:17 -04:00
Matt Nadareski
a3ac98a9f4 Bump version 2024-03-24 23:00:41 -04:00
Matt Nadareski
57c0f9b747 Import zlib port, with apologies to Nanook 2024-03-24 22:51:55 -04:00
Matt Nadareski
38f3ea1c98 Bump version 2024-03-12 16:48:36 -04:00
Matt Nadareski
ab36802840 Update packages 2024-03-12 16:34:07 -04:00
Matt Nadareski
422bda1830 Update SabreTools.IO 2024-03-05 11:11:07 -05:00
Matt Nadareski
1c989985d9 Update copyright date 2024-02-27 19:08:56 -05:00
Matt Nadareski
62c6e79ad3 Add nuget package and PR workflows 2024-02-27 19:08:42 -05:00
Matt Nadareski
6bbf521828 Fix repository URL 2023-11-22 09:40:32 -05:00
Matt Nadareski
7a4e2f0ee0 Bump version 2023-11-22 09:39:29 -05:00
Matt Nadareski
1d1a6f5976 Support .NET Framework 2.0 2023-11-22 09:39:18 -05:00
Matt Nadareski
065b68124b Update SabreTools libraries 2023-11-22 09:36:36 -05:00
Matt Nadareski
07b50e8c46 Support ancient .NET 2023-11-14 14:53:50 -05:00
Matt Nadareski
8eb82384d6 Expand supported RIDs 2023-11-08 22:51:25 -05:00
Matt Nadareski
dd6cc0e2f3 Fix whitespace in project file 2023-11-08 10:59:30 -05:00
Matt Nadareski
58502e0362 Enable latest language version 2023-11-07 22:12:51 -05:00
Matt Nadareski
f3bf1082d3 Update Models version 2023-10-25 15:46:02 -04:00
Matt Nadareski
7958b24a36 Update Models version 2023-09-28 23:30:07 -04:00
Matt Nadareski
3010a0523c Use local constants until Models updated 2023-09-23 11:16:11 -04:00
Matt Nadareski
d7670ae685 Add a little more Quantum decoding 2023-09-23 00:32:08 -04:00
Matt Nadareski
2d09d9696a Add source position and length to BitStream 2023-09-23 00:26:52 -04:00
Matt Nadareski
d75883a6cf Start adding Quantum decompression 2023-09-23 00:13:08 -04:00
Matt Nadareski
8e5cf3ee2e Add byte array constructors 2023-09-22 23:52:14 -04:00
Matt Nadareski
e739fd6fd5 Create Process method, make it throw for now 2023-09-22 23:41:06 -04:00
Matt Nadareski
6b238df5dc Address some nullability warnings 2023-09-22 23:38:05 -04:00
Matt Nadareski
3e3a0e122b Add selector selector model 2023-09-22 23:26:07 -04:00
Matt Nadareski
cb6e157cb4 Fix one more location 2023-09-22 23:25:00 -04:00
Matt Nadareski
12466d7083 Fix build after recasting 2023-09-22 23:24:03 -04:00
Matt Nadareski
47cb06cf34 Add notes and init for coding state 2023-09-22 23:20:03 -04:00
Matt Nadareski
c152cba81d Add internal models and constructor 2023-09-22 23:17:14 -04:00
Matt Nadareski
4684a6612c Finish update implementation 2023-09-22 22:58:43 -04:00
Matt Nadareski
a58da1d8db Add one more partial method 2023-09-22 22:07:38 -04:00
Matt Nadareski
8f098a6669 Add some helper Quantum methods 2023-09-22 21:59:15 -04:00
Matt Nadareski
8c5482a59a Make things more model-based 2023-09-22 16:57:36 -04:00
Matt Nadareski
b1f1863e9a Fix build issues from package update 2023-09-22 16:47:14 -04:00
Matt Nadareski
8ab555d6fc Add new package tag 2023-09-22 16:18:51 -04:00
Matt Nadareski
32b2f6c443 Update Models version 2023-09-22 16:06:17 -04:00
Matt Nadareski
44f1544725 Fix nullability warnings 2023-09-22 15:58:06 -04:00
Matt Nadareski
471cbc5707 Fix build 2023-09-22 15:46:56 -04:00
Matt Nadareski
5b785fb28f Use constants from Models 2023-09-22 15:46:45 -04:00
Matt Nadareski
38dd2a5caf Update README 2023-09-22 14:50:08 -04:00
Matt Nadareski
5e21a09fd1 Make header reading consistent 2023-09-22 11:59:46 -04:00
Matt Nadareski
8174af616f Handle model issues, combine logic 2023-09-22 11:53:27 -04:00
Matt Nadareski
297fffe8d7 Add RFC1951 implementation 2023-09-22 11:27:07 -04:00
Matt Nadareski
bd9258d9fa Fix build 2023-09-21 23:43:48 -04:00
Matt Nadareski
b7a081824c Create a separate Huffman decoder 2023-09-21 23:43:32 -04:00
Matt Nadareski
9617e5c583 Simplify BitStream implementation 2023-09-21 23:18:43 -04:00
Matt Nadareski
ec40e759a9 Start clean MSZIP implementation (nw) 2023-09-21 22:59:46 -04:00
Matt Nadareski
15bf2001b5 Add multiple byte reading 2023-09-21 22:27:19 -04:00
Matt Nadareski
81eab984fb Add endian reads for bits 2023-09-21 22:20:57 -04:00
Matt Nadareski
47691d2034 Add shortcut implementations for BitStream 2023-09-21 22:10:19 -04:00
Matt Nadareski
dde90a852d Fix BitStream returns 2023-09-21 22:08:22 -04:00
Matt Nadareski
2ce175af39 Add BitStream skeleton 2023-09-21 22:06:42 -04:00
Matt Nadareski
3353264090 Update README 2023-09-21 21:31:59 -04:00
Matt Nadareski
5477afaf1e Remove partially ported libmspack code 2023-09-21 21:31:45 -04:00
88 changed files with 9074 additions and 7675 deletions

43
.github/workflows/build_nupkg.yml vendored Normal file
View File

@@ -0,0 +1,43 @@
name: Nuget Pack
on:
push:
branches: [ "main" ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: 8.0.x
- name: Restore dependencies
run: dotnet restore
- name: Pack
run: dotnet pack
- name: Upload build
uses: actions/upload-artifact@v4
with:
name: 'Nuget Package'
path: 'bin/Release/*.nupkg'
- name: Upload to rolling
uses: ncipollo/release-action@v1.14.0
with:
allowUpdates: True
artifacts: 'bin/Release/*.nupkg'
body: 'Last built commit: ${{ github.sha }}'
name: 'Rolling Release'
prerelease: True
replacesArtifacts: True
tag: "rolling"
updateOnlyUnreleased: True

17
.github/workflows/check_pr.yml vendored Normal file
View File

@@ -0,0 +1,17 @@
name: Build PR
on: [pull_request]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup .NET
uses: actions/setup-dotnet@v4
with:
dotnet-version: 8.0.x
- name: Build
run: dotnet build

View File

@@ -9,6 +9,12 @@ Find the link to the Nuget package [here](https://www.nuget.org/packages/SabreTo
| Compression Name | Decompress | Compress |
| --- | --- | --- |
| LZ | Yes | No |
| LZX | Incomplete | No |
| MSZIP | Incomplete | No |
| Quantum | Incomplete | No |
| MSZIP | Yes* | No |
**Note:** If something is marked with a `*`, it means that it needs testing.
## External Libraries
| Library Name | Use |
| --- | ---|
| [ZLibPort](https://github.com/Nanook/zlib-C-To-CSharp-Port) | Adds zlib code for internal and external use; minor edits have been made |

View File

@@ -1,36 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<!-- Assembly Properties -->
<TargetFrameworks>net48;net6.0;net7.0;net8.0</TargetFrameworks>
<RuntimeIdentifiers>win-x86;win-x64;linux-x64;osx-x64</RuntimeIdentifiers>
<Version>0.1.0</Version>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<!-- Package Properties -->
<Authors>Matt Nadareski</Authors>
<Description>Clean compression implementations</Description>
<Copyright>Copyright (c) Matt Nadareski 2022-2023</Copyright>
<PackageProjectUrl>https://github.com/SabreTools/</PackageProjectUrl>
<PackageReadmeFile>README.md</PackageReadmeFile>
<RepositoryUrl>https://github.com/SabreTools/SabreTools.Printing</RepositoryUrl>
<RepositoryType>git</RepositoryType>
<PackageTags>compression decompression lz</PackageTags>
<PackageLicenseExpression>MIT</PackageLicenseExpression>
</PropertyGroup>
<PropertyGroup Condition="'$(TargetFramework)'!='net48'">
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<None Include="README.md" Pack="true" PackagePath=""/>
</ItemGroup>
<ItemGroup>
<PackageReference Include="SabreTools.IO" Version="1.1.1" />
<PackageReference Include="SabreTools.Models" Version="1.1.2" />
</ItemGroup>
</Project>

View File

@@ -3,7 +3,9 @@ Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 17
VisualStudioVersion = 17.0.31903.59
MinimumVisualStudioVersion = 10.0.40219.1
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SabreTools.Compression", "SabreTools.Compression.csproj", "{B26E863F-8509-48BB-BABA-4FF83DB28D2A}"
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SabreTools.Compression", "SabreTools.Compression\SabreTools.Compression.csproj", "{B26E863F-8509-48BB-BABA-4FF83DB28D2A}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Test", "Test\Test.csproj", "{FDF6EF41-1B5A-4F3B-A7DD-DEB810E55A30}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
@@ -18,5 +20,9 @@ Global
{B26E863F-8509-48BB-BABA-4FF83DB28D2A}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B26E863F-8509-48BB-BABA-4FF83DB28D2A}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B26E863F-8509-48BB-BABA-4FF83DB28D2A}.Release|Any CPU.Build.0 = Release|Any CPU
{FDF6EF41-1B5A-4F3B-A7DD-DEB810E55A30}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{FDF6EF41-1B5A-4F3B-A7DD-DEB810E55A30}.Debug|Any CPU.Build.0 = Debug|Any CPU
{FDF6EF41-1B5A-4F3B-A7DD-DEB810E55A30}.Release|Any CPU.ActiveCfg = Release|Any CPU
{FDF6EF41-1B5A-4F3B-A7DD-DEB810E55A30}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
EndGlobal

View File

@@ -1,7 +1,7 @@
using System.IO;
using System.Linq;
using System.Text;
using SabreTools.IO;
using SabreTools.IO.Extensions;
using SabreTools.Models.Compression.LZ;
using static SabreTools.Models.Compression.LZ.Constants;
@@ -17,11 +17,7 @@ namespace SabreTools.Compression.LZ
/// </summary>
/// <param name="compressed">Byte array representing the compressed data</param>
/// <returns>Decompressed data as a byte array, null on error</returns>
#if NET48
public static byte[] Decompress(byte[] compressed)
#else
public static byte[]? Decompress(byte[]? compressed)
#endif
{
// If we have an invalid input
if (compressed == null || compressed.Length == 0)
@@ -37,11 +33,7 @@ namespace SabreTools.Compression.LZ
/// </summary>
/// <param name="compressed">Stream representing the compressed data</param>
/// <returns>Decompressed data as a byte array, null on error</returns>
#if NET48
public static byte[] Decompress(Stream compressed)
#else
public static byte[]? Decompress(Stream? compressed)
#endif
{
// If we have an invalid input
if (compressed == null || compressed.Length == 0)
@@ -65,7 +57,7 @@ namespace SabreTools.Compression.LZ
long read = lz.CopyTo(sourceState, destState, out LZERROR error);
// Copy the data to the buffer
var decompressed = new byte[0];
byte[]? decompressed;
if (read == 0 || (error != LZERROR.LZERROR_OK && error != LZERROR.LZERROR_NOT_LZ))
{
decompressed = null;
@@ -87,11 +79,7 @@ namespace SabreTools.Compression.LZ
/// <summary>
/// Reconstructs the full filename of the compressed file
/// </summary>
#if NET48
public static string GetExpandedName(string input, out LZERROR error)
#else
public static string? GetExpandedName(string input, out LZERROR error)
#endif
{
// Try to open the file as a compressed stream
var fileStream = File.Open(input, FileMode.Open, FileAccess.Read, FileShare.ReadWrite);
@@ -103,7 +91,7 @@ namespace SabreTools.Compression.LZ
string inputExtension = Path.GetExtension(input).TrimStart('.');
// If we have no extension
if (string.IsNullOrWhiteSpace(inputExtension))
if (string.IsNullOrEmpty(inputExtension))
return Path.GetFileNameWithoutExtension(input);
// If we have an extension of length 1
@@ -137,11 +125,7 @@ namespace SabreTools.Compression.LZ
/// <param name="error">Output representing the last error</param>
/// <returns>An initialized State, null on error</returns>
/// <remarks>Uncompressed streams are represented by a State with no buffer</remarks>
#if NET48
public State Open(Stream stream, out LZERROR error)
#else
public State? Open(Stream stream, out LZERROR error)
#endif
{
var lzs = Init(stream, out error);
if (error == LZERROR.LZERROR_OK || error == LZERROR.LZERROR_NOT_LZ)
@@ -170,11 +154,7 @@ namespace SabreTools.Compression.LZ
/// <param name="error">Output representing the last error</param>
/// <returns>An initialized State, null on error</returns>
/// <remarks>Uncompressed streams are represented by a State with no buffer</remarks>
#if NET48
public State Init(Stream source, out LZERROR error)
#else
public State? Init(Stream? source, out LZERROR error)
#endif
{
// If we have an invalid source
if (source == null)
@@ -540,11 +520,7 @@ namespace SabreTools.Compression.LZ
/// <param name="data">Stream to parse</param>
/// <param name="error">Output representing the last error</param>
/// <returns>Filled file header on success, null on error</returns>
#if NET48
private FileHeaader ParseFileHeader(Stream data, out LZERROR error)
#else
private FileHeaader? ParseFileHeader(Stream data, out LZERROR error)
#endif
{
error = LZERROR.LZERROR_OK;
var fileHeader = new FileHeaader();
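
For orientation, a minimal usage sketch of the static LZ API shown in this diff. Only the Decompress signature above comes from the source; the hosting class name (assumed here to be Decompressor) and the file paths are placeholders.

using System.IO;
using SabreTools.Compression.LZ;

internal class LzExpandExample
{
    private static void Main()
    {
        // Hypothetical input: an LZ/expand-style compressed file (the class name Decompressor is an assumption)
        byte[] compressed = File.ReadAllBytes("SETUP.EX_");

        // Static Decompress returns the expanded bytes, or null on error, per the signature above
        byte[]? expanded = Decompressor.Decompress(compressed);
        if (expanded != null)
            File.WriteAllBytes("SETUP.EXE", expanded);
    }
}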

View File

@@ -0,0 +1,381 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using SabreTools.IO.Streams;
using SabreTools.Models.Compression.MSZIP;
using static SabreTools.Models.Compression.MSZIP.Constants;
namespace SabreTools.Compression.MSZIP
{
/// <see href="https://www.rfc-editor.org/rfc/rfc1951"/>
public class DeflateDecompressor
{
/// <summary>
/// Internal bitstream to use for decompression
/// </summary>
private readonly ReadOnlyBitStream _bitStream;
/// <summary>
/// Create a new Decompressor from a byte array
/// </summary>
/// <param name="input">Byte array to decompress</param>
public DeflateDecompressor(byte[]? input)
{
// If we have an invalid stream
if (input == null || input.Length == 0)
throw new ArgumentException(nameof(input));
// Create a memory stream to wrap
var ms = new MemoryStream(input);
// Wrap the stream in a ReadOnlyBitStream
_bitStream = new ReadOnlyBitStream(ms);
}
/// <summary>
/// Create a new Decompressor from a Stream
/// </summary>
/// <param name="input">Stream to decompress</param>
public DeflateDecompressor(Stream? input)
{
// If we have an invalid stream
if (input == null || !input.CanRead || !input.CanSeek)
throw new ArgumentException(nameof(input));
// Wrap the stream in a ReadOnlyBitStream
_bitStream = new ReadOnlyBitStream(input);
}
/// <summary>
/// Decompress a stream into a <see cref="Block"/>
/// </summary>
/// <returns>Block containing the decompressed data on success, null on error</returns>
public Block? Process()
{
// Create a new block
var block = new Block();
// Try to read the header
block.BlockHeader = ReadBlockHeader();
if (block.BlockHeader.Signature != 0x4B43)
return null;
// Loop and read the deflate blocks
var deflateBlocks = new List<DeflateBlock>();
while (true)
{
// Try to read the deflate block
var deflateBlock = ReadDeflateBlock();
if (deflateBlock == null)
return null;
// Add the deflate block to the set
deflateBlocks.Add(deflateBlock);
// If we're at the final block, exit out of the loop
if (deflateBlock.Header!.BFINAL)
break;
}
// Assign the deflate blocks to the block and return
block.CompressedBlocks = deflateBlocks.ToArray();
return block;
}
#region Headers
/// <summary>
/// Read a BlockHeader from the input stream
/// </summary>
private BlockHeader ReadBlockHeader()
{
var header = new BlockHeader();
header.Signature = _bitStream.ReadUInt16() ?? 0;
return header;
}
/// <summary>
/// Read a DeflateBlockHeader from the input stream
/// </summary>
private DeflateBlockHeader ReadDeflateBlockHeader()
{
var header = new DeflateBlockHeader();
header.BFINAL = _bitStream.ReadBit() == 0x01;
uint? btype = _bitStream.ReadBitsLSB(2) ?? 0b11;
header.BTYPE = (CompressionType)btype;
return header;
}
/// <summary>
/// Read a NonCompressedBlockHeader from the input stream
/// </summary>
private NonCompressedBlockHeader ReadNonCompressedBlockHeader()
{
var header = new NonCompressedBlockHeader();
header.LEN = _bitStream.ReadUInt16() ?? 0;
header.NLEN = _bitStream.ReadUInt16() ?? 0;
return header;
}
/// <summary>
/// Read a FixedHuffmanCompressedBlockHeader from the input stream
/// </summary>
private (FixedCompressedDataHeader, uint, uint) ReadFixedCompressedDataHeader()
{
// Nothing needs to be read, all values are fixed
return (new FixedCompressedDataHeader(), 288, 30);
}
/// <summary>
/// Read a DynamicHuffmanCompressedBlockHeader from the input stream
/// </summary>
private (DynamicCompressedDataHeader, uint, uint) ReadDynamicCompressedDataHeader()
{
var header = new DynamicCompressedDataHeader();
// Setup the counts first
uint numLiteral = 257 + _bitStream.ReadBitsLSB(5) ?? 0;
uint numDistance = 1 + _bitStream.ReadBitsLSB(5) ?? 0;
uint numLength = 4 + _bitStream.ReadBitsLSB(4) ?? 0;
// Convert the alphabet based on lengths
uint[] lengthLengths = new uint[19];
for (int i = 0; i < numLength; i++)
{
lengthLengths[BitLengthOrder[i]] = (byte)(_bitStream.ReadBitsLSB(3) ?? 0);
}
for (int i = (int)numLength; i < 19; i++)
{
lengthLengths[BitLengthOrder[i]] = 0;
}
// Make the lengths tree
var lengthTree = new HuffmanDecoder(lengthLengths, 19);
// Setup the literal and distance lengths
header.LiteralLengths = new uint[288];
header.DistanceCodes = new uint[32];
// Read the literal and distance codes
int repeatCode = 1;
uint leftover = ReadHuffmanLengths(lengthTree, header.LiteralLengths, numLiteral, 0, ref repeatCode);
_ = ReadHuffmanLengths(lengthTree, header.DistanceCodes, numDistance, leftover, ref repeatCode);
return (header, numLiteral, numDistance);
}
#endregion
#region Data
/// <summary>
/// Read an RFC1951 block
/// </summary>
private DeflateBlock? ReadDeflateBlock()
{
var deflateBlock = new DeflateBlock();
// Try to read the deflate block header
deflateBlock.Header = ReadDeflateBlockHeader();
switch (deflateBlock.Header.BTYPE)
{
// If stored with no compression
case CompressionType.NoCompression:
(var header00, var bytes00) = ReadNoCompression();
if (header00 == null || bytes00 == null)
return null;
deflateBlock.DataHeader = header00;
deflateBlock.Data = bytes00;
break;
// If compressed with fixed Huffman codes
case CompressionType.FixedHuffman:
(var header01, var bytes01) = ReadFixedHuffman();
if (header01 == null || bytes01 == null)
return null;
deflateBlock.DataHeader = header01;
deflateBlock.Data = bytes01;
break;
// If compressed with dynamic Huffman codes
case CompressionType.DynamicHuffman:
(var header10, var bytes10) = ReadDynamicHuffman();
if (header10 == null || bytes10 == null)
return null;
deflateBlock.DataHeader = header10;
deflateBlock.Data = bytes10;
break;
// Reserved is not allowed and is treated as an error
case CompressionType.Reserved:
default:
return null;
}
return deflateBlock;
}
/// <summary>
/// Read an RFC1951 block with no compression
/// </summary>
private (NonCompressedBlockHeader?, byte[]?) ReadNoCompression()
{
// Skip any remaining bits in current partially processed byte
_bitStream.Discard();
// Read LEN and NLEN
var header = ReadNonCompressedBlockHeader();
if (header.LEN == 0 && header.NLEN == 0)
return (null, null);
// Copy LEN bytes of data to output
return (header, _bitStream.ReadBytes(header.LEN));
}
/// <summary>
/// Read an RFC1951 block with fixed Huffman compression
/// </summary>
private (FixedCompressedDataHeader, byte[]?) ReadFixedHuffman()
{
var bytes = new List<byte>();
// Get the fixed huffman header
(var header, uint numLiteral, uint numDistance) = ReadFixedCompressedDataHeader();
// Make the literal and distance trees
var literalTree = new HuffmanDecoder(header.LiteralLengths, numLiteral);
var distanceTree = new HuffmanDecoder(header.DistanceCodes, numDistance);
// Now loop and decode
return (header, ReadHuffmanBlock(literalTree, distanceTree));
}
/// <summary>
/// Read an RFC1951 block with dynamic Huffman compression
/// </summary>
private (DynamicCompressedDataHeader?, byte[]?) ReadDynamicHuffman()
{
// Get the dynamic huffman header
(var header, uint numLiteral, uint numDistance) = ReadDynamicCompressedDataHeader();
// Make the literal and distance trees
var literalTree = new HuffmanDecoder(header.LiteralLengths, numLiteral);
var distanceTree = new HuffmanDecoder(header.DistanceCodes, numDistance);
// Now loop and decode
return (header, ReadHuffmanBlock(literalTree, distanceTree));
}
/// <summary>
/// Read an RFC1951 block with Huffman compression
/// </summary>
private byte[]? ReadHuffmanBlock(HuffmanDecoder literalTree, HuffmanDecoder distanceTree)
{
// Now loop and decode
var bytes = new List<byte>();
while (true)
{
// Decode the next literal value
int sym = literalTree.Decode(_bitStream);
// If we have an immediate symbol
if (sym < 256)
{
bytes.Add((byte)sym);
}
// If we have the ending symbol
else if (sym == 256)
{
break;
}
// If we have a length/distance pair
else
{
sym -= 257;
uint? length = CopyLengths[sym] + _bitStream.ReadBitsLSB(LiteralExtraBits[sym]);
if (length == null)
return null;
int distanceCode = distanceTree.Decode(_bitStream);
uint? distance = CopyOffsets[distanceCode] + _bitStream.ReadBitsLSB(DistanceExtraBits[distanceCode]);
if (distance == null)
return null;
byte[] arr = bytes.Skip(bytes.Count - (int)distance).Take((int)length).ToArray();
bytes.AddRange(arr);
}
}
// Return the decoded array
return bytes.ToArray();
}
/// <summary>
/// Read the huffman lengths
/// </summary>
private uint ReadHuffmanLengths(HuffmanDecoder lengthTree, uint[] lengths, uint numCodes, uint repeat, ref int repeatCode)
{
int i = 0;
// First fill in any repeat codes
while (repeat > 0)
{
lengths[i++] = (byte)repeatCode;
repeat--;
}
// Then process the rest of the table
while (i < numCodes)
{
// Get the next length encoding from the stream
int lengthEncoding = lengthTree.Decode(_bitStream);
// Values less than 16 are encoded directly
if (lengthEncoding < 16)
{
lengths[i++] = (byte)lengthEncoding;
repeatCode = lengthEncoding;
}
// Otherwise, the repeat count is based on the next values
else
{
// Determine the repeat count and code from the encoding
if (lengthEncoding == 16)
{
repeat = 3 + _bitStream.ReadBitsLSB(2) ?? 0;
}
else if (lengthEncoding == 17)
{
repeat = 3 + _bitStream.ReadBitsLSB(3) ?? 0;
repeatCode = 0;
}
else if (lengthEncoding == 18)
{
repeat = 11 + _bitStream.ReadBitsLSB(7) ?? 0;
repeatCode = 0;
}
// Read in the expected lengths
while (i < numCodes && repeat > 0)
{
lengths[i++] = (byte)repeatCode;
repeat--;
}
}
}
// Return any repeat value we have left over
return repeat;
}
#endregion
}
}
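
To show how the pieces above fit together, a hedged sketch of a caller; the input file name is a placeholder, and only the constructor, Process, and the Block members visible in this diff are used.

using System;
using System.IO;
using SabreTools.Compression.MSZIP;
using SabreTools.Models.Compression.MSZIP;

internal class MszipExample
{
    private static void Main()
    {
        // Placeholder: a single MSZIP data block (it must begin with the 'CK' signature, 0x4B43 little-endian)
        byte[] blockData = File.ReadAllBytes("block0.bin");

        var decompressor = new DeflateDecompressor(blockData);
        Block? block = decompressor.Process();

        if (block == null)
            Console.Error.WriteLine("Not a valid MSZIP block");
        else
            Console.WriteLine("Decoded " + (block.CompressedBlocks?.Length ?? 0) + " deflate block(s)");
    }
}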

View File

@@ -0,0 +1,143 @@
using System;
using System.IO;
using System.Linq;
using SabreTools.IO.Streams;
namespace SabreTools.Compression.MSZIP
{
public class HuffmanDecoder
{
/// <summary>
/// Root Huffman node for the tree
/// </summary>
private HuffmanNode _root;
/// <summary>
/// Create a Huffman tree to decode with
/// </summary>
/// <param name="lengths">Array representing the number of bits for each value</param>
/// <param name="numCodes">Number of Huffman codes encoded</param>
public HuffmanDecoder(uint[]? lengths, uint numCodes)
{
// Ensure we have lengths
if (lengths == null)
throw new ArgumentNullException(nameof(lengths));
// Set the root to null for now
HuffmanNode? root = null;
// Determine the value for max_bits
uint max_bits = lengths.Max();
// Count the number of codes for each code length
int[] bl_count = new int[max_bits + 1];
for (int i = 0; i < numCodes; i++)
{
uint length = lengths[i];
bl_count[length]++;
}
// Find the numerical value of the smallest code for each code length
int[] next_code = new int[max_bits + 1];
int code = 0;
bl_count[0] = 0;
for (int bits = 1; bits <= max_bits; bits++)
{
code = (code + bl_count[bits - 1]) << 1;
next_code[bits] = code;
}
// Assign numerical values to all codes, using consecutive
// values for all codes of the same length with the base
// values determined at step 2. Codes that are never used
// (which have a bit length of zero) must not be assigned a value.
int[] tree = new int[numCodes];
for (int i = 0; i < numCodes; i++)
{
uint len = lengths[i];
if (len == 0)
continue;
// Set the value in the tree
tree[i] = next_code[len];
next_code[len]++;
}
// Now insert the values into the structure
for (int i = 0; i < numCodes; i++)
{
// If we have a 0-length code
uint len = lengths[i];
if (len == 0)
continue;
// Insert the value starting at the root
root = Insert(root, i, len, tree[i]);
}
// Assign the root value
_root = root!;
}
/// <summary>
/// Decode the next value from the stream as a Huffman-encoded value
/// </summary>
/// <param name="input">BitStream representing the input</param>
/// <returns>Value of the node described by the input</returns>
public int Decode(ReadOnlyBitStream input)
{
// Start at the root of the tree
var node = _root;
while (node?.Left != null)
{
// Read the next bit to determine direction
byte? nextBit = input.ReadBit();
if (nextBit == null)
throw new EndOfStreamException();
// Left == 0, Right == 1
if (nextBit == 0)
node = node.Left;
else
node = node.Right;
}
// We traversed to the bottom of the branch
return node?.Value ?? 0;
}
/// <summary>
/// Insert a value based on an existing Huffman node
/// </summary>
/// <param name="node">Existing node to append to, or null if root</param>
/// <param name="value">Value to append to the tree</param>
/// <param name="length">Length of the current encoding</param>
/// <param name="code">Encoding of the value to traverse</param>
/// <returns>New instance of the node with value appended</returns>
private static HuffmanNode Insert(HuffmanNode? node, int value, uint length, int code)
{
// If no node is provided, create a new one
if (node == null)
node = new HuffmanNode();
// If we're at the correct location, insert the value
if (length == 0)
{
node.Value = value;
return node;
}
// Otherwise, get the next bit from the code
byte nextBit = (byte)(code >> (int)(length - 1) & 1);
// Left == 0, Right == 1
if (nextBit == 0)
node.Left = Insert(node.Left, value, length - 1, code);
else
node.Right = Insert(node.Right, value, length - 1, code);
// Now return the node
return node;
}
}
}
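
The constructor above follows the canonical code assignment from RFC 1951 section 3.2.2: count the codes per length, derive the smallest code for each length, then hand out consecutive values. As an independent illustration (not part of the repository), here is that construction run standalone on the RFC's own example alphabet:

using System;

internal static class CanonicalHuffmanExample
{
    private static void Main()
    {
        // RFC 1951 section 3.2.2 example: symbols A..H with code lengths 3,3,3,3,3,2,4,4
        uint[] lengths = new uint[] { 3, 3, 3, 3, 3, 2, 4, 4 };
        int maxBits = 4;

        // Step 1: count the number of codes for each code length (bl_count above)
        int[] blCount = new int[maxBits + 1];
        foreach (uint len in lengths)
            blCount[len]++;

        // Step 2: find the numerical value of the smallest code for each length (next_code above)
        int[] nextCode = new int[maxBits + 1];
        int code = 0;
        for (int bits = 1; bits <= maxBits; bits++)
        {
            code = (code + blCount[bits - 1]) << 1;
            nextCode[bits] = code;
        }

        // Step 3: assign consecutive values to codes of the same length
        for (int i = 0; i < lengths.Length; i++)
        {
            int len = (int)lengths[i];
            Console.WriteLine((char)('A' + i) + ": " + Convert.ToString(nextCode[len], 2).PadLeft(len, '0'));
            nextCode[len]++;
        }
        // Expected: A=010 B=011 C=100 D=101 E=110 F=00 G=1110 H=1111
    }
}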

View File

@@ -0,0 +1,23 @@
namespace SabreTools.Compression.MSZIP
{
/// <summary>
/// Represents a single node in a Huffman tree
/// </summary>
public class HuffmanNode
{
/// <summary>
/// Left child of the current node
/// </summary>
public HuffmanNode? Left { get; set; }
/// <summary>
/// Right child of the current node
/// </summary>
public HuffmanNode? Right { get; set; }
/// <summary>
/// Value of the current node
/// </summary>
public int Value { get; set; }
}
}

View File

@@ -0,0 +1,50 @@
#if NET20 || NET35
using System;
using System.IO;
namespace SabreTools.Compression
{
/// <summary>
/// Derived from the mscorlib code from .NET Framework 4.0
/// </summary>
internal static class OldDotNet
{
public static void CopyTo(this Stream source, Stream destination)
{
if (destination == null)
{
throw new ArgumentNullException("destination");
}
if (!source.CanRead && !source.CanWrite)
{
throw new ObjectDisposedException(null);
}
if (!destination.CanRead && !destination.CanWrite)
{
throw new ObjectDisposedException("destination");
}
if (!source.CanRead)
{
throw new NotSupportedException();
}
if (!destination.CanWrite)
{
throw new NotSupportedException();
}
byte[] array = new byte[81920];
int count;
while ((count = source.Read(array, 0, array.Length)) != 0)
{
destination.Write(array, 0, count);
}
}
}
}
#endif

View File

@@ -0,0 +1,51 @@
namespace SabreTools.Compression.Quantum
{
/// <see href="www.russotto.net/quantumcomp.html"/>
/// TODO: Remove this class when Models gets updated
public static class Constants
{
public static readonly int[] PositionSlot =
[
0x00000, 0x00001, 0x00002, 0x00003, 0x00004, 0x00006, 0x00008, 0x0000c,
0x00010, 0x00018, 0x00020, 0x00030, 0x00040, 0x00060, 0x00080, 0x000c0,
0x00100, 0x00180, 0x00200, 0x00300, 0x00400, 0x00600, 0x00800, 0x00c00,
0x01000, 0x01800, 0x02000, 0x03000, 0x04000, 0x06000, 0x08000, 0x0c000,
0x10000, 0x18000, 0x20000, 0x30000, 0x40000, 0x60000, 0x80000, 0xc0000,
0x100000, 0x180000
];
public static readonly int[] PositionExtraBits =
[
0, 0, 0, 0, 1, 1, 2, 2,
3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10,
11, 11, 12, 12, 13, 13, 14, 14,
15, 15, 16, 16, 17, 17, 18, 18,
19, 19
];
public static readonly int[] LengthSlot =
[
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
0x0a, 0x0c, 0x0e, 0x12, 0x16, 0x1a, 0x1e, 0x26,
0x2e, 0x36, 0x3e, 0x4e, 0x5e, 0x6e, 0x7e, 0x9e,
0xbe, 0xde, 0xfe
];
public static readonly int[] LengthExtraBits =
[
0, 0, 0, 0, 0, 0, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3,
3, 3, 4, 4, 4, 4, 5, 5,
5, 5, 0
];
/// <summary>
/// Number of position slots for (tsize - 10)
/// </summary>
public static readonly int[] NumPositionSlots =
[
20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42
];
}
}

View File

@@ -0,0 +1,422 @@
using System;
using System.Collections.Generic;
using System.IO;
using SabreTools.IO.Streams;
using SabreTools.Models.Compression.Quantum;
using static SabreTools.Compression.Quantum.Constants;
namespace SabreTools.Compression.Quantum
{
/// <see href="www.russotto.net/quantumcomp.html"/>
public class Decompressor
{
/// <summary>
/// Internal bitstream to use for decompression
/// </summary>
private readonly ReadOnlyBitStream _bitStream;
#region Models
/// <summary>
/// Selector 0: literal, 64 entries, starting symbol 0
/// </summary>
private Model _model0;
/// <summary>
/// Selector 1: literal, 64 entries, starting symbol 64
/// </summary>
private Model _model1;
/// <summary>
/// Selector 2: literal, 64 entries, starting symbol 128
/// </summary>
private Model _model2;
/// <summary>
/// Selector 3: literal, 64 entries, starting symbol 192
/// </summary>
private Model _model3;
/// <summary>
/// Selector 4: LZ, 3 character matches
/// </summary>
private Model _model4;
/// <summary>
/// Selector 5: LZ, 4 character matches
/// </summary>
private Model _model5;
/// <summary>
/// Selector 6: LZ, 5+ character matches
/// </summary>
private Model _model6;
/// <summary>
/// Selector 6 length model
/// </summary>
private Model _model6len;
/// <summary>
/// Selector selector model
/// </summary>
private Model _selector;
#endregion
#region Coding State
/// <summary>
/// Arithmetic coding state: high
/// </summary>
private ushort CS_H;
/// <summary>
/// Arithmetic coding state: low
/// </summary>
private ushort CS_L;
/// <summary>
/// Arithmetic coding state: current
/// </summary>
private ushort CS_C;
#endregion
/// <summary>
/// Create a new Decompressor from a byte array
/// </summary>
/// <param name="input">Byte array to decompress</param>
/// <param name="windowBits">Number of bits in the sliding window</param>
public Decompressor(byte[]? input, uint windowBits)
{
// If we have an invalid stream
if (input == null || input.Length == 0)
throw new ArgumentException(nameof(input));
// If we have an invalid value for the window bits
if (windowBits < 10 || windowBits > 21)
throw new ArgumentOutOfRangeException(nameof(windowBits));
// Create a memory stream to wrap
var ms = new MemoryStream(input);
// Wrap the stream in a ReadOnlyBitStream
_bitStream = new ReadOnlyBitStream(ms);
// Initialize literal models
this._model0 = CreateModel(0, 64);
this._model1 = CreateModel(64, 64);
this._model2 = CreateModel(128, 64);
this._model3 = CreateModel(192, 64);
// Initialize LZ models
int maxBitLength = (int)(windowBits * 2);
this._model4 = CreateModel(0, maxBitLength > 24 ? 24 : maxBitLength);
this._model5 = CreateModel(0, maxBitLength > 36 ? 36 : maxBitLength);
this._model6 = CreateModel(0, maxBitLength);
this._model6len = CreateModel(0, 27);
// Initialize the selector model
this._selector = CreateModel(0, 7);
// Initialize coding state
this.CS_H = 0;
this.CS_L = 0;
this.CS_C = 0;
}
/// <summary>
/// Create a new Decompressor from a Stream
/// </summary>
/// <param name="input">Stream to decompress</param>
/// <param name="windowBits">Number of bits in the sliding window</param>
public Decompressor(Stream? input, uint windowBits)
{
// If we have an invalid stream
if (input == null || !input.CanRead || !input.CanSeek)
throw new ArgumentException(nameof(input));
// If we have an invalid value for the window bits
if (windowBits < 10 || windowBits > 21)
throw new ArgumentOutOfRangeException(nameof(windowBits));
// Wrap the stream in a ReadOnlyBitStream
_bitStream = new ReadOnlyBitStream(input);
// Initialize literal models
this._model0 = CreateModel(0, 64);
this._model1 = CreateModel(64, 64);
this._model2 = CreateModel(128, 64);
this._model3 = CreateModel(192, 64);
// Initialize LZ models
int maxBitLength = (int)(windowBits * 2);
this._model4 = CreateModel(0, maxBitLength > 24 ? 24 : maxBitLength);
this._model5 = CreateModel(0, maxBitLength > 36 ? 36 : maxBitLength);
this._model6 = CreateModel(0, maxBitLength);
this._model6len = CreateModel(0, 27);
// Initialize the selector model
this._selector = CreateModel(0, 7);
// Initialize coding state
this.CS_H = 0;
this.CS_L = 0;
this.CS_C = 0;
}
/// <summary>
/// Process the stream and return the decompressed output
/// </summary>
/// <returns>Byte array representing the decompressed data, null on error</returns>
public byte[] Process()
{
// Initialize the coding state
CS_H = 0xffff;
CS_L = 0x0000;
CS_C = (ushort)(_bitStream.ReadBitsMSB(16) ?? 0);
// Loop until the end of the stream
var bytes = new List<byte>();
while (_bitStream.Position < _bitStream.Length)
{
// Determine the selector to use
int selector = GetSymbol(_selector);
// Handle literal selectors
if (selector < 4)
{
switch (selector)
{
case 0:
bytes.Add((byte)GetSymbol(_model0));
break;
case 1:
bytes.Add((byte)GetSymbol(_model1));
break;
case 2:
bytes.Add((byte)GetSymbol(_model2));
break;
case 3:
bytes.Add((byte)GetSymbol(_model3));
break;
default:
throw new ArgumentOutOfRangeException();
}
}
// Handle LZ selectors
else
{
int offset, length;
switch (selector)
{
case 4:
int model4sym = GetSymbol(_model4);
int model4extra = (int)(_bitStream.ReadBitsMSB(PositionExtraBits[model4sym]) ?? 0);
offset = PositionSlot[model4sym] + model4extra + 1;
length = 3;
break;
case 5:
int model5sym = GetSymbol(_model5);
int model5extra = (int)(_bitStream.ReadBitsMSB(PositionExtraBits[model5sym]) ?? 0);
offset = PositionSlot[model5sym] + model5extra + 1;
length = 4;
break;
case 6:
int lengthSym = GetSymbol(_model6len);
int lengthExtra = (int)(_bitStream.ReadBitsMSB(LengthExtraBits[lengthSym]) ?? 0);
length = LengthSlot[lengthSym] + lengthExtra + 5;
int model6sym = GetSymbol(_model6);
int model6extra = (int)(_bitStream.ReadBitsMSB(PositionExtraBits[model6sym]) ?? 0);
offset = PositionSlot[model6sym] + model6extra + 1;
break;
default:
throw new ArgumentOutOfRangeException();
}
// Copy the previous data
int copyIndex = bytes.Count - offset;
while (length-- > 0)
{
bytes.Add(bytes[copyIndex++]);
}
// TODO: Add MS-CAB specific padding
// TODO: Add Cinematronics specific checksum
}
}
return bytes.ToArray();
}
/// <summary>
/// Create and initialize a model based on the start symbol and length
/// </summary>
private Model CreateModel(ushort start, int length)
{
// Create the model
var model = new Model
{
Entries = length,
Symbols = new ModelSymbol[length],
TimeToReorder = 4,
};
// Populate the symbol array
for (int i = 0; i < length; i++)
{
model.Symbols[i] = new ModelSymbol
{
Symbol = (ushort)(start + i),
CumulativeFrequency = (ushort)(length - 1),
};
}
return model;
}
/// <summary>
/// Get the next symbol from a model
/// </summary>
private int GetSymbol(Model model)
{
int freq = GetFrequency(model.Symbols![0]!.CumulativeFrequency);
int i;
for (i = 1; i < model.Entries; i++)
{
if (model.Symbols[i]!.CumulativeFrequency <= freq)
break;
}
int sym = model.Symbols![i - 1]!.Symbol;
GetCode(model.Symbols![i - 1]!.CumulativeFrequency,
model.Symbols![i]!.CumulativeFrequency,
model.Symbols![0]!.CumulativeFrequency);
UpdateModel(model, i);
return sym;
}
/// <summary>
/// Get the next code based on the frequencies
/// </summary>
private void GetCode(int prevFrequency, int currentFrequency, int totalFrequency)
{
uint range = (uint)(((CS_H - CS_L) & 0xFFFF) + 1);
CS_H = (ushort)(CS_L + (prevFrequency * range) / totalFrequency - 1);
CS_L = (ushort)(CS_L + (currentFrequency * range) / totalFrequency);
while (true)
{
if ((CS_L & 0x8000) != (CS_H & 0x8000))
{
if ((CS_L & 0x4000) != 0 && (CS_H & 0x4000) == 0)
{
// Underflow case
CS_C ^= 0x4000;
CS_L &= 0x3FFF;
CS_H |= 0x4000;
}
else
{
break;
}
}
CS_L <<= 1;
CS_H = (ushort)((CS_H << 1) | 1);
CS_C = (ushort)((CS_C << 1) | _bitStream.ReadBit() ?? 0);
}
}
/// <summary>
/// Update the model after an encode or decode step
/// </summary>
private void UpdateModel(Model model, int lastUpdated)
{
// Update cumulative frequencies
for (int i = 0; i < lastUpdated; i++)
{
var sym = model.Symbols![i]!;
sym.CumulativeFrequency += 8;
}
// Decrement reordering time, if needed
if (model.Symbols![0]!.CumulativeFrequency > 3800)
model.TimeToReorder--;
// If we haven't hit the reordering time
if (model.TimeToReorder > 0)
{
// Update the cumulative frequencies
for (int i = model.Entries - 1; i >= 0; i--)
{
// Divide with truncation by 2
var sym = model.Symbols![i]!;
sym.CumulativeFrequency >>= 1;
// If we are at or below the next frequency
if (i != 0 && sym.CumulativeFrequency <= model.Symbols![i + 1]!.CumulativeFrequency)
sym.CumulativeFrequency = (ushort)(model.Symbols![i + 1]!.CumulativeFrequency + 1);
}
}
// If we hit the reordering time
else
{
// Calculate frequencies from cumulative frequencies
for (int i = 0; i < model.Entries; i++)
{
if (i != model.Entries - 1)
model.Symbols![i]!.CumulativeFrequency -= model.Symbols![i + 1]!.CumulativeFrequency;
model.Symbols![i]!.CumulativeFrequency++;
model.Symbols![i]!.CumulativeFrequency >>= 1;
}
// Sort frequencies in decreasing order
for (int i = 0; i < model.Entries; i++)
{
for (int j = i + 1; j < model.Entries; j++)
{
if (model.Symbols![i]!.CumulativeFrequency < model.Symbols![j]!.CumulativeFrequency)
{
var temp = model.Symbols[i];
model.Symbols[i] = model.Symbols[j];
model.Symbols[j] = temp;
}
}
}
// Calculate cumulative frequencies from frequencies
for (int i = model.Entries - 1; i >= 0; i--)
{
if (i != model.Entries - 1)
model.Symbols![i]!.CumulativeFrequency += model.Symbols![i + 1]!.CumulativeFrequency;
}
// Reset the time to reorder
model.TimeToReorder = 50;
}
}
/// <summary>
/// Get the frequency of a symbol based on its total frequency
/// </summary>
private ushort GetFrequency(ushort totalFrequency)
{
ulong range = (ulong)(((CS_H - CS_L) & 0xFFFF) + 1);
ulong frequency = (ulong)((CS_C - CS_L + 1) * totalFrequency - 1) / range;
return (ushort)(frequency & 0xFFFF);
}
}
}
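
A hedged sketch of driving the Quantum decompressor above; the input file is a placeholder and the window size is just one value inside the 10-21 range the constructor accepts. Per the README change earlier in this diff, Quantum support is still marked incomplete, so this only illustrates the intended call shape.

using System;
using System.IO;
using SabreTools.Compression.Quantum;

internal class QuantumExample
{
    private static void Main()
    {
        // Placeholder: raw Quantum-compressed data read from somewhere
        byte[] frame = File.ReadAllBytes("frame0.bin");

        // Window bits must be 10-21 (checked in the constructor); 16 is an arbitrary valid choice
        var decompressor = new Decompressor(frame, windowBits: 16);
        byte[] output = decompressor.Process();

        Console.WriteLine("Decompressed " + output.Length + " bytes");
    }
}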

View File

@@ -0,0 +1,34 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<!-- Assembly Properties -->
<TargetFrameworks>net20;net35;net40;net452;net462;net472;net48;netcoreapp3.1;net5.0;net6.0;net7.0;net8.0</TargetFrameworks>
<RuntimeIdentifiers>win-x86;win-x64;win-arm64;linux-x64;linux-arm64;osx-x64;osx-arm64</RuntimeIdentifiers>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<LangVersion>latest</LangVersion>
<Nullable>enable</Nullable>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
<Version>0.4.5</Version>
<!-- Package Properties -->
<Authors>Matt Nadareski</Authors>
<Description>Clean compression implementations</Description>
<Copyright>Copyright (c) Matt Nadareski 2022-2024</Copyright>
<PackageProjectUrl>https://github.com/SabreTools/</PackageProjectUrl>
<PackageReadmeFile>README.md</PackageReadmeFile>
<RepositoryUrl>https://github.com/SabreTools/SabreTools.Compression</RepositoryUrl>
<RepositoryType>git</RepositoryType>
<PackageTags>compression decompression lz mszip</PackageTags>
<PackageLicenseExpression>MIT</PackageLicenseExpression>
</PropertyGroup>
<ItemGroup>
<None Include="../README.md" Pack="true" PackagePath="" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="SabreTools.IO" Version="1.3.7" />
<PackageReference Include="SabreTools.Models" Version="1.4.2" />
</ItemGroup>
</Project>

View File

@@ -0,0 +1,218 @@
using System;
using System.Runtime.InteropServices;
namespace SabreTools.Compression.zlib
{
public static unsafe class CRuntime
{
private static readonly string numbers = "0123456789";
public static void* malloc(ulong size)
{
return malloc((long)size);
}
public static void* malloc(long size)
{
var ptr = Marshal.AllocHGlobal((int)size);
MemoryStats.Allocated();
return ptr.ToPointer();
}
public static void free(void* a)
{
if (a == null)
return;
var ptr = new IntPtr(a);
Marshal.FreeHGlobal(ptr);
MemoryStats.Freed();
}
public static void memcpy(void* a, void* b, long size)
{
var ap = (byte*)a;
var bp = (byte*)b;
for (long i = 0; i < size; ++i)
*ap++ = *bp++;
}
public static void memcpy(void* a, void* b, ulong size)
{
memcpy(a, b, (long)size);
}
public static void memmove(void* a, void* b, long size)
{
void* temp = null;
try
{
temp = malloc(size);
memcpy(temp, b, size);
memcpy(a, temp, size);
}
finally
{
if (temp != null)
free(temp);
}
}
public static void memmove(void* a, void* b, ulong size)
{
memmove(a, b, (long)size);
}
public static int memcmp(void* a, void* b, long size)
{
var result = 0;
var ap = (byte*)a;
var bp = (byte*)b;
for (long i = 0; i < size; ++i)
{
if (*ap != *bp)
result += 1;
ap++;
bp++;
}
return result;
}
public static int memcmp(void* a, void* b, ulong size)
{
return memcmp(a, b, (long)size);
}
public static int memcmp(byte* a, byte[] b, ulong size)
{
fixed (void* bptr = b)
{
return memcmp(a, bptr, (long)size);
}
}
public static void memset(void* ptr, int value, long size)
{
var bptr = (byte*)ptr;
var bval = (byte)value;
for (long i = 0; i < size; ++i)
*bptr++ = bval;
}
public static void memset(void* ptr, int value, ulong size)
{
memset(ptr, value, (long)size);
}
public static uint _lrotl(uint x, int y)
{
return (x << y) | (x >> (32 - y));
}
public static void* realloc(void* a, long newSize)
{
if (a == null)
return malloc(newSize);
var ptr = new IntPtr(a);
var result = Marshal.ReAllocHGlobal(ptr, new IntPtr(newSize));
return result.ToPointer();
}
public static void* realloc(void* a, ulong newSize)
{
return realloc(a, (long)newSize);
}
public static int abs(int v)
{
return Math.Abs(v);
}
public static double pow(double a, double b)
{
return Math.Pow(a, b);
}
public static void SetArray<T>(T[] data, T value)
{
for (var i = 0; i < data.Length; ++i)
data[i] = value;
}
public static double ldexp(double number, int exponent)
{
return number * Math.Pow(2, exponent);
}
public static int strcmp(sbyte* src, string token)
{
var result = 0;
for (var i = 0; i < token.Length; ++i)
{
if (src[i] != token[i])
{
++result;
}
}
return result;
}
public static int strncmp(sbyte* src, string token, ulong size)
{
var result = 0;
for (var i = 0; i < Math.Min(token.Length, (int)size); ++i)
{
if (src[i] != token[i])
{
++result;
}
}
return result;
}
public static long strtol(sbyte* start, sbyte** end, int radix)
{
// First step - determine length
var length = 0;
sbyte* ptr = start;
while (numbers.IndexOf((char)*ptr) != -1)
{
++ptr;
++length;
}
long result = 0;
// Now build up the number
ptr = start;
while (length > 0)
{
long num = numbers.IndexOf((char)*ptr);
long pow = (long)Math.Pow(10, length - 1);
result += num * pow;
++ptr;
--length;
}
if (end != null)
{
*end = ptr;
}
return result;
}
}
}

View File

@@ -0,0 +1,28 @@
using System.Collections.Generic;
using System.Threading;
namespace SabreTools.Compression.zlib
{
public unsafe static class MemoryStats
{
private static int _allocations;
public static int Allocations
{
get
{
return _allocations;
}
}
internal static void Allocated()
{
Interlocked.Increment(ref _allocations);
}
internal static void Freed()
{
Interlocked.Decrement(ref _allocations);
}
}
}

View File

@@ -0,0 +1,88 @@
using System;
using System.Runtime.InteropServices;
namespace SabreTools.Compression.zlib
{
public unsafe class UnsafeArray1D<T> where T : struct
{
private readonly T[] _data;
private readonly GCHandle _pinHandle;
public bool IsFreed { get; private set; }
internal GCHandle PinHandle => _pinHandle;
public T this[int index]
{
get => _data[index];
set
{
_data[index] = value;
}
}
public T this[uint index]
{
get => _data[index];
set
{
_data[index] = value;
}
}
public T[] Data => _data;
public UnsafeArray1D(int size)
{
if (size < 0)
{
throw new ArgumentOutOfRangeException(nameof(size));
}
_data = new T[size];
_pinHandle = GCHandle.Alloc(_data, GCHandleType.Pinned);
IsFreed = false;
}
public UnsafeArray1D(T[] data, int sizeOf)
{
if (sizeOf <= 0)
{
throw new ArgumentOutOfRangeException(nameof(sizeOf));
}
_data = data ?? throw new ArgumentNullException(nameof(data));
_pinHandle = GCHandle.Alloc(_data, GCHandleType.Pinned);
IsFreed = false;
}
public void Free()
{
if (!IsFreed)
{
_pinHandle.Free();
IsFreed = true;
}
}
~UnsafeArray1D()
{
if (!IsFreed)
_pinHandle.Free();
}
public void* ToPointer()
{
return _pinHandle.AddrOfPinnedObject().ToPointer();
}
public static implicit operator void*(UnsafeArray1D<T> array)
{
return array.ToPointer();
}
public static void* operator +(UnsafeArray1D<T> array, int delta)
{
return array.ToPointer();
}
}
}
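
A brief hedged sketch of how the pinned-array helper above can hand a managed buffer to the C-style routines in CRuntime earlier in this diff; the buffer size and contents are arbitrary.

using SabreTools.Compression.zlib;

internal static class UnsafeArrayExample
{
    private static unsafe void Main()
    {
        // Pin a managed byte buffer so its address stays stable for unsafe code
        var window = new UnsafeArray1D<byte>(32768);

        // Zero it through the ported C runtime helper (memset takes a raw pointer)
        CRuntime.memset(window.ToPointer(), 0, 32768L);

        // Normal managed indexing still works alongside the pointer view
        window[0] = 0x78;

        // Release the pin once the unsafe code is finished with the buffer
        window.Free();
    }
}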

View File

@@ -0,0 +1,45 @@
using System.Runtime.InteropServices;
namespace SabreTools.Compression.zlib
{
public unsafe class UnsafeArray2D<T> where T : struct
{
private readonly UnsafeArray1D<T>[] _data;
private long[] _pinAddresses;
private readonly GCHandle _pinAddressesHandle;
public UnsafeArray1D<T> this[int index]
{
get => _data[index];
set
{
_data[index] = value;
}
}
public UnsafeArray2D(int size1, int size2)
{
_data = new UnsafeArray1D<T>[size1];
_pinAddresses = new long[size1];
for (var i = 0; i < size1; ++i)
{
_data[i] = new UnsafeArray1D<T>(size2);
_pinAddresses[i] = _data[i].PinHandle.AddrOfPinnedObject().ToInt64();
}
_pinAddressesHandle = GCHandle.Alloc(_pinAddresses, GCHandleType.Pinned);
}
~UnsafeArray2D()
{
_pinAddressesHandle.Free();
}
public void* ToPointer() => _pinAddressesHandle.AddrOfPinnedObject().ToPointer();
public static implicit operator void*(UnsafeArray2D<T> array)
{
return array.ToPointer();
}
}
}

View File

@@ -0,0 +1,9 @@
MIT License
Copyright (c) 2022 Nanook
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,23 @@
Project to port ZLib from C to C# (CSharp).
Src zlib 1.2.12 2022-Mar-28 - https://github.com/madler/zlib
See the Stages folder
1_zlib.c - Created by running 1_zlib.c_Concat.ps1; builds with Clang (used by Hebron to convert)
- Only deflate, inflate, crc32 and adler32 code at the moment. GZip might be added if required.
- The only edits to these files are to remove any #includes that have been combined
- The file list includes a 000_ to insert any #defines etc and 100_ for a main for debugging etc
- Notice crc32.c and trees.c had to be split to allow the single file to build
2_zlib.cs_Converted - The converted output that Hebron produced - https://github.com/HebronFramework/Hebron
- This is a little app that uses Clang to read the C code as DOM and write with Roslyn
- It does a fairly decent job and removes a lot of complication
3_zlib.cs_Working - The fixed up and amended C# that actually runs and matches the C code output
- It's had minimal change so is not the prettiest C# code
- It's Unsafe in places
Deflate and Inflate streams have been added.

View File

@@ -0,0 +1,178 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Runtime.InteropServices;
using System.Text;
namespace SabreTools.Compression.zlib
{
public class ZlibDeflateStream : Stream
{
private readonly bool _leaveOpen;
private ZLib.z_stream_s? _s;
private long _p;
private byte[]? _b;
public ZlibDeflateStream(int level, Stream baseStream) : this(level, false, 0, baseStream, false)
{
}
public ZlibDeflateStream(int level, Stream baseStream, bool leaveOpen) : this(level, false, 0, baseStream, leaveOpen)
{
}
public ZlibDeflateStream(int level, bool headerless, Stream baseStream, bool leaveOpen) : this(level, headerless, 0, baseStream, leaveOpen)
{
}
public ZlibDeflateStream(int level, int bufferSize, Stream baseStream, bool leaveOpen) : this(level, false, bufferSize, baseStream, leaveOpen)
{
}
public ZlibDeflateStream(int level, bool headerless, int bufferSize, Stream baseStream, bool leaveOpen)
{
this.Level = level;
this.Headerless = headerless;
this.BaseStream = baseStream;
_leaveOpen = leaveOpen;
_s = null;
_b = new byte[bufferSize == 0 ? 0x10000 : bufferSize];
}
public override bool CanRead => false;
public override bool CanSeek => false;
public override bool CanWrite => true;
public override long Length => _p;
public override long Position { get => _p; set => throw new NotImplementedException(); }
public int Level { get; }
public bool Headerless { get; }
public Stream BaseStream { get; }
public string Version { get => ZLib.zlibVersion(); }
public override void Flush()
{
}
public override int Read(byte[] buffer, int offset, int count)
{
throw new NotImplementedException();
}
public override long Seek(long offset, SeekOrigin origin)
{
throw new NotImplementedException();
}
public override void SetLength(long value)
{
throw new NotImplementedException();
}
unsafe public override void Write(byte[] buffer, int offset, int count)
{
if (buffer == null) throw new ArgumentNullException();
if (offset < 0 || count < 0) throw new ArgumentOutOfRangeException();
if ((offset + count) > buffer.Length) throw new ArgumentException();
int err = 0;
int hdr = 0;
if (_s == null)
{
_s = new ZLib.z_stream_s();
ZLib.deflateInit_(_s, this.Level, this.Version, 0); //0 = sizeof(z_stream_s) not used
if (this.Headerless)
hdr = 2;
_s.total_in = 0u;
_s.total_out = 0u;
_s.avail_in = 0u;
_s.avail_out = 0u;
}
_s.avail_in = (uint)count;
fixed (byte* i = buffer, o = _b)
{
_s.next_in = i;
_s.next_out = o + _s.total_out;
while (err >= 0 && _s.avail_in != 0) //process the buffer
{
if (_s.avail_out == 0) //get more data
{
if (_s.total_out != 0)
{
if (hdr != 0)
{
BaseStream.Write(_b!, hdr, (int)_s.total_out - hdr);
_s.total_out -= (uint)hdr;
hdr = 0;
}
else
BaseStream.Write(_b!, 0, (int)_s.total_out);
}
_p += _s.total_out;
_s.avail_out = (uint)_b!.Length;
_s.next_out = o;
_s.total_out = 0;
}
if (_s.avail_in != 0 || _s.avail_out != 0)
err = ZLib.deflate(_s, 2);
}
}
}
/// <summary>
/// Allow blocks to be written to the base stream. Call when writing is finished.
/// Used for creating block seekable files. The caller must manage blocks, indexes and lengths
/// </summary>
unsafe public long BlockFlush()
{
//finish previous stream
if (_s != null)
{
int err = 0;
fixed (byte* o = _b)
{
_s.next_in = null;
_s.avail_in = 0;
_s.next_out = o + _s.total_out; //point to correct location
int hdr = _p == 0 && Headerless ? 2 : 0;
while (err == 0 && (_s.total_out != 0 || _s.state!.pending != 0))
{
this.BaseStream.Write(_b!, hdr, (int)_s.total_out - hdr);
_s.avail_out = (uint)_b!.Length;
_p += _s.total_out - hdr;
hdr = 0;
_s.next_out = o;
_s.total_out = 0;
if (_s.state!.pending != 0)
err = ZLib.deflate(_s, 2);
}
err = ZLib.deflate(_s, 4);
}
ZLib.deflateEnd(_s);
_s = null;
}
long ret = _p;
_p = 0;
return ret;
}
unsafe protected override void Dispose(bool disposing)
{
this.BlockFlush();
_b = null;
if (!_leaveOpen)
this.BaseStream.Dispose();
}
}
}

View File

@@ -0,0 +1,170 @@
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Runtime.InteropServices;
using System.Text;
namespace SabreTools.Compression.zlib
{
public class ZlibInflateStream : Stream
{
private readonly bool _leaveOpen;
private ZLib.z_stream_s? _s;
private long _p;
private byte[]? _b;
private bool _complete;
public ZlibInflateStream(Stream baseStream) : this(0, false, 0, baseStream, false)
{
}
public ZlibInflateStream(Stream baseStream, bool leaveOpen) : this(0, false, 0, baseStream, leaveOpen)
{
}
public ZlibInflateStream(bool headerless, Stream baseStream, bool leaveOpen) : this(0, headerless, 0, baseStream, leaveOpen)
{
}
public ZlibInflateStream(bool headerless, int bufferSize, Stream baseStream, bool leaveOpen) : this(0, headerless, bufferSize, baseStream, leaveOpen)
{
}
public ZlibInflateStream(int bufferSize, Stream baseStream, bool leaveOpen) : this(0, false, bufferSize, baseStream, leaveOpen)
{
}
public ZlibInflateStream(long maxRead, bool headerless, int bufferSize, Stream baseStream, bool leaveOpen)
{
this.MaxRead = maxRead == 0 ? int.MaxValue : maxRead;
this.Headerless = headerless;
this.BaseStream = baseStream;
_leaveOpen = leaveOpen;
_s = null;
_b = new byte[bufferSize == 0 ? 0x10000 : bufferSize];
_complete = false;
}
public override bool CanRead => true;
public override bool CanSeek => false;
public override bool CanWrite => false;
public override long Length => _p;
public override long Position { get => _p; set => throw new NotImplementedException(); }
public long MaxRead { get; private set; }
public bool Headerless { get; }
public Stream BaseStream { get; }
public string Version { get => ZLib.zlibVersion(); }
public override void Flush()
{
}
public override long Seek(long offset, SeekOrigin origin)
{
throw new NotImplementedException();
}
public override void SetLength(long value)
{
throw new NotImplementedException();
}
public override void Write(byte[] buffer, int offset, int count)
{
throw new NotImplementedException();
}
unsafe public override int Read(byte[] buffer, int offset, int count)
{
if (buffer == null) throw new ArgumentNullException();
if (offset < 0 || count < 0) throw new ArgumentOutOfRangeException();
if ((offset + count) > buffer.Length) throw new ArgumentException();
if (_complete)
return 0;
int err = 0;
int hdr = 0;
if (_s == null)
{
_s = new ZLib.z_stream_s();
ZLib.inflateInit_(_s, this.Version, 0); //0 = sizeof(z_stream_s) not used
if (this.Headerless)
{
_b![0] = 0x78;
_b[1] = 0x9c; //da
hdr = 2;
}
_s.total_in = 0u;
_s.total_out = 0u;
_s.avail_in = 0u;
_s.avail_out = 0u;
}
int read;
_s.avail_out = (uint)count;
fixed (byte* i = _b, o = buffer)
{
_s.next_in = i + _s.total_in;
_s.next_out = o;
while (err == 0 && (_s.avail_out != 0 && !_complete)) //process the buffer
{
if (_s.avail_in == 0) //get more data
{
_s.total_in = 0;
read = (int)Math.Min(this.MaxRead - _p, (long)_b!.Length);
if (hdr != 0) //test once to save on the extra calculations
{
_s.avail_in = (uint)(hdr + (read = BaseStream.Read(_b, hdr, Math.Min(read, _b.Length - hdr))));
hdr = 0;
}
else
_s.avail_in = (uint)(read = BaseStream.Read(_b, 0, read));
_complete = read == 0;
_p += (long)read;
_s.next_in = i;
}
if (_s.avail_in != 0 || (!_complete && _s.total_out != 0))
err = ZLib.inflate(_s, 2);
}
}
uint ret = _s.total_out;
_s.total_out = 0u;
return (int)ret;
}
/// <summary>
/// Allow blocks to be read from the base stream without overreading. Call when reading is finished.
/// Used for reading block seekable files. The caller must manage blocks, indexes and lengths. Seek the BaseStream
/// </summary>
public long BlockFlush(int maxRead)
{
this.MaxRead = maxRead;
if (_s != null)
{
ZLib.deflateEnd(_s);
_s = null;
}
_complete = false;
long ret = _p;
_p = 0;
return ret;
}
protected override void Dispose(bool disposing)
{
BlockFlush(0);
_complete = true;
_b = null;
if (!_leaveOpen)
this.BaseStream.Dispose();
}
}
}
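
To tie the two stream wrappers together, a hedged round-trip sketch that uses only the constructors and overrides visible above; the compression level, buffer contents, and sizes are arbitrary, and a single Read call is assumed to be enough for this small payload.

using System;
using System.IO;
using SabreTools.Compression.zlib;

internal class ZlibRoundTripExample
{
    private static void Main()
    {
        byte[] original = new byte[1024]; // arbitrary all-zero payload

        // Compress into a memory stream; disposing the deflate stream flushes the final block
        var compressed = new MemoryStream();
        using (var deflate = new ZlibDeflateStream(9, compressed, leaveOpen: true))
            deflate.Write(original, 0, original.Length);

        // Rewind and inflate it back out
        compressed.Position = 0;
        byte[] roundTripped = new byte[original.Length];
        int read;
        using (var inflate = new ZlibInflateStream(compressed, leaveOpen: true))
            read = inflate.Read(roundTripped, 0, roundTripped.Length);

        Console.WriteLine("Read back " + read + " bytes");
    }
}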

File diff suppressed because one or more lines are too long

12
Test/Program.cs Normal file
View File

@@ -0,0 +1,12 @@
using SabreTools.Compression;
namespace Test
{
public static class Program
{
public static void Main(string[] args)
{
// No implementation, used for experimentation
}
}
}

19
Test/Test.csproj Normal file
View File

@@ -0,0 +1,19 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>net20;net35;net40;net452;net462;net472;net48;netcoreapp3.1;net5.0;net6.0;net7.0;net8.0</TargetFrameworks>
<RuntimeIdentifiers>win-x86;win-x64;win-arm64;linux-x64;linux-arm64;osx-x64</RuntimeIdentifiers>
<OutputType>Exe</OutputType>
<CheckEolTargetFramework>false</CheckEolTargetFramework>
<IncludeSourceRevisionInInformationalVersion>false</IncludeSourceRevisionInInformationalVersion>
<LangVersion>latest</LangVersion>
<Nullable>enable</Nullable>
<SuppressTfmSupportBuildWarnings>true</SuppressTfmSupportBuildWarnings>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\SabreTools.Compression\SabreTools.Compression.csproj" />
</ItemGroup>
</Project>


@@ -1,16 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// Base class for all compressor implementations
/// </summary>
public abstract class BaseCompressor : mspack_file
{
#if NET48
public mspack_system system { get; set; }
#else
public mspack_system? system { get; set; }
#endif
public MSPACK_ERR error { get; set; }
}
}


@@ -1,16 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// Base class for all decompressor implementations
/// </summary>
public abstract class BaseDecompressor : mspack_file
{
#if NET48
public mspack_system system { get; set; }
#else
public mspack_system? system { get; set; }
#endif
public MSPACK_ERR error { get; set; }
}
}


@@ -1,49 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public unsafe abstract class BitStream
{
/// <summary>
/// I/O routines
/// </summary>
public mspack_system sys { get; set; }
/// <summary>
/// Input file handle
/// </summary>
public mspack_file input { get; set; }
/// <summary>
/// Output file handle
/// </summary>
public mspack_file output { get; set; }
/// <summary>
/// Decompression offset within window
/// </summary>
public uint window_posn { get; set; }
#region I/O buffering
public byte* inbuf { get; set; }
public byte* i_ptr { get; set; }
public byte* i_end { get; set; }
public byte* o_ptr { get; set; }
public byte* o_end { get; set; }
public int input_end { get; set; }
public uint bit_buffer { get; set; }
public uint bits_left { get; set; }
public uint inbuf_size { get; set; }
#endregion
public MSPACK_ERR error { get; set; }
}
}
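The fields above mirror libmspack's bit-reading state: inbuf/i_ptr/i_end track the raw input, while bit_buffer holds up to 32 bits with bits_left of them valid. A hedged sketch of the operations this state is normally used for, shown MSB-first as the LZX and Quantum decoders consume bits (MSZIP reads bits in the opposite, LSB-first order). The helper names are illustrative, not part of the deleted class.

internal static class BitBufferSketch
{
    private const int BitBufWidth = 32; // bit_buffer is a uint

    // Inject one 16-bit little-endian pair from the input buffer, below any bits already held
    public static void InjectBits(ref uint bitBuffer, ref uint bitsLeft, byte b0, byte b1)
    {
        bitBuffer |= (uint)((b1 << 8) | b0) << (BitBufWidth - 16 - (int)bitsLeft);
        bitsLeft += 16;
    }

    // Look at the top n bits without consuming them
    public static uint PeekBits(uint bitBuffer, int n) => bitBuffer >> (BitBufWidth - n);

    // Consume n bits
    public static void RemoveBits(ref uint bitBuffer, ref uint bitsLeft, int n)
    {
        bitBuffer <<= n;
        bitsLeft -= (uint)n;
    }
}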


@@ -1,231 +0,0 @@
using static SabreTools.Compression.libmspack.macros;
using static SabreTools.Compression.libmspack.CAB.Constants;
namespace SabreTools.Compression.libmspack.CAB
{
public unsafe class CABSystem : mspack_default_system
{
/// <summary>
/// cabd_sys_read is the internal reader function which the decompressors
/// use. will read data blocks (and merge split blocks) from the cabinet
/// and serve the read bytes to the decompressors
/// </summary>
public override int read(mspack_file file, void* buffer, int bytes)
{
Decompressor self = (Decompressor)file;
byte* buf = (byte*)buffer;
mspack_system sys = self.system;
int avail, todo, outlen = 0, ignore_cksum, ignore_blocksize;
ignore_cksum = self.salvage != 0 || (self.fix_mszip != 0 && ((MSCAB_COMP)((int)self.d.comp_type & cffoldCOMPTYPE_MASK) == MSCAB_COMP.MSCAB_COMP_MSZIP)) == true ? 1 : 0;
ignore_blocksize = self.salvage;
todo = bytes;
while (todo > 0)
{
avail = (int)(self.d.i_end - self.d.i_ptr);
// Copy from the current block if input is available; otherwise read a new block
if (avail != 0)
{
// Copy as many input bytes available as possible
if (avail > todo) avail = todo;
sys.copy(self.d.i_ptr, buf, avail);
self.d.i_ptr += avail;
buf += avail;
todo -= avail;
}
else
{
// Out of data, read a new block
// Check if we're out of input blocks, advance block counter
if (self.d.block++ >= self.d.folder.num_blocks)
{
if (self.salvage == 0)
{
self.read_error = MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
else
{
System.Console.Error.WriteLine("Ran out of CAB input blocks prematurely");
}
break;
}
// Read a block
self.read_error = ReadBlock(sys, self.d, ref outlen, ignore_cksum, ignore_blocksize);
if (self.read_error != MSPACK_ERR.MSPACK_ERR_OK) return -1;
self.d.outlen += outlen;
// Special Quantum hack -- trailer byte to allow the decompressor
// to realign itself. CAB Quantum blocks, unlike LZX blocks, can have
// anything from 0 to 4 trailing null bytes.
if ((MSCAB_COMP)((int)self.d.comp_type & cffoldCOMPTYPE_MASK) == MSCAB_COMP.MSCAB_COMP_QUANTUM)
{
*self.d.i_end++ = 0xFF;
}
// Is this the last block?
if (self.d.block >= self.d.folder.num_blocks)
{
if ((MSCAB_COMP)((int)self.d.comp_type & cffoldCOMPTYPE_MASK) == MSCAB_COMP.MSCAB_COMP_LZX)
{
// Special LZX hack -- on the last block, inform LZX of the
// size of the output data stream.
lzxd_set_output_length((lzxd_stream)self.d.state, self.d.outlen);
}
}
} /* if (avail) */
} /* while (todo > 0) */
return bytes - todo;
}
/// <summary>
/// cabd_sys_write is the internal writer function which the decompressors
/// use. it either writes data to disk (self.d.outfh) with the real
/// sys.write() function, or does nothing with the data when
/// self.d.outfh == null. advances self.d.offset
/// </summary>
public override int write(mspack_file file, void* buffer, int bytes)
{
Decompressor self = (Decompressor)file;
self.d.offset += (uint)bytes;
if (self.d.outfh != null)
{
return self.system.write(self.d.outfh, buffer, bytes);
}
return bytes;
}
/// <summary>
/// Reads a whole data block from a cab file. the block may span more than
/// one cab file, if it does then the fragments will be reassembled
/// </summary>
private static MSPACK_ERR ReadBlock(mspack_system sys, mscabd_decompress_state d, ref int @out, int ignore_cksum, int ignore_blocksize)
{
FixedArray<byte> hdr = new FixedArray<byte>(cfdata_SIZEOF);
uint cksum;
int len, full_len;
// Reset the input block pointer and end of block pointer
d.i_ptr = d.i_end = d.input;
do
{
// Read the block header
if (sys.read(d.infh, hdr, cfdata_SIZEOF) != cfdata_SIZEOF)
{
return MSPACK_ERR.MSPACK_ERR_READ;
}
// Skip any reserved block headers
if (d.data.cab.block_resv != 0 &&
sys.seek(d.infh, d.data.cab.block_resv, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_CUR) != 0)
{
return MSPACK_ERR.MSPACK_ERR_SEEK;
}
// Blocks must not be over CAB_INPUTMAX in size
len = EndGetI16(hdr, cfdata_CompressedSize);
full_len = (int)(d.i_end - d.i_ptr + len); // Include cab-spanning blocks
if (full_len > CAB_INPUTMAX)
{
System.Console.Error.WriteLine($"Block size {full_len} > CAB_INPUTMAX");
// In salvage mode, blocks can be 65535 bytes but no more than that
if (ignore_blocksize == 0 || full_len > CAB_INPUTMAX_SALVAGE)
{
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
}
// Blocks must not expand to more than CAB_BLOCKMAX
if (EndGetI16(hdr, cfdata_UncompressedSize) > CAB_BLOCKMAX)
{
System.Console.Error.WriteLine("block size > CAB_BLOCKMAX");
if (ignore_blocksize == 0) return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
// Read the block data
if (sys.read(d.infh, d.i_end, len) != len)
{
return MSPACK_ERR.MSPACK_ERR_READ;
}
// Perform checksum test on the block (if one is stored)
if ((cksum = EndGetI32(hdr, cfdata_CheckSum)) != 0)
{
uint sum2 = Checksum(d.i_end, (uint)len, 0);
if (Checksum(hdr, 4, 4, sum2) != cksum)
{
if (ignore_cksum == 0) return MSPACK_ERR.MSPACK_ERR_CHECKSUM;
sys.message(d.infh, "WARNING; bad block checksum found");
}
}
// Advance end of block pointer to include newly read data
d.i_end += len;
// Uncompressed size == 0 means this block was part of a split block
// and it continues as the first block of the next cabinet in the set.
// otherwise, this is the last part of the block, and no more block
// reading needs to be done.
// EXIT POINT OF LOOP -- uncompressed size != 0
if ((@out = EndGetI16(hdr, cfdata_UncompressedSize)) != 0)
{
return MSPACK_ERR.MSPACK_ERR_OK;
}
// Otherwise, advance to next cabinet
// Close current file handle
sys.close(d.infh);
d.infh = null;
// Advance to next member in the cabinet set
if ((d.data = d.data.next) == null)
{
sys.message(d.infh, "WARNING; ran out of cabinets in set. Are any missing?");
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
// Open next cab file
d.incab = d.data.cab;
if ((d.infh = sys.open(d.incab.filename, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ)) == null)
{
return MSPACK_ERR.MSPACK_ERR_OPEN;
}
// Seek to start of data blocks
if (sys.seek(d.infh, d.data.offset, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_START) != 0)
{
return MSPACK_ERR.MSPACK_ERR_SEEK;
}
} while (true);
// Not reached
return MSPACK_ERR.MSPACK_ERR_OK;
}
private static uint Checksum(FixedArray<byte> data, int ptr, uint bytes, uint cksum)
{
uint len, ul = 0;
for (len = bytes >> 2; len-- > 0; ptr += 4)
{
cksum ^= EndGetI32(data, ptr);
}
switch (bytes & 3)
{
case 3: ul |= (uint)(data[ptr++] << 16); goto case 2;
case 2: ul |= (uint)(data[ptr++] << 8); goto case 1;
case 1: ul |= data[ptr]; break;
}
cksum ^= ul;
return cksum;
}
}
}
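For reference, the per-block checksum that ReadBlock() verifies is an XOR over the data taken as little-endian 32-bit words, with any 1-3 trailing bytes folded in high-to-low. A managed sketch equivalent to the pointer-based Checksum() above, illustrative only:

internal static class CabChecksumSketch
{
    public static uint CabChecksum(byte[] data, int offset, int length, uint seed)
    {
        uint cksum = seed;
        int wordEnd = offset + (length & ~3);

        // XOR whole little-endian 32-bit words
        for (int i = offset; i < wordEnd; i += 4)
            cksum ^= (uint)data[i] | ((uint)data[i + 1] << 8) | ((uint)data[i + 2] << 16) | ((uint)data[i + 3] << 24);

        // Fold in the 0-3 trailing bytes, first byte in the highest remaining position
        uint tail = 0;
        switch (length & 3)
        {
            case 3: tail = (uint)((data[wordEnd] << 16) | (data[wordEnd + 1] << 8) | data[wordEnd + 2]); break;
            case 2: tail = (uint)((data[wordEnd] << 8) | data[wordEnd + 1]); break;
            case 1: tail = data[wordEnd]; break;
        }
        return cksum ^ tail;
    }
}

ReadBlock() chains it as Checksum(header bytes 4-7, Checksum(block data, 0)) and compares the result against the stored cfdata_CheckSum.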


@@ -1,17 +0,0 @@
namespace SabreTools.Compression.libmspack.CAB
{
/// <summary>
/// TODO
/// </summary>
public class Compressor : BaseCompressor
{
/// <summary>
/// Creates a new CAB compressor
/// </summary>
public Compressor()
{
this.system = new CABSystem();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
}
}


@@ -1,68 +0,0 @@
namespace SabreTools.Compression.libmspack.CAB
{
public static class Constants
{
/* structure offsets */
public const byte cfhead_Signature = 0x00;
public const byte cfhead_CabinetSize = 0x08;
public const byte cfhead_FileOffset = 0x10;
public const byte cfhead_MinorVersion = 0x18;
public const byte cfhead_MajorVersion = 0x19;
public const byte cfhead_NumFolders = 0x1A;
public const byte cfhead_NumFiles = 0x1C;
public const byte cfhead_Flags = 0x1E;
public const byte cfhead_SetID = 0x20;
public const byte cfhead_CabinetIndex = 0x22;
public const byte cfhead_SIZEOF = 0x24;
public const byte cfheadext_HeaderReserved = 0x00;
public const byte cfheadext_FolderReserved = 0x02;
public const byte cfheadext_DataReserved = 0x03;
public const byte cfheadext_SIZEOF = 0x04;
public const byte cffold_DataOffset = 0x00;
public const byte cffold_NumBlocks = 0x04;
public const byte cffold_CompType = 0x06;
public const byte cffold_SIZEOF = 0x08;
public const byte cffile_UncompressedSize = 0x00;
public const byte cffile_FolderOffset = 0x04;
public const byte cffile_FolderIndex = 0x08;
public const byte cffile_Date = 0x0A;
public const byte cffile_Time = 0x0C;
public const byte cffile_Attribs = 0x0E;
public const byte cffile_SIZEOF = 0x10;
public const byte cfdata_CheckSum = 0x00;
public const byte cfdata_CompressedSize = 0x04;
public const byte cfdata_UncompressedSize = 0x06;
public const byte cfdata_SIZEOF = 0x08;
/* flags */
public const ushort cffoldCOMPTYPE_MASK = 0x000f;
public const ushort cffileCONTINUED_FROM_PREV = 0xFFFD;
public const ushort cffileCONTINUED_TO_NEXT = 0xFFFE;
public const ushort cffileCONTINUED_PREV_AND_NEXT = 0xFFFF;
/* CAB data blocks are <= 32768 bytes in uncompressed form. Uncompressed
* blocks have zero growth. MSZIP guarantees that it won't grow above
* uncompressed size by more than 12 bytes. LZX guarantees it won't grow
* more than 6144 bytes. Quantum has no documentation, but the largest
* block seen in the wild is 337 bytes above uncompressed size.
*/
public const int CAB_BLOCKMAX = 32768;
public const int CAB_INPUTMAX = CAB_BLOCKMAX + 6144;
/* input buffer needs to be CAB_INPUTMAX + 1 byte to allow for max-sized block
* plus 1 trailer byte added by cabd_sys_read_block() for Quantum alignment.
*
* When MSCABD_PARAM_SALVAGE is set, block size is not checked so can be
* up to 65535 bytes, so max input buffer size needed is 65535 + 1
*/
public const int CAB_INPUTMAX_SALVAGE = 65535;
public const int CAB_INPUTBUF = CAB_INPUTMAX_SALVAGE + 1;
/* There are no more than 65535 data blocks per folder, so a folder cannot
* be more than 32768*65535 bytes in length. As files cannot span more than
* one folder, this is also their max offset, length and offset+length limit.
*/
public const int CAB_FOLDERMAX = 65535;
public const int CAB_LENGTHMAX = CAB_BLOCKMAX * CAB_FOLDERMAX;
}
}
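A folder's comp_type packs two values: the low four bits (cffoldCOMPTYPE_MASK) select the algorithm, and for Quantum and LZX bits 8-12 carry the window size as a power of two, which is why the cabd_init_decomp code later in this changeset extracts ((int)ct >> 8) & 0x1f. A small illustrative helper, with names of our own that mirror libmspack's MSCABD_COMP_METHOD/MSCABD_COMP_LEVEL macros referenced elsewhere:

// MSCAB_COMP is the enum used throughout this changeset; the helper names are ours.
internal static class CompTypeSketch
{
    // Algorithm selector: low 4 bits (cffoldCOMPTYPE_MASK)
    public static MSCAB_COMP Method(MSCAB_COMP compType)
        => (MSCAB_COMP)((int)compType & 0x000F);

    // Quantum/LZX window size as a power of two: bits 8-12
    public static int Level(MSCAB_COMP compType)
        => ((int)compType >> 8) & 0x1F;
}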

File diff suppressed because it is too large


@@ -1,65 +0,0 @@
using static SabreTools.Compression.libmspack.CAB.Constants;
namespace SabreTools.Compression.libmspack
{
public unsafe static class cab
{
#region decomp
/// <summary>
/// cabd_init_decomp initialises decompression state, according to which
/// decompression method was used. Relies on self.d.folder being the same as when initialised.
/// </summary>
public static MSPACK_ERR cabd_init_decomp(CAB.Decompressor self, MSCAB_COMP ct)
{
mspack_file fh = self;
self.d.comp_type = ct;
switch ((MSCAB_COMP)((int)ct & cffoldCOMPTYPE_MASK))
{
case MSCAB_COMP.MSCAB_COMP_NONE:
self.d = new None.DecompressState(self.d);
self.d.state = new None.State(self.d.sys, fh, fh, self.buf_size);
break;
case MSCAB_COMP.MSCAB_COMP_MSZIP:
self.d = new mscabd_mszipd_decompress_state();
self.d.state = mszipd_init(self.d.sys, fh, fh, self.buf_size, self.fix_mszip);
break;
case MSCAB_COMP.MSCAB_COMP_QUANTUM:
self.d = new mscabd_qtmd_decompress_state();
self.d.state = qtmd_init(self.d.sys, fh, fh, ((int)ct >> 8) & 0x1f, self.buf_size);
break;
case MSCAB_COMP.MSCAB_COMP_LZX:
self.d = new mscabd_lzxd_decompress_state();
self.d.state = lzxd_init(self.d.sys, fh, fh, ((int)ct >> 8) & 0x1f, 0, self.buf_size, 0, 0);
break;
default:
return self.error = MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
return self.error = (self.d.state != null) ? MSPACK_ERR.MSPACK_ERR_OK : MSPACK_ERR.MSPACK_ERR_NOMEMORY;
}
/// <summary>
/// cabd_free_decomp frees decompression state, according to which method was used.
/// </summary>
public static void cabd_free_decomp(CAB.Decompressor self)
{
if (self == null || self.d == null || self.d.state == null) return;
switch ((MSCAB_COMP)((int)self.d.comp_type & cffoldCOMPTYPE_MASK))
{
case MSCAB_COMP.MSCAB_COMP_MSZIP: mszipd_free((mszipd_stream)self.d.state); break;
case MSCAB_COMP.MSCAB_COMP_QUANTUM: qtmd_free((qtmd_stream)self.d.state); break;
case MSCAB_COMP.MSCAB_COMP_LZX: lzxd_free((lzxd_stream)self.d.state); break;
}
//self.d.decompress = null;
self.d.state = null;
}
#endregion
}
}


@@ -1,125 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents a single cabinet file.
///
/// All fields are READ ONLY.
///
/// If this cabinet is part of a merged cabinet set, the #files and #folders
/// fields are common to all cabinets in the set, and will be identical.
/// </summary>
/// <see cref="mscab_decompressor::open()"/>
/// <see cref="mscab_decompressor::close()"/>
/// <see cref="mscab_decompressor::search()"/>
public unsafe class mscabd_cabinet
{
/// <summary>
/// The next cabinet in a chained list, if this cabinet was opened with
/// mscab_decompressor::search(). May be null to mark the end of the
/// list.
/// </summary>
public mscabd_cabinet next { get; set; }
/// <summary>
/// The filename of the cabinet. More correctly, the filename of the
/// physical file that the cabinet resides in. This is given by the
/// library user and may be in any format.
/// </summary>
public string filename { get; set; }
/// <summary>
/// The file offset of cabinet within the physical file it resides in.
/// </summary>
public long base_offset { get; set; }
/// <summary>
/// The length of the cabinet file in bytes.
/// </summary>
public uint length { get; set; }
/// <summary>
/// The previous cabinet in a cabinet set, or null.
/// </summary>
public mscabd_cabinet prevcab { get; set; }
/// <summary>
/// The next cabinet in a cabinet set, or null.
/// </summary>
public mscabd_cabinet nextcab { get; set; }
/// <summary>
/// The filename of the previous cabinet in a cabinet set, or null.
/// </summary>
public string prevname { get; set; }
/// <summary>
/// The filename of the next cabinet in a cabinet set, or null.
/// </summary>
public string nextname { get; set; }
/// <summary>
/// The name of the disk containing the previous cabinet in a cabinet
/// set, or null.
/// </summary>
public string previnfo { get; set; }
/// <summary>
/// The name of the disk containing the next cabinet in a cabinet set,
/// or null.
/// </summary>
public string nextinfo { get; set; }
/// <summary>
/// A list of all files in the cabinet or cabinet set.
/// </summary>
public mscabd_file files { get; set; }
/// <summary>
/// A list of all folders in the cabinet or cabinet set.
/// </summary>
public mscabd_folder folders { get; set; }
/// <summary>
/// The set ID of the cabinet. All cabinets in the same set should have
/// the same set ID.
/// </summary>
public ushort set_id { get; set; }
/// <summary>
/// The index number of the cabinet within the set. Numbering should
/// start from 0 for the first cabinet in the set, and increment by 1 for
/// each following cabinet.
/// </summary>
public ushort set_index { get; set; }
/// <summary>
/// The number of bytes reserved in the header area of the cabinet.
///
/// If this is non-zero and flags has MSCAB_HDR_RESV set, this data can
/// be read by the calling application. It is of the given length,
/// located at offset (base_offset + MSCAB_HDR_RESV_OFFSET) in the
/// cabinet file.
/// </summary>
public ushort header_resv { get; set; }
/// <summary>
/// Header flags.
/// </summary>
/// <see cref="prevname"/>
/// <see cref="previnfo"/>
/// <see cref="nextname"/>
/// <see cref="nextinfo"/>
/// <see cref="header_resv"/>
public MSCAB_HDR flags { get; set; }
/// <summary>
/// Offset to data blocks
/// </summary>
public long blocks_off { get; set; }
/// <summary>
/// Reserved space in data blocks
/// </summary>
public int block_resv { get; set; }
}
}
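Since files, folders and the cabinets of a set are all exposed as singly linked lists, enumerating the contents of an opened cabinet is just pointer-chasing. An illustrative sketch using the classes defined in this changeset; the helper name is ours:

internal static class CabListingSketch
{
    public static void ListFiles(mscabd_cabinet cab)
    {
        // 'files' is common to every cabinet in a merged set, so one pass covers the whole set
        for (mscabd_file file = cab.files; file != null; file = file.next)
            System.Console.WriteLine($"{file.filename} ({file.length} bytes)");
    }
}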


@@ -1,83 +0,0 @@
using SabreTools.Compression.libmspack.CAB;
using static SabreTools.Compression.libmspack.CAB.Constants;
namespace SabreTools.Compression.libmspack
{
public unsafe abstract class mscabd_decompress_state
{
/// <summary>
/// Current folder we're extracting from
/// </summary>
public mscabd_folder folder { get; set; }
/// <summary>
/// Current folder split we're in
/// </summary>
public mscabd_folder_data data { get; set; }
/// <summary>
/// Uncompressed offset within folder
/// </summary>
public uint offset { get; set; }
/// <summary>
/// Which block are we decompressing?
/// </summary>
public uint block { get; set; }
/// <summary>
/// Cumulative sum of block output sizes
/// </summary>
public long outlen { get; set; }
/// <summary>
/// Special I/O code for decompressor
/// </summary>
public CABSystem sys { get; set; }
/// <summary>
/// Type of compression used by folder
/// </summary>
public MSCAB_COMP comp_type { get; set; }
/// <summary>
/// Decompressor state
/// </summary>
public object state { get; set; }
/// <summary>
/// Cabinet where input data comes from
/// </summary>
public mscabd_cabinet incab { get; set; }
/// <summary>
/// Input file handle
/// </summary>
public mspack_file infh { get; set; }
/// <summary>
/// Output file handle
/// </summary>
public mspack_file outfh { get; set; }
/// <summary>
/// Input data consumed
/// </summary>
public byte* i_ptr { get; set; }
/// <summary>
/// Input data end
/// </summary>
public byte* i_end { get; set; }
/// <summary>
/// One input block of data
/// </summary>
public FixedArray<byte> input { get; set; } = new FixedArray<byte>(CAB_INPUTBUF);
/// <summary>
/// Decompressor code
/// </summary>
public abstract MSPACK_ERR decompress(object data, long offset);
}
}


@@ -1,74 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents a single file in a cabinet or cabinet set.
///
/// All fields are READ ONLY.
/// </summary>
public class mscabd_file
{
/// <summary>
/// The next file in the cabinet or cabinet set, or null if this is the
/// final file.
/// </summary>
public mscabd_file next { get; set; }
/// <summary>
/// The filename of the file.
///
/// A null-terminated string of up to 255 bytes in length. It may be in
/// either ISO-8859-1 or UTF-8 format, depending on the file attributes.
/// </summary>
public string filename { get; set; }
/// <summary>
/// The uncompressed length of the file, in bytes.
/// </summary>
public uint length { get; set; }
/// <summary>
/// File attributes.
/// </summary>
public MSCAB_ATTRIB attribs { get; set; }
/// <summary>
/// File's last modified time, hour field.
/// </summary>
public char time_h { get; set; }
/// <summary>
/// File's last modified time, minute field.
/// </summary>
public char time_m { get; set; }
/// <summary>
/// File's last modified time, second field.
/// </summary>
public char time_s { get; set; }
/// <summary>
/// File's last modified date, day field.
/// </summary>
public char date_d { get; set; }
/// <summary>
/// File's last modified date, month field.
/// </summary>
public char date_m { get; set; }
/// <summary>
/// File's last modified date, year field.
/// </summary>
public int date_y { get; set; }
/// <summary>
/// A pointer to the folder that contains this file.
/// </summary>
public mscabd_folder folder { get; set; }
/// <summary>
/// The uncompressed offset of this file in its folder.
/// </summary>
public uint offset { get; set; }
}
}
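The split time/date fields above come straight from the DOS-style cffile_Date/cffile_Time header fields, so no time zone is recorded; combining them into a System.DateTime is a one-liner. Illustrative sketch, helper name is ours:

internal static class CabTimestampSketch
{
    public static System.DateTime GetTimestamp(mscabd_file file)
    {
        // the char fields widen implicitly to int; CAB stores local time with no zone info
        return new System.DateTime(
            file.date_y, file.date_m, file.date_d,
            file.time_h, file.time_m, file.time_s);
    }
}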


@@ -1,53 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents a single folder in a cabinet or cabinet set.
///
/// All fields are READ ONLY.
///
/// A folder is a single compressed stream of data. When uncompressed, it
/// holds the data of one or more files. A folder may be split across more
/// than one cabinet.
/// </summary>
public class mscabd_folder
{
/// <summary>
/// A pointer to the next folder in this cabinet or cabinet set, or null
/// if this is the final folder.
/// </summary>
public mscabd_folder next { get; set; }
/// <summary>
/// The compression format used by this folder.
///
/// The macro MSCABD_COMP_METHOD() should be used on this field to get
/// the algorithm used. The macro MSCABD_COMP_LEVEL() should be used to get
/// the "compression level".
/// </summary>
/// <see cref="MSCABD_COMP_METHOD()"/>
/// <see cref="MSCABD_COMP_LEVEL()"/>
public MSCAB_COMP comp_type { get; set; }
/// <summary>
/// The total number of data blocks used by this folder. This includes
/// data blocks present in other files, if this folder spans more than
/// one cabinet.
/// </summary>
public uint num_blocks { get; set; }
/// <summary>
/// Where are the data blocks?
/// </summary>
public mscabd_folder_data data { get; set; }
/// <summary>
/// First file needing backwards merge
/// </summary>
public mscabd_file merge_prev { get; set; }
/// <summary>
/// First file needing forwards merge
/// </summary>
public mscabd_file merge_next { get; set; }
}
}


@@ -1,20 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// There is one of these for every cabinet a folder spans
/// </summary>
public class mscabd_folder_data
{
public mscabd_folder_data next { get; set; }
/// <summary>
/// Cabinet file of this folder span
/// </summary>
public mscabd_cabinet cab { get; set; }
/// <summary>
/// Cabinet offset of first datablock
/// </summary>
public long offset { get; set; }
}
}


@@ -1,169 +0,0 @@
namespace SabreTools.Compression.libmspack.CHM
{
/// <summary>
/// A compressor for .CHM (Microsoft HTMLHelp) files.
///
/// All fields are READ ONLY.
/// </summary>
/// <see cref="mspack.DestroyCHMCompressor(Compressor)"/>
public class Compressor : BaseCompressor
{
public string temp_file { get; private set; }
public int use_temp_file { get; private set; }
/// <summary>
/// Creates a new CHM compressor
/// </summary>
public Compressor()
{
this.system = new mspack_default_system();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
/// <summary>
/// Generates a CHM help file.
///
/// The help file will contain up to two sections, an Uncompressed
/// section and potentially an MSCompressed (LZX compressed)
/// section.
///
/// While the contents listing of a CHM file is always in lexical order,
/// the file list passed in will be taken as the correct order for files
/// within the sections. It is in your interest to place similar files
/// together for better compression.
///
/// There are two modes of generation, to use a temporary file or not to
/// use one. See use_temporary_file() for the behaviour of generate() in
/// these two different modes.
/// </summary>
/// <param name="file_list">
/// An array of mschmc_file structures, terminated
/// with an entry whose mschmc_file::section field is
/// #MSCHMC_ENDLIST. The order of the list is
/// preserved within each section. The length of any
/// mschmc_file::chm_filename string cannot exceed
/// roughly 4096 bytes. Each source file must be able
/// to supply as many bytes as given in the
/// mschmc_file::length field.
/// </param>
/// <param name="output_file">
/// The file to write the generated CHM helpfile to.
/// This is passed directly to mspack_system::open()
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
/// <see cref="use_temporary_file(int in string)"/>
/// <see cref="set_param(MSCHMC_PARAM, int)"/>
public MSPACK_ERR generate(mschmc_file[] file_list, in string output_file) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Specifies whether a temporary file is used during CHM generation.
///
/// The CHM file format includes data about the compressed section (such
/// as its overall size) that is stored in the output CHM file prior to
/// the compressed section itself. This unavoidably requires that the
/// compressed section has to be generated, before these details can be
/// set. There are several ways this can be handled. Firstly, the
/// compressed section could be generated entirely in memory before
/// writing any of the output CHM file. This approach is not used in
/// libmspack, as the compressed section can exceed the addressable
/// memory space on most architectures.
///
/// libmspack has two options, either to write these unknowable sections
/// with blank data, generate the compressed section, then re-open the
/// output file for update once the compressed section has been
/// completed, or to write the compressed section to a temporary file,
/// then write the entire output file at once, performing a simple
/// file-to-file copy for the compressed section.
///
/// The simple solution of buffering the entire compressed section in
/// memory can still be used, if desired. As the temporary file's
/// filename is passed directly to mspack_system::open(), it is possible
/// for a custom mspack_system implementation to hold this file in memory,
/// without writing to a disk.
///
/// If a temporary file is set, generate() performs the following
/// sequence of events: the temporary file is opened for writing, the
/// compression algorithm writes to the temporary file, the temporary
/// file is closed. Then the output file is opened for writing and the
/// temporary file is re-opened for reading. The output file is written
/// and the temporary file is read from. Both files are then closed. The
/// temporary file itself is not deleted. If that is desired, the
/// temporary file should be deleted after the completion of generate(),
/// if it exists.
///
/// If a temporary file is set not to be used, generate() performs the
/// following sequence of events: the output file is opened for writing,
/// then it is written and closed. The output file is then re-opened for
/// update, the appropriate sections are seek()ed to and re-written, then
/// the output file is closed.
/// </summary>
/// <param name="use_temp_file">
/// Non-zero if the temporary file should be used,
/// zero if the temporary file should not be used.
/// </param>
/// <param name="temp_file">
/// A file to temporarily write compressed data to,
/// before opening it for reading and copying the
/// contents to the output file. This is passed
/// directly to mspack_system::open().
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
/// <see cref="generate(mschmc_file[], in string)"/>
public MSPACK_ERR use_temporary_file(int use_temp_file, in string temp_file) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Sets a CHM compression engine parameter.
///
/// The following parameters are defined:
///
/// - #MSCHMC_PARAM_TIMESTAMP: Sets the "timestamp" of the CHM file
/// generated. This is not a timestamp, see mschmd_header::timestamp
/// for a description. If this timestamp is 0, generate() will use its
/// own algorithm for making a unique ID, based on the lengths and
/// names of files in the CHM itself. Defaults to 0, any value between
/// 0 and (2^32)-1 is valid.
/// - #MSCHMC_PARAM_LANGUAGE: Sets the "language" of the CHM file
/// generated. This is not the language used in the CHM file, but the
/// language setting of the user who ran the HTMLHelp compiler. It
/// defaults to 0x0409. The valid range is between 0x0000 and 0x7F7F.
/// - #MSCHMC_PARAM_LZXWINDOW: Sets the size of the LZX history window,
/// which is also the interval at which the compressed data stream can be
/// randomly accessed. The value is not a size in bytes, but a power of
/// two. The default value is 16 (which makes the window 2^16 bytes, or
/// 64 kilobytes), the valid range is from 15 (32 kilobytes) to 21 (2
/// megabytes).
/// - #MSCHMC_PARAM_DENSITY: Sets the "density" of quick reference
/// entries stored at the end of directory listing chunk. Each chunk is
/// 4096 bytes in size, and contains as many file entries as there is
/// room for. At the other end of the chunk, a list of "quick reference"
/// pointers is included. The offset of every 'N'th file entry is given a
/// quick reference, where N = (2^density) + 1. The default density is
/// 2. The smallest density is 0 (N=2), the maximum is 10 (N=1025). As
/// each file entry requires at least 5 bytes, the maximum number of
/// entries in a single chunk is roughly 800, so the maximum value 10
/// can be used to indicate there are no quickrefs at all.
/// - #MSCHMC_PARAM_INDEX: Sets whether or not to include quick lookup
/// index chunk(s), in addition to normal directory listing chunks. A
/// value of zero means no index chunks will be created, a non-zero value
/// means index chunks will be created. The default is zero, "don't
/// create an index".
/// </summary>
/// <param name="param">The parameter to set</param>
/// <param name="value">The value to set the parameter to</param>
/// <returns>
/// MSPACK_ERR_OK if all is OK, or MSPACK_ERR_ARGS if there
/// is a problem with either parameter or value.
/// </returns>
/// <see cref="generate(mschmc_file[], in string)"/>
public MSPACK_ERR set_param(MSCHMC_PARAM param, int value) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Returns the error code set by the most recently called method.
/// </summary>
/// <returns>The most recent error code</returns>
/// <see cref="set_param(int, int)"/>
/// <see cref="generate(mschmc_file[], in string)"/>
public MSPACK_ERR last_error() => MSPACK_ERR.MSPACK_ERR_OK;
}
}
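Putting the documented calls together, a hedged usage sketch of the compressor API above. The MSCHMC.MSCHMC_MSCOMP section value and the parameter name MSCHMC_PARAM_LZXWINDOW follow the libmspack names referenced in the comments but are assumptions here, and usings for the SabreTools.Compression.libmspack namespaces are omitted:

internal static class ChmBuildSketch
{
    public static MSPACK_ERR BuildChm(string outputPath)
    {
        var chmc = new Compressor();

        // 2^16 = 64 KiB LZX window, per the MSCHMC_PARAM_LZXWINDOW notes above
        chmc.set_param(MSCHMC_PARAM.MSCHMC_PARAM_LZXWINDOW, 16);

        // Buffer the compressed section through a temporary file
        chmc.use_temporary_file(1, "chm_section.tmp");

        var files = new[]
        {
            // The path-inside-the-CHM member (chm_filename) is omitted in this sketch
            new mschmc_file { section = MSCHMC.MSCHMC_MSCOMP, filename = "index.html" },
            new mschmc_file { section = MSCHMC.MSCHMC_ENDLIST },
        };
        return chmc.generate(files, outputPath);
    }
}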


@@ -1,82 +0,0 @@
namespace SabreTools.Compression.libmspack.CHM
{
public static class Constants
{
public const ushort chmhead_Signature = 0x0000;
public const ushort chmhead_Version = 0x0004;
public const ushort chmhead_HeaderLen = 0x0008;
public const ushort chmhead_Unknown1 = 0x000C;
public const ushort chmhead_Timestamp = 0x0010;
public const ushort chmhead_LanguageID = 0x0014;
public const ushort chmhead_GUID1 = 0x0018;
public const ushort chmhead_GUID2 = 0x0028;
public const ushort chmhead_SIZEOF = 0x0038;
public const ushort chmhst_OffsetHS0 = 0x0000;
public const ushort chmhst_LengthHS0 = 0x0008;
public const ushort chmhst_OffsetHS1 = 0x0010;
public const ushort chmhst_LengthHS1 = 0x0018;
public const ushort chmhst_SIZEOF = 0x0020;
public const ushort chmhst3_OffsetCS0 = 0x0020;
public const ushort chmhst3_SIZEOF = 0x0028;
public const ushort chmhs0_Unknown1 = 0x0000;
public const ushort chmhs0_Unknown2 = 0x0004;
public const ushort chmhs0_FileLen = 0x0008;
public const ushort chmhs0_Unknown3 = 0x0010;
public const ushort chmhs0_Unknown4 = 0x0014;
public const ushort chmhs0_SIZEOF = 0x0018;
public const ushort chmhs1_Signature = 0x0000;
public const ushort chmhs1_Version = 0x0004;
public const ushort chmhs1_HeaderLen = 0x0008;
public const ushort chmhs1_Unknown1 = 0x000C;
public const ushort chmhs1_ChunkSize = 0x0010;
public const ushort chmhs1_Density = 0x0014;
public const ushort chmhs1_Depth = 0x0018;
public const ushort chmhs1_IndexRoot = 0x001C;
public const ushort chmhs1_FirstPMGL = 0x0020;
public const ushort chmhs1_LastPMGL = 0x0024;
public const ushort chmhs1_Unknown2 = 0x0028;
public const ushort chmhs1_NumChunks = 0x002C;
public const ushort chmhs1_LanguageID = 0x0030;
public const ushort chmhs1_GUID = 0x0034;
public const ushort chmhs1_Unknown3 = 0x0044;
public const ushort chmhs1_Unknown4 = 0x0048;
public const ushort chmhs1_Unknown5 = 0x004C;
public const ushort chmhs1_Unknown6 = 0x0050;
public const ushort chmhs1_SIZEOF = 0x0054;
public const ushort pmgl_Signature = 0x0000;
public const ushort pmgl_QuickRefSize = 0x0004;
public const ushort pmgl_Unknown1 = 0x0008;
public const ushort pmgl_PrevChunk = 0x000C;
public const ushort pmgl_NextChunk = 0x0010;
public const ushort pmgl_Entries = 0x0014;
public const ushort pmgl_headerSIZEOF = 0x0014;
public const ushort pmgi_Signature = 0x0000;
public const ushort pmgi_QuickRefSize = 0x0004;
public const ushort pmgi_Entries = 0x0008;
public const ushort pmgi_headerSIZEOF = 0x000C;
public const ushort lzxcd_Length = 0x0000;
public const ushort lzxcd_Signature = 0x0004;
public const ushort lzxcd_Version = 0x0008;
public const ushort lzxcd_ResetInterval = 0x000C;
public const ushort lzxcd_WindowSize = 0x0010;
public const ushort lzxcd_CacheSize = 0x0014;
public const ushort lzxcd_Unknown1 = 0x0018;
public const ushort lzxcd_SIZEOF = 0x001C;
public const ushort lzxrt_Unknown1 = 0x0000;
public const ushort lzxrt_NumEntries = 0x0004;
public const ushort lzxrt_EntrySize = 0x0008;
public const ushort lzxrt_TableOffset = 0x000C;
public const ushort lzxrt_UncompLen = 0x0010;
public const ushort lzxrt_CompLen = 0x0018;
public const ushort lzxrt_FrameLen = 0x0020;
public const ushort lzxrt_Entries = 0x0028;
public const ushort lzxrt_headerSIZEOF = 0x0028;
}
}


@@ -1,922 +0,0 @@
using System;
using System.Linq;
using System.Runtime.InteropServices;
using static SabreTools.Compression.libmspack.CHM.Constants;
using static SabreTools.Compression.libmspack.macros;
using static SabreTools.Compression.libmspack.system;
namespace SabreTools.Compression.libmspack.CHM
{
/// <summary>
/// A decompressor for .CHM (Microsoft HTMLHelp) files
///
/// All fields are READ ONLY.
/// </summary>
/// <see cref="mspack.DestroyCHMDecomperssor(Decompressor)"/>
public unsafe class Decompressor : BaseDecompressor
{
public mschmd_decompress_state d { get; private set; }
// Filenames of the system files used for decompression.
// Content and ControlData are essential.
// ResetTable is preferred, but SpanInfo can be used if not available
private const string content_name = "::DataSpace/Storage/MSCompressed/Content";
private const string control_name = "::DataSpace/Storage/MSCompressed/ControlData";
private const string spaninfo_name = "::DataSpace/Storage/MSCompressed/SpanInfo";
private const string rtable_name = "::DataSpace/Storage/MSCompressed/Transform/{7FC28940-9D31-11D0-9B27-00A0C91E9C7C}/InstanceData/ResetTable";
// The GUIDs found in CHM header
private static readonly byte[] guids = new byte[32]
{
// {7C01FD10-7BAA-11D0-9E0C-00A0-C922-E6EC}
0x10, 0xFD, 0x01, 0x7C, 0xAA, 0x7B, 0xD0, 0x11,
0x9E, 0x0C, 0x00, 0xA0, 0xC9, 0x22, 0xE6, 0xEC,
// {7C01FD11-7BAA-11D0-9E0C-00A0-C922-E6EC}
0x11, 0xFD, 0x01, 0x7C, 0xAA, 0x7B, 0xD0, 0x11,
0x9E, 0x0C, 0x00, 0xA0, 0xC9, 0x22, 0xE6, 0xEC
};
/// <summary>
/// Creates a new CHM decompressor
/// </summary>
public Decompressor()
{
this.system = new mspack_default_system();
error = MSPACK_ERR.MSPACK_ERR_OK;
d = null;
}
/// <summary>
/// Destroys an existing CHM decompressor
/// </summary>
~Decompressor()
{
mspack_system sys = this.system;
if (this.d != null)
{
if (this.d.infh != null) sys.close(this.d.infh);
if (this.d.state != null) lzxd_free(this.d.state);
//sys.free(this.d);
}
//sys.free(this);
}
/// <summary>
/// Opens a CHM helpfile and reads its contents.
///
/// If the file opened is a valid CHM helpfile, all headers will be read
/// and a mschmd_header structure will be returned, with a full list of
/// files.
///
/// In the case of an error occurring, null is returned and the error code
/// is available from last_error().
///
/// The filename pointer should be considered "in use" until close() is
/// called on the CHM helpfile.
/// </summary>
/// <param name="filename">
/// The filename of the CHM helpfile. This is passed
/// directly to mspack_system::open().
/// </param>
/// <returns>A pointer to a mschmd_header structure, or null on failure</returns>
/// <see cref="close(mschmd_header)"/>
public mschmd_header open(in string filename)
{
return chmd_real_open(filename, 1);
}
/// <summary>
/// Closes a previously opened CHM helpfile.
///
/// This closes a CHM helpfile, frees the mschmd_header and all
/// mschmd_file structures associated with it (if any). This works on
/// both helpfiles opened with open() and helpfiles opened with
/// fast_open().
///
/// The CHM header pointer is now invalid and cannot be used again. All
/// mschmd_file pointers referencing that CHM are also now invalid, and
/// cannot be used again.
/// </summary>
/// <param name="chm">The CHM helpfile to close</param>
/// <see cref="open(in string)"/>
/// <see cref="fast_open(in string)"/>
public void close(mschmd_header chm)
{
mschmd_file fi, nfi;
mspack_system sys;
uint i;
sys = this.system;
this.error = MSPACK_ERR.MSPACK_ERR_OK;
// Free files
for (fi = chm.files; fi != null; fi = nfi)
{
nfi = fi.next;
//sys.free(fi);
}
for (fi = chm.sysfiles; fi != null; fi = nfi)
{
nfi = fi.next;
//sys.free(fi);
}
// If this CHM was being decompressed, free decompression state
if (this.d != null && (this.d.chm == chm))
{
if (this.d.infh != null) sys.close(this.d.infh);
if (this.d.state != null) lzxd_free(this.d.state);
//sys.free(this.d);
this.d = null;
}
// If this CHM had a chunk cache, free it and contents
if (chm.chunk_cache != null)
{
for (i = 0; i < chm.num_chunks; i++) sys.free(chm.chunk_cache[i]);
sys.free(chm.chunk_cache);
}
//sys.free(chm);
}
/// <summary>
/// Reads the basic CHM file headers. If the "entire" parameter is
/// non-zero, all file entries will also be read. fills out a pre-existing
/// mschmd_header structure, allocates memory for files as necessary
/// </summary>
private MSPACK_ERR chmd_read_headers(mspack_system sys, mspack_file fh, mschmd_header chm, int entire)
{
uint errors, num_chunks;
FixedArray<byte> buf = new FixedArray<byte>(0x54);
FixedArray<byte> chunk = null;
byte* name, p, end;
mschmd_file fi, link = null;
long offset_hs0, filelen;
int num_entries;
MSPACK_ERR err = MSPACK_ERR.MSPACK_ERR_OK;
// Initialise pointers
chm.files = null;
chm.sysfiles = null;
chm.chunk_cache = null;
chm.sec0.chm = chm;
chm.sec0.id = 0;
chm.sec1.chm = chm;
chm.sec1.id = 1;
chm.sec1.content = null;
chm.sec1.control = null;
chm.sec1.spaninfo = null;
chm.sec1.rtable = null;
// Read the first header
if (sys.read(fh, buf, chmhead_SIZEOF) != chmhead_SIZEOF)
{
return MSPACK_ERR.MSPACK_ERR_READ;
}
// Check ITSF signature
if (EndGetI32(buf, chmhead_Signature) != 0x46535449)
{
return MSPACK_ERR.MSPACK_ERR_SIGNATURE;
}
// Check both header GUIDs
if (!buf.ToArray().Skip(chmhead_GUID1).Take(guids.Length).SequenceEqual(guids))
{
Console.Error.WriteLine("Incorrect GUIDs");
return MSPACK_ERR.MSPACK_ERR_SIGNATURE;
}
chm.version = EndGetI32(buf, chmhead_Version);
chm.timestamp = EndGetM32(buf, chmhead_Timestamp);
chm.language = EndGetI32(buf, chmhead_LanguageID);
if (chm.version > 3)
{
sys.message(fh, "WARNING; CHM version > 3");
}
// Read the header section table
if (sys.read(fh, buf, chmhst3_SIZEOF) != chmhst3_SIZEOF)
{
return MSPACK_ERR.MSPACK_ERR_READ;
}
// chmhst3_OffsetCS0 does not exist in version 1 or 2 CHM files.
// The offset will be corrected later, once HS1 is read.
if (read_off64(&offset_hs0, &buf[chmhst_OffsetHS0], sys, fh) ||
read_off64(&chm.dir_offset, &buf[chmhst_OffsetHS1], sys, fh) ||
read_off64(&chm.sec0.offset, &buf[chmhst3_OffsetCS0], sys, fh))
{
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
// Seek to header section 0
if (sys.seek(fh, offset_hs0, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_START) != 0)
{
return MSPACK_ERR.MSPACK_ERR_SEEK;
}
// Read header section 0
if (sys.read(fh, buf, chmhs0_SIZEOF) != chmhs0_SIZEOF)
{
return MSPACK_ERR.MSPACK_ERR_READ;
}
if (read_off64(&chm.length, &buf[chmhs0_FileLen], sys, fh))
{
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
// Compare declared CHM file size against actual size
if (mspack_sys_filelen(sys, fh, &filelen) == 0)
{
if (chm.length > filelen)
{
sys.message(fh, $"WARNING; file possibly truncated by {chm.length - filelen} bytes");
}
else if (chm.length < filelen)
{
sys.message(fh, $"WARNING; possible {filelen - chm.length} extra bytes at end of file");
}
}
// Seek to header section 1
if (sys.seek(fh, chm.dir_offset, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_START) != 0)
{
return MSPACK_ERR.MSPACK_ERR_SEEK;
}
// Read header section 1
if (sys.read(fh, buf, chmhs1_SIZEOF) != chmhs1_SIZEOF)
{
return MSPACK_ERR.MSPACK_ERR_READ;
}
chm.dir_offset = sys.tell(fh);
chm.chunk_size = EndGetI32(buf, chmhs1_ChunkSize);
chm.density = EndGetI32(buf, chmhs1_Density);
chm.depth = EndGetI32(buf, chmhs1_Depth);
chm.index_root = EndGetI32(buf, chmhs1_IndexRoot);
chm.num_chunks = EndGetI32(buf, chmhs1_NumChunks);
chm.first_pmgl = EndGetI32(buf, chmhs1_FirstPMGL);
chm.last_pmgl = EndGetI32(buf, chmhs1_LastPMGL);
if (chm.version < 3)
{
// Versions before 3 don't have chmhst3_OffsetCS0
chm.sec0.offset = chm.dir_offset + (chm.chunk_size * chm.num_chunks);
}
// Check if content offset or file size is wrong
if (chm.sec0.offset > chm.length)
{
Console.Error.WriteLine("content section begins after file has ended");
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
// Ensure there are chunks and that chunk size is
// large enough for signature and num_entries
if (chm.chunk_size < (pmgl_Entries + 2))
{
Console.Error.WriteLine("chunk size not large enough");
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
if (chm.num_chunks == 0)
{
Console.Error.WriteLine("no chunks");
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
// The chunk_cache data structure is not great; large values for num_chunks
// or num_chunks*chunk_size can exhaust all memory. Until a better chunk
// cache is implemented, put arbitrary limits on num_chunks and chunk size.
if (chm.num_chunks > 100000)
{
Console.Error.WriteLine("more than 100,000 chunks");
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
if (chm.chunk_size > 8192)
{
Console.Error.WriteLine("chunk size over 8192 (get in touch if this is valid)");
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
if ((long)chm.chunk_size * (long)chm.num_chunks > chm.length)
{
Console.Error.WriteLine("chunks larger than entire file");
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
// Common sense checks on header section 1 fields
if (chm.chunk_size != 4096)
{
sys.message(fh, "WARNING; chunk size is not 4096");
}
if (chm.first_pmgl != 0)
{
sys.message(fh, "WARNING; first PMGL chunk is not zero");
}
if (chm.first_pmgl > chm.last_pmgl)
{
Console.Error.WriteLine("first pmgl chunk is after last pmgl chunk");
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
if (chm.index_root != 0xFFFFFFFF && chm.index_root >= chm.num_chunks)
{
Console.Error.WriteLine("index_root outside valid range");
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
// If we are doing a quick read, stop here!
if (entire == 0)
{
return MSPACK_ERR.MSPACK_ERR_OK;
}
// Seek to the first PMGL chunk, and reduce the number of chunks to read
if (chm.first_pmgl != 0)
{
long pmgl_offset = (long)chm.first_pmgl * (long)chm.chunk_size;
if (sys.seek(fh, pmgl_offset, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_CUR) != 0)
{
return MSPACK_ERR.MSPACK_ERR_SEEK;
}
}
num_chunks = chm.last_pmgl - chm.first_pmgl + 1;
chunk = new FixedArray<byte>((int)chm.chunk_size);
// Read and process all chunks from FirstPMGL to LastPMGL
errors = 0;
while (num_chunks-- > 0)
{
// Read next chunk
if (sys.read(fh, chunk, (int)chm.chunk_size) != (int)chm.chunk_size)
{
sys.free(chunk);
return MSPACK_ERR.MSPACK_ERR_READ;
}
// Process only directory (PMGL) chunks
if (EndGetI32(chunk, pmgl_Signature) != 0x4C474D50) continue;
if (EndGetI32(chunk, pmgl_QuickRefSize) < 2)
{
sys.message(fh, "WARNING; PMGL quickref area is too small");
}
if (EndGetI32(chunk, pmgl_QuickRefSize) >
(chm.chunk_size - pmgl_Entries))
{
sys.message(fh, "WARNING; PMGL quickref area is too large");
}
p = (byte*)chunk.Pointer + pmgl_Entries;
end = (byte*)chunk.Pointer + chm.chunk_size - 2;
num_entries = EndGetI16(chunk, (int)(chm.chunk_size - 2));
while (num_entries-- > 0)
{
uint name_len, section;
long offset, length;
name_len = read_encint(&p, end, &err);
if (err != MSPACK_ERR.MSPACK_ERR_OK || (name_len > (uint)(end - p))) goto encint_err;
name = p; p += name_len;
section = read_encint(&p, end, &err);
offset = read_encint(&p, end, &err);
length = read_encint(&p, end, &err);
if (err != MSPACK_ERR.MSPACK_ERR_OK) goto encint_err;
// Ignore blank or one-char (e.g. "/") filenames we'd return as blank
if (name_len < 2 || name[0] == 0x00 || name[1] == 0x00) continue;
// Empty files and directory names are stored as a file entry at
// offset 0 with length 0. We want to keep empty files, but not
// directory names, which end with a "/"
if ((offset == 0) && (length == 0))
{
if ((name_len > 0) && (name[name_len - 1] == '/')) continue;
}
if (section > 1)
{
sys.message(fh, $"Invalid section number '{section}'.");
continue;
}
fi = new mschmd_file();
fi.next = null;
fi.section = (section == 0 ? (mschmd_section)chm.sec0 : (mschmd_section)chm.sec1);
fi.offset = offset;
fi.length = length;
// Copy the raw name bytes and decode them as UTF-8; a managed string needs no
// explicit null terminator (indexing filenameArr[name_len] would be out of bounds)
byte[] filenameArr = new byte[name_len];
Marshal.Copy((IntPtr)name, filenameArr, 0, (int)name_len);
fi.filename = System.Text.Encoding.UTF8.GetString(filenameArr);
if (name[0] == ':' && name[1] == ':')
{
// System file
if (name_len == 40 && fi.filename.StartsWith(content_name))
{
chm.sec1.content = fi;
}
else if (name_len == 44 && fi.filename.StartsWith(control_name))
{
chm.sec1.control = fi;
}
else if (name_len == 41 && fi.filename.StartsWith(spaninfo_name))
{
chm.sec1.spaninfo = fi;
}
else if (name_len == 105 && fi.filename.StartsWith(rtable_name))
{
chm.sec1.rtable = fi;
}
fi.next = chm.sysfiles;
chm.sysfiles = fi;
}
else
{
// Normal file
if (link != null) link.next = fi; else chm.files = fi;
link = fi;
}
}
// This is reached either when num_entries runs out, or if
// an ENCINT is badly encoded
encint_err:
if (num_entries >= 0)
{
Console.Error.WriteLine("bad encint before all entries could be read");
errors++;
}
}
sys.free(chunk);
return (errors > 0) ? MSPACK_ERR.MSPACK_ERR_DATAFORMAT : MSPACK_ERR.MSPACK_ERR_OK;
}
/// <summary>
/// Extracts a file from a CHM helpfile.
///
/// This extracts a file from a CHM helpfile and writes it to the given
/// filename. The filename of the file, mscabd_file::filename, is not
/// used by extract(), but can be used by the caller as a guide for
/// constructing an appropriate filename.
///
/// This method works both with files found in the mschmd_header::files
/// and mschmd_header::sysfiles list and mschmd_file structures generated
/// on the fly by fast_find().
/// </summary>
/// <param name="file">The file to be decompressed</param>
/// <param name="filename">The filename of the file being written to</param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
public MSPACK_ERR extract(mschmd_file file, in string filename) => throw new NotImplementedException();
/// <summary>
/// Returns the error code set by the most recently called method.
///
/// This is useful for open() and fast_open(), which do not return an
/// error code directly.
/// </summary>
/// <returns>The most recent error code</returns>
/// <see cref="open(in string)"/>
/// <see cref="extract(mschmd_file, in string)"/>
public MSPACK_ERR last_error() => throw new NotImplementedException();
/// <summary>
/// Opens a CHM helpfile quickly.
///
/// If the file opened is a valid CHM helpfile, only essential headers
/// will be read. A mschmd_header structure will still be returned, as
/// with open(), but the mschmd_header::files field will be null. No
/// file details will be automatically read. The fast_find() method
/// must be used to obtain file details.
///
/// In the case of an error occurring, null is returned and the error code
/// is available from last_error().
///
/// The filename pointer should be considered "in use" until close() is
/// called on the CHM helpfile.
/// </summary>
/// <param name="filename">
/// The filename of the CHM helpfile. This is passed
/// directly to mspack_system::open().
/// </param>
/// <returns>A pointer to a mschmd_header structure, or null on failure</returns>
/// <see cref="open(in string)"/>
/// <see cref="close(mschmd_header)"/>
/// <see cref="fast_find(mschmd_header, in string, ref mschmd_file, int)"/>
/// <see cref="extract(mschmd_file, in string)"/>
public mschmd_header fast_open(in string filename)
{
return chmd_real_open(filename, 0);
}
/// <summary>
/// The real implementation of chmd_open() and chmd_fast_open(). It simply
/// passes the "entire" parameter to chmd_read_headers(), which will then
/// either read all headers, or a bare minimum.
/// </summary>
private mschmd_header chmd_real_open(in string filename, int entire)
{
mschmd_header chm = null;
MSPACK_ERR error;
mspack_system sys = this.system;
mspack_file fh;
if ((fh = sys.open(filename, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ)) != null)
{
chm = new mschmd_header();
chm.filename = filename;
error = chmd_read_headers(sys, fh, chm, entire);
if (error != MSPACK_ERR.MSPACK_ERR_OK)
{
// If the error is DATAFORMAT, and there are some results, return
// partial results with a warning, rather than nothing
if (error == MSPACK_ERR.MSPACK_ERR_DATAFORMAT && (chm.files != null || chm.sysfiles != null))
{
sys.message(fh, "WARNING; contents are corrupt");
error = MSPACK_ERR.MSPACK_ERR_OK;
}
else
{
close(chm);
chm = null;
}
}
this.error = error;
sys.close(fh);
}
else
{
this.error = MSPACK_ERR.MSPACK_ERR_OPEN;
}
return chm;
}
/// <summary>
/// Finds file details quickly.
///
/// Instead of reading all CHM helpfile headers and building a list of
/// files, fast_open() and fast_find() are intended for finding file
/// details only when they are needed. The CHM file format includes an
/// on-disk file index to allow this.
///
/// Given a case-sensitive filename, fast_find() will search the on-disk
/// index for that file.
///
/// If the file was found, the caller-provided mschmd_file structure will
/// be filled out like so:
/// - section: the correct value for the found file
/// - offset: the correct value for the found file
/// - length: the correct value for the found file
/// - all other structure elements: null or 0
///
/// If the file was not found, MSPACK_ERR_OK will still be returned as the
/// result, but the caller-provided structure will be filled out like so:
/// - section: null
/// - offset: 0
/// - length: 0
/// - all other structure elements: null or 0
///
/// This method is intended to be used in conjunction with CHM helpfiles
/// opened with fast_open(), but it also works with helpfiles opened
/// using the regular open().
/// </summary>
/// <param name="chm">The CHM helpfile to search for the file</param>
/// <param name="filename">The filename of the file to search for</param>
/// <param name="f_ptr">A pointer to a caller-provded mschmd_file structure</param>
/// <param name="f_size"><tt>sizeof(mschmd_file)</tt></param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
/// <see cref="open(in string)"/>
/// <see cref="close(mschmd_header)"/>
/// <see cref="fast_find(mschmd_header, in string, ref mschmd_file, int)"/>
/// <see cref="extract(mschmd_file, in string)"/>
public MSPACK_ERR fast_find(mschmd_header chm, in string filename, ref mschmd_file f_ptr, int f_size)
{
mspack_system sys;
mspack_file fh;
// p and end are initialised to prevent MSVC warning about "potentially"
// uninitialised usage. This is provably untrue, but MS won't fix:
// https://developercommunity.visualstudio.com/content/problem/363489/c4701-false-positive-warning.html
FixedArray<byte> chunk;
byte* p = null, end = null;
MSPACK_ERR err = MSPACK_ERR.MSPACK_ERR_OK;
int result = -1;
uint n, sec;
if (chm == null || f_ptr == null)
{
return MSPACK_ERR.MSPACK_ERR_ARGS;
}
sys = this.system;
// Clear the results structure
f_ptr = new mschmd_file();
if ((fh = sys.open(chm.filename, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ)) == null)
{
return MSPACK_ERR.MSPACK_ERR_OPEN;
}
// Go through PMGI chunk hierarchy to reach PMGL chunk
if (chm.index_root < chm.num_chunks)
{
n = chm.index_root;
for (; ; )
{
if ((chunk = read_chunk(chm, fh, n)) == null)
{
sys.close(fh);
return this.error;
}
// Search PMGI/PMGL chunk. exit early if no entry found
if ((result = search_chunk(chm, chunk, filename, &p, &end)) <= 0)
{
break;
}
// Found result. loop around for next chunk if this is PMGI
if (chunk[3] == 0x4C) break;
n = read_encint(&p, end, &err);
if (err != MSPACK_ERR.MSPACK_ERR_OK) goto encint_err;
}
}
else
{
// PMGL chunks only, search from first_pmgl to last_pmgl
for (n = chm.first_pmgl; n <= chm.last_pmgl; n = EndGetI32(chunk, pmgl_NextChunk))
{
if ((chunk = read_chunk(chm, fh, n)) == null)
{
err = this.error;
break;
}
// Search PMGL chunk. exit if file found
if ((result = search_chunk(chm, chunk, filename, &p, &end)) > 0)
{
break;
}
// Stop simple infinite loops: can't visit the same chunk twice
if (n == EndGetI32(chunk, pmgl_NextChunk))
{
break;
}
}
}
// If we found a file, read it
if (result > 0)
{
sec = read_encint(&p, end, &err);
f_ptr.section = (sec == 0) ? (mschmd_section)chm.sec0 : (mschmd_section)chm.sec1;
f_ptr.offset = read_encint(&p, end, &err);
f_ptr.length = read_encint(&p, end, &err);
if (err != MSPACK_ERR.MSPACK_ERR_OK) goto encint_err;
}
else if (result < 0)
{
err = MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
sys.close(fh);
return this.error = err;
encint_err:
Console.Error.WriteLine("Bad encint in PGMI/PGML chunk");
sys.close(fh);
return this.error = MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
/// <summary>
/// Reads the given chunk into memory, storing it in a chunk cache
/// so it doesn't need to be read from disk more than once
/// </summary>
/// <returns></returns>
private FixedArray<byte> read_chunk(mschmd_header chm, mspack_file fh, uint chunk_num)
{
mspack_system sys = this.system;
FixedArray<byte> buf;
// Check arguments - most are already checked by chmd_fast_find
if (chunk_num >= chm.num_chunks) return null;
// Ensure chunk cache is available
if (chm.chunk_cache == null)
{
chm.chunk_cache = new FixedArray<byte>[chm.num_chunks];
if (chm.chunk_cache == null)
{
this.error = MSPACK_ERR.MSPACK_ERR_NOMEMORY;
return null;
}
}
// Try to answer out of chunk cache
if (chm.chunk_cache[chunk_num] != null) return chm.chunk_cache[chunk_num];
// Need to read chunk - allocate memory for it
buf = new FixedArray<byte>((int)chm.chunk_size);
// Seek to block and read it
if (sys.seek(fh, chm.dir_offset + (chunk_num * chm.chunk_size), MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_START) != 0)
{
this.error = MSPACK_ERR.MSPACK_ERR_SEEK;
sys.free(buf);
return null;
}
if (sys.read(fh, buf, (int)chm.chunk_size) != (int)chm.chunk_size)
{
this.error = MSPACK_ERR.MSPACK_ERR_READ;
sys.free(buf);
return null;
}
// Check the signature. Is it PMGL or PMGI?
if (!((buf[0] == 0x50) && (buf[1] == 0x4D) && (buf[2] == 0x47) && ((buf[3] == 0x4C) || (buf[3] == 0x49))))
{
this.error = MSPACK_ERR.MSPACK_ERR_SEEK;
sys.free(buf);
return null;
}
// All OK. Store chunk in cache and return it
return chm.chunk_cache[chunk_num] = buf;
}
/// <summary>
/// Searches a PMGI/PMGL chunk for a given filename entry. Returns -1 on
/// data format error, 0 if entry definitely not found, 1 if entry
/// found. In the latter case, *result and *result_end are set pointing
/// to that entry's data (either the "next chunk" ENCINT for a PMGI or
/// the section, offset and length ENCINTs for a PMGL).
///
/// In the case of PMGL chunks, the entry has definitely been
/// found. In the case of PMGI chunks, the entry which points to the
/// chunk that may eventually contain that entry has been found.
/// </summary>
/// <returns></returns>
private int search_chunk(mschmd_header chm, in FixedArray<byte> chunk, in string filename, byte** result, byte** result_end)
{
byte* p;
uint qr_size, num_entries, qr_entries, qr_density, name_len;
uint L, R, M, entries_off, is_pmgl;
int cmp;
MSPACK_ERR err = MSPACK_ERR.MSPACK_ERR_OK;
uint fname_len = (uint)filename.Length;
// PMGL chunk or PMGI chunk? (note: read_chunk() has already
// checked the rest of the characters in the chunk signature)
if (chunk[3] == 0x4C)
{
is_pmgl = 1;
entries_off = pmgl_Entries;
}
else
{
is_pmgl = 0;
entries_off = pmgi_Entries;
}
// Step 1: binary search first filename of each QR entry
// - target filename == entry
// found file
// - target filename < all entries
// file not found
// - target filename > all entries
// proceed to step 2 using final entry
// - target filename between two searched entries
// proceed to step 2
qr_size = EndGetI32(chunk, pmgl_QuickRefSize);
int start = (int)(chm.chunk_size - 2);
int end = (int)(chm.chunk_size - qr_size);
num_entries = EndGetI16(chunk, (int)(chm.chunk_size - 2));
qr_density = (uint)(1 + (1 << (int)chm.density));
qr_entries = (num_entries + qr_density - 1) / qr_density;
if (num_entries == 0)
{
Console.Error.WriteLine("Chunk has no entries");
return -1;
}
if (qr_size > chm.chunk_size)
{
Console.Error.WriteLine("Quickref size > chunk size");
return -1;
}
*result_end = &chunk[end];
if (((int)qr_entries * 2) > (start - end))
{
Console.Error.WriteLine("WARNING; more quickrefs than quickref space");
qr_entries = 0; // But we can live with it
}
if (qr_entries > 0)
{
L = 0;
R = qr_entries - 1;
do
{
// Pick new midpoint
M = (L + R) >> 1;
// Compare filename with entry QR points to
p = &chunk[entries_off + (M != 0 ? EndGetI16(chunk, start - (int)(M << 1)) : 0)];
name_len = read_encint(&p, end, &err);
if (err != MSPACK_ERR.MSPACK_ERR_OK || (name_len > (uint)(end - p))) goto encint_err;
cmp = compare(filename, (char*)p, fname_len, name_len);
if (cmp == 0) break;
else if (cmp < 0) { if (M != 0) R = M - 1; else return 0; }
else if (cmp > 0) L = M + 1;
} while (L <= R);
M = (L + R) >> 1;
if (cmp == 0)
{
/* exact match! */
p += name_len;
*result = p;
return 1;
}
/* otherwise, read the group of entries for QR entry M */
p = &chunk[entries_off + (M != 0 ? EndGetI16(chunk, start - (int)(M << 1)) : 0)];
num_entries -= (M * qr_density);
if (num_entries > qr_density) num_entries = qr_density;
}
else
{
p = &chunk[entries_off];
}
/* Step 2: linear search through the set of entries reached in step 1.
* - filename == any entry
* found entry
* - filename < all entries (PMGI) or any entry (PMGL)
* entry not found, stop now
* - filename > all entries
* entry not found (PMGL) / maybe found (PMGI)
*/
*result = null;
while (num_entries-- > 0)
{
name_len = read_encint(&p, end, &err);
if (err != MSPACK_ERR.MSPACK_ERR_OK || (name_len > (uint)(end - p))) goto encint_err;
cmp = compare(filename, (char*)p, fname_len, name_len);
p += name_len;
if (cmp == 0)
{
/* entry found */
*result = p;
return 1;
}
if (cmp < 0)
{
/* entry not found (PMGL) / maybe found (PMGI) */
break;
}
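/* ENCINTs store 7 bits per byte, most significant part first; a set
* high bit means another byte follows, so skipping one just advances
* until a byte with the high bit clear has been consumed. */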
/* read and ignore the rest of this entry */
if (is_pmgl != 0)
{
while (p < end && (*p++ & 0x80)) ; /* skip section ENCINT */
while (p < end && (*p++ & 0x80)) ; /* skip offset ENCINT */
while (p < end && (*p++ & 0x80)) ; /* skip length ENCINT */
}
else
{
*result = p; /* store potential final result */
while (p < end && (*p++ & 0x80)) ; /* skip chunk number ENCINT */
}
}
/* PMGL? not found. PMGI? maybe found */
return (is_pmgl != 0) ? 0 : (*result != null ? 1 : 0);
encint_err:
Console.Error.WriteLine("bad encint while searching");
return -1;
}
}
}

View File

@@ -1,37 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents a file to be placed in a CHM helpfile.
///
/// A contiguous array of these structures should be passed to
/// mschm_compressor::generate(). The array list is terminated with an
/// entry whose mschmc_file::section field is set to #MSCHMC_ENDLIST, the
/// other fields in this entry are ignored.
/// </summary>
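/// <example>
/// An illustrative two-file list, terminated with MSCHMC_ENDLIST (the paths
/// and length are placeholders, not values required by the format):
/// <code>
/// var files = new mschmc_file[]
/// {
///     new mschmc_file { section = MSCHMC.MSCHMC_MSCOMP, filename = "topic.html", chm_filename = "/topic.html", length = 1024 },
///     new mschmc_file { section = MSCHMC.MSCHMC_ENDLIST }
/// };
/// </code>
/// </example>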
public class mschmc_file
{
/// <summary>
/// One of <see cref="MSCHMC"/> values.
/// </summary>
public MSCHMC section { get; set; }
/// <summary>
/// The filename of the source file that will be added to the CHM. This
/// is passed directly to mspack_system::open().
/// </summary>
public string filename { get; set; }
/// <summary>
/// The full path and filename of the file within the CHM helpfile, a
/// UTF-8 encoded null-terminated string.
/// </summary>
public string chm_filename { get; set; }
/// <summary>
/// The length of the file, in bytes. This will be adhered to strictly
/// and a read error will be issued if this many bytes cannot be read
/// from the real file at CHM generation time.
/// </summary>
public long length { get; set; }
}
}

View File

@@ -1,45 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public class mschmd_decompress_state
{
/// <summary>
/// CHM file being decompressed
/// </summary>
public mschmd_header chm { get; set; }
/// <summary>
/// Uncompressed length of LZX stream
/// </summary>
public long length { get; set; }
/// <summary>
/// Uncompressed offset within stream
/// </summary>
public long offset { get; set; }
/// <summary>
/// Offset in input file
/// </summary>
public long inoffset { get; set; }
/// <summary>
/// LZX decompressor state
/// </summary>
public lzxd_stream state { get; set; }
/// <summary>
/// Special I/O code for decompressor
/// </summary>
public mspack_system sys { get; set; }
/// <summary>
/// Input file handle
/// </summary>
public mspack_file infh { get; set; }
/// <summary>
/// Output file handle
/// </summary>
public mspack_file outfh { get; set; }
}
}

View File

@@ -1,37 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents a file stored in a CHM helpfile.
///
/// All fields are READ ONLY.
/// </summary>
public class mschmd_file
{
/// <summary>
/// A pointer to the next file in the list, or null if this is the final
/// file.
/// </summary>
public mschmd_file next { get; set; }
/// <summary>
/// A pointer to the section that this file is located in. Indirectly,
/// it also points to the CHM helpfile the file is located in.
/// </summary>
public mschmd_section section { get; set; }
/// <summary>
/// The offset within the section data that this file is located at.
/// </summary>
public long offset { get; set; }
/// <summary>
/// The length of this file, in bytes
/// </summary>
public long length { get; set; }
/// <summary>
/// The filename of this file -- a null terminated string in UTF-8.
/// </summary>
public string filename { get; set; }
}
}

View File

@@ -1,120 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents a CHM helpfile.
///
/// All fields are READ ONLY.
/// </summary>
public unsafe class mschmd_header
{
/// <summary>
/// The version of the CHM file format used in this file.
/// </summary>
public uint version { get; set; }
/// <summary>
/// The "timestamp" of the CHM helpfile.
///
/// It is the lower 32 bits of a 64-bit value representing the number of
/// centiseconds since 1601-01-01 00:00:00 UTC, plus 42. It is not useful
/// as a timestamp, but it is useful as a semi-unique ID.
/// </summary>
public uint timestamp { get; set; }
/// <summary>
/// The default Language and Country ID (LCID) of the user who ran the
/// HTMLHelp Compiler. This is not the language of the CHM file itself.
/// </summary>
public uint language { get; set; }
/// <summary>
/// The filename of the CHM helpfile. This is given by the library user
/// and may be in any format.
/// </summary>
public string filename { get; set; }
/// <summary>
/// The length of the CHM helpfile, in bytes.
/// </summary>
public long length { get; set; }
/// <summary>
/// A list of all non-system files in the CHM helpfile.
/// </summary>
public mschmd_file files { get; set; }
/// <summary>
/// A list of all system files in the CHM helpfile.
///
/// System files are files which begin with "::". They are meta-files
/// generated by the CHM creation process.
/// </summary>
public mschmd_file sysfiles { get; set; }
/// <summary>
/// The section 0 (uncompressed) data in this CHM helpfile.
/// </summary>
public mschmd_sec_uncompressed sec0 { get; set; }
/// <summary>
/// The section 1 (MSCompressed) data in this CHM helpfile.
/// </summary>
public mschmd_sec_mscompressed sec1 { get; set; }
/// <summary>
/// The file offset of the first PMGL/PMGI directory chunk.
/// </summary>
public long dir_offset { get; set; }
/// <summary>
/// The number of PMGL/PMGI directory chunks in this CHM helpfile.
/// </summary>
public uint num_chunks { get; set; }
/// <summary>
/// The size of each PMGL/PMGI chunk, in bytes.
/// </summary>
public uint chunk_size { get; set; }
/// <summary>
/// The "density" of the quick-reference section in PMGL/PMGI chunks.
/// </summary>
public uint density { get; set; }
/// <summary>
/// The depth of the index tree.
///
/// - if 1, there are no PMGI chunks, only PMGL chunks.
/// - if 2, there is 1 PMGI chunk. All chunk indices point to PMGL chunks.
/// - if 3, the root PMGI chunk points to secondary PMGI chunks, which in
/// turn point to PMGL chunks.
/// - and so on...
/// </summary>
public uint depth { get; set; }
/// <summary>
/// The number of the root PMGI chunk.
///
/// If there is no index in the CHM helpfile, this will be 0xFFFFFFFF.
/// </summary>
public uint index_root { get; set; }
/// <summary>
/// The number of the first PMGL chunk. Usually zero.
/// Available only in CHM decoder version 2 and above.
/// </summary>
public uint first_pmgl { get; set; }
/// <summary>
/// The number of the last PMGL chunk. Usually num_chunks-1.
/// Available only in CHM decoder version 2 and above.
/// </summary>
public uint last_pmgl { get; set; }
/// <summary>
/// A cache of loaded chunks, filled in by mschm_decoder::fast_find().
/// Available only in CHM decoder version 2 and above.
/// </summary>
public FixedArray<byte>[] chunk_cache { get; set; }
}
}

View File

@@ -1,31 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents the LZX compressed section of a CHM helpfile.
///
/// All fields are READ ONLY.
/// </summary>
public class mschmd_sec_mscompressed : mschmd_section
{
/// <summary>
/// A pointer to the meta-file which represents all LZX compressed data.
/// </summary>
public mschmd_file content { get; set; }
/// <summary>
/// A pointer to the file which contains the LZX control data.
/// </summary>
public mschmd_file control { get; set; }
/// <summary>
/// A pointer to the file which contains the LZX reset table.
/// </summary>
public mschmd_file rtable { get; set; }
/// <summary>
/// A pointer to the file which contains the LZX span information.
/// Available only in CHM decoder version 2 and above.
/// </summary>
public mschmd_file spaninfo { get; set; }
}
}

View File

@@ -1,15 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents the uncompressed section of a CHM helpfile.
///
/// All fields are READ ONLY.
/// </summary>
public class mschmd_sec_uncompressed : mschmd_section
{
/// <summary>
/// The file offset of where this section begins in the CHM helpfile.
/// </summary>
public long offset { get; set; }
}
}

View File

@@ -1,25 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents a section of a CHM helpfile.
///
/// All fields are READ ONLY.
///
/// Not used directly, but used as a generic base type for
/// mschmd_sec_uncompressed and mschmd_sec_mscompressed.
/// </summary>
public class mschmd_section
{
/// <summary>
/// A pointer to the CHM helpfile that contains this section.
/// </summary>
public mschmd_header chm { get; set; }
/// <summary>
/// The section ID. Either 0 for the uncompressed section
/// mschmd_sec_uncompressed, or 1 for the LZX compressed section
/// mschmd_sec_mscompressed. No other section IDs are known.
/// </summary>
public uint id { get; set; }
}
}

View File

@@ -1,10 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public static class Constants
{
/// <summary>
/// Offset from start of cabinet to the reserved header data (if present).
/// </summary>
public const int MSCAB_HDR_RESV_OFFSET = 0x28;
}
}

View File

@@ -1,422 +0,0 @@
using System;
namespace SabreTools.Compression.libmspack
{
#region mspack.h
/// <summary>
/// mspack_system::open() mode
/// </summary>
public enum MSPACK_SYS_OPEN : int
{
/// <summary>
/// Open existing file for reading
/// </summary>
MSPACK_SYS_OPEN_READ = 0,
/// <summary>
/// Open new file for writing
/// </summary>
MSPACK_SYS_OPEN_WRITE = 1,
/// <summary>
/// Open existing file for writing
/// </summary>
MSPACK_SYS_OPEN_UPDATE = 2,
/// <summary>
/// Open existing file for appending
/// </summary>
MSPACK_SYS_OPEN_APPEND = 3,
}
/// <summary>
/// mspack_system::seek() mode
/// </summary>
public enum MSPACK_SYS_SEEK : int
{
/// <summary>
/// Seek relative to start of file
/// </summary>
MSPACK_SYS_SEEK_START = 0,
/// <summary>
/// Seek relative to current offset
/// </summary>
MSPACK_SYS_SEEK_CUR = 1,
/// <summary>
/// Seek relative to end of file
/// </summary>
MSPACK_SYS_SEEK_END = 2,
}
/// <summary>
/// Error code
/// </summary>
public enum MSPACK_ERR : int
{
MSPACK_ERR_OK = 0,
/// <summary>
/// Bad arguments to method
/// </summary>
MSPACK_ERR_ARGS = 1,
/// <summary>
/// Error opening file
/// </summary>
MSPACK_ERR_OPEN = 2,
/// <summary>
/// Error reading file
/// </summary>
MSPACK_ERR_READ = 3,
/// <summary>
/// Error writing file
/// </summary>
MSPACK_ERR_WRITE = 4,
/// <summary>
/// Seek error
/// </summary>
MSPACK_ERR_SEEK = 5,
/// <summary>
/// Out of memory
/// </summary>
MSPACK_ERR_NOMEMORY = 6,
/// <summary>
/// Bad "magic id" in file
/// </summary>
MSPACK_ERR_SIGNATURE = 7,
/// <summary>
/// Bad or corrupt file format
/// </summary>
MSPACK_ERR_DATAFORMAT = 8,
/// <summary>
/// Bad checksum or CRC
/// </summary>
MSPACK_ERR_CHECKSUM = 9,
/// <summary>
/// Error during compression
/// </summary>
MSPACK_ERR_CRUNCH = 10,
/// <summary>
/// Error during decompression
/// </summary>
MSPACK_ERR_DECRUNCH = 11,
}
/// <summary>
/// Cabinet header flag
/// </summary>
[Flags]
public enum MSCAB_HDR : int
{
/// <summary>
/// Cabinet has a predecessor
/// </summary>
MSCAB_HDR_PREVCAB = 0x01,
/// <summary>
/// Cabinet has a successor
/// </summary>
MSCAB_HDR_NEXTCAB = 0x02,
/// <summary>
/// Cabinet has reserved header space
/// </summary>
MSCAB_HDR_RESV = 0x04,
}
/// <summary>
/// Compression mode
/// </summary>
public enum MSCAB_COMP : int
{
/// <summary>
/// No compression
/// </summary>
MSCAB_COMP_NONE = 0,
/// <summary>
/// MSZIP (deflate) compression
/// </summary>
MSCAB_COMP_MSZIP = 1,
/// <summary>
/// Quantum compression
/// </summary>
MSCAB_COMP_QUANTUM = 2,
/// <summary>
/// LZX compression
/// </summary>
MSCAB_COMP_LZX = 3,
}
/// <summary>
/// mscabd_file::attribs attribute
/// </summary>
[Flags]
public enum MSCAB_ATTRIB : int
{
/// <summary>
/// File is read-only
/// </summary>
MSCAB_ATTRIB_RDONLY = 0x01,
/// <summary>
/// File is hidden
/// </summary>
MSCAB_ATTRIB_HIDDEN = 0x02,
/// <summary>
/// File is an operating system file
/// </summary>
MSCAB_ATTRIB_SYSTEM = 0x04,
/// <summary>
/// File is "archived"
/// </summary>
MSCAB_ATTRIB_ARCH = 0x20,
/// <summary>
/// File is an executable program
/// </summary>
MSCAB_ATTRIB_EXEC = 0x40,
/// <summary>
/// Filename is UTF8, not ISO-8859-1
/// </summary>
MSCAB_ATTRIB_UTF_NAME = 0x80,
}
/// <summary>
/// mschmc_file::section value
/// </summary>
public enum MSCHMC : int
{
/// <summary>
/// End of CHM file list
/// </summary>
MSCHMC_ENDLIST = 0,
/// <summary>
/// This file is in the Uncompressed section
/// </summary>
MSCHMC_UNCOMP = 1,
/// <summary>
/// This file is in the MSCompressed section
/// </summary>
MSCHMC_MSCOMP = 2,
}
/// <summary>
/// msszddd_header::format value
/// </summary>
public enum MSSZDD_FMT : int
{
/// <summary>
/// A regular SZDD file
/// </summary>
MSSZDD_FMT_NORMAL = 0,
/// <summary>
/// A special QBasic SZDD file
/// </summary>
MSSZDD_FMT_QBASIC = 1,
}
/// <summary>
/// KWAJ compression type
/// </summary>
public enum MSKWAJ_COMP : int
{
/// <summary>
/// No compression
/// </summary>
MSKWAJ_COMP_NONE = 0,
/// <summary>
/// No compression, 0xFF XOR "encryption"
/// </summary>
MSKWAJ_COMP_XOR = 1,
/// <summary>
/// LZSS (same method as SZDD)
/// </summary>
MSKWAJ_COMP_SZDD = 2,
/// <summary>
/// LZ+Huffman compression
/// </summary>
MSKWAJ_COMP_LZH = 3,
/// <summary>
/// MSZIP
/// </summary>
MSKWAJ_COMP_MSZIP = 4,
}
/// <summary>
/// KWAJ optional header flag
/// </summary>
[Flags]
public enum MSKWAJ_HDR : int
{
/// <summary>
/// Decompressed file length is included
/// </summary>
MSKWAJ_HDR_HASLENGTH = 0x01,
/// <summary>
/// Unknown 2-byte structure is included
/// </summary>
MSKWAJ_HDR_HASUNKNOWN1 = 0x02,
/// <summary>
/// Unknown multi-sized structure is included
/// </summary>
MSKWAJ_HDR_HASUNKNOWN2 = 0x04,
/// <summary>
/// File name (no extension) is included
/// </summary>
MSKWAJ_HDR_HASFILENAME = 0x08,
/// <summary>
/// File extension is included
/// </summary>
MSKWAJ_HDR_HASFILEEXT = 0x10,
/// <summary>
/// Extra text is included
/// </summary>
MSKWAJ_HDR_HASEXTRATEXT = 0x20,
}
#region Parameters
/// <summary>
/// mscab_decompressor::set_param() parameter
/// </summary>
public enum MSCABD_PARAM : int
{
/// <summary>
/// Search buffer size
/// </summary>
MSCABD_PARAM_SEARCHBUF = 0,
/// <summary>
/// Repair MS-ZIP streams?
/// </summary>
MSCABD_PARAM_FIXMSZIP = 1,
/// <summary>
/// Size of decompression buffer
/// </summary>
MSCABD_PARAM_DECOMPBUF = 2,
/// <summary>
/// Salvage data from bad cabinets?
/// If enabled, open() will skip files with bad folder indices or filenames
/// rather than reject the whole cabinet, and extract() will limit rather than
/// reject files with invalid offsets and lengths, and bad data block checksums
/// will be ignored. Available only in CAB decoder version 2 and above.
/// </summary>
MSCABD_PARAM_SALVAGE = 3,
}
/// <summary>
/// mschm_compressor::set_param() parameter
/// </summary>
public enum MSCHMC_PARAM : int
{
/// <summary>
/// "timestamp" header
/// </summary>
MSCHMC_PARAM_TIMESTAMP = 0,
/// <summary>
/// "language" header
/// </summary>
MSCHMC_PARAM_LANGUAGE = 1,
/// <summary>
/// LZX window size
/// </summary>
MSCHMC_PARAM_LZXWINDOW = 2,
/// <summary>
/// Intra-chunk quickref density
/// </summary>
MSCHMC_PARAM_DENSITY = 3,
/// <summary>
/// Whether to create indices
/// </summary>
MSCHMC_PARAM_INDEX = 4,
}
/// <summary>
/// msszdd_compressor::set_param() parameter
/// </summary>
public enum MSSZDDC_PARAM : int
{
/// <summary>
/// The missing character
/// </summary>
MSSZDDC_PARAM_MISSINGCHAR = 0,
}
/// <summary>
/// mskwaj_compressor::set_param() parameter
/// </summary>
public enum MSKWAJC_PARAM : int
{
/// <summary>
/// Compression type
/// </summary>
MSKWAJC_PARAM_COMP_TYPE = 0,
/// <summary>
/// Include the length of the uncompressed file in the header?
/// </summary>
MSKWAJC_PARAM_INCLUDE_LENGTH = 1,
}
/// <summary>
/// msoab_decompressor::set_param() parameter
/// </summary>
public enum MSOABD_PARAM : int
{
/// <summary>
/// Size of decompression buffer
/// </summary>
MSOABD_PARAM_DECOMPBUF = 0,
}
#endregion
#endregion
#region lzss.h
public enum LZSS_MODE : int
{
LZSS_MODE_EXPAND = 0,
LZSS_MODE_MSHELP = 1,
LZSS_MODE_QBASIC = 2,
}
#endregion
}

View File

@@ -1,86 +0,0 @@
using System;
using System.Runtime.InteropServices;
namespace SabreTools.Compression.libmspack
{
public unsafe class FixedArray<T> where T : unmanaged
{
/// <summary>
/// Direct access to the internal pointer
/// </summary>
public IntPtr Pointer { get; private set; }
/// <summary>
/// Size of the T object
/// </summary>
private int sizeofT { get { return Marshal.SizeOf(typeof(T)); } }
/// <summary>
/// Length of the fixed array
/// </summary>
private int _length;
public T this[int i]
{
get
{
if (i < 0 || i >= _length)
return default;
return (T)Marshal.PtrToStructure(Pointer + i * sizeofT, typeof(T));
}
set
{
if (i < 0 || i >= _length)
return;
Marshal.StructureToPtr(value, Pointer + i * sizeofT, false);
}
}
public FixedArray(int length)
{
Pointer = Marshal.AllocHGlobal(sizeofT * length);
_length = length;
}
~FixedArray()
{
Marshal.FreeHGlobal(Pointer);
}
public static implicit operator T*(FixedArray<T> arr) => (T*)arr.Pointer;
public static implicit operator T[](FixedArray<T> arr) => arr.ToArray();
/// <inheritdoc cref="System.Linq.Enumerable.SequenceEqual{TSource}(System.Collections.Generic.IEnumerable{TSource}, System.Collections.Generic.IEnumerable{TSource})"/>
public bool SequenceEqual(T[] arr)
{
if (arr.Length < _length)
return false;
for (int i = 0; i < _length; i++)
{
if (!this[i].Equals(arr[i]))
return false;
}
return true;
}
/// <summary>
/// Convert the unmanaged data to an array
/// </summary>
/// <returns>Array created from the pointer data</returns>
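/// <example>
/// An illustrative round trip through unmanaged memory:
/// <code>
/// var buf = new FixedArray&lt;byte&gt;(2);
/// buf[0] = 0x50;
/// buf[1] = 0x4D;
/// byte[] managed = buf.ToArray();   // { 0x50, 0x4D }
/// </code>
/// </example>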
public T[] ToArray()
{
T[] arr = new T[_length];
for (int i = 0; i < _length; i++)
{
arr[i] = this[i];
}
return arr;
}
}
}

View File

@@ -1,17 +0,0 @@
namespace SabreTools.Compression.libmspack.HLP
{
/// <summary>
/// TODO
/// </summary>
public class Compressor : BaseCompressor
{
/// <summary>
/// Creates a new HLP compressor
/// </summary>
public Compressor()
{
this.system = new mspack_default_system();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
}
}

View File

@@ -1,17 +0,0 @@
namespace SabreTools.Compression.libmspack.HLP
{
/// <summary>
/// TODO
/// </summary>
public class Decompressor : BaseDecompressor
{
/// <summary>
/// Creates a new HLP decompressor
/// </summary>
public Decompressor()
{
this.system = new mspack_default_system();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
}
}

View File

@@ -1,111 +0,0 @@
namespace SabreTools.Compression.libmspack.KWAJ
{
/// <summary>
/// A compressor for the KWAJ file format.
///
/// All fields are READ ONLY.
/// </summary>
public unsafe class Compressor : BaseCompressor
{
public int[] param { get; private set; } = new int[2];
/// <summary>
/// Creates a new KWAJ compressor
/// </summary>
public Compressor()
{
this.system = new mspack_default_system();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
/// <summary>
/// Reads an input file and creates a compressed output file in the
/// KWAJ compressed file format. The KWAJ compression format is quick
/// but gives poor compression. It is possible for the compressed output
/// file to be larger than the input file.
/// </summary>
/// <param name="input">
/// The name of the file to be compressed. This is passed
/// directly to mspack_system::open().
/// </param>
/// <param name="output">
/// The name of the file to write compressed data to.
/// This is passed directly to mspack_system::open().
/// </param>
/// <param name="length">
/// The length of the uncompressed file, or -1 to indicate
/// that this should be determined automatically by using
/// mspack_system::seek() on the input file.
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
/// <see cref="set_param(int, int)" />
public MSPACK_ERR compress(in string input, in string output, long length) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Sets a KWAJ compression engine parameter.
///
/// The following parameters are defined:
///
/// - #MSKWAJC_PARAM_COMP_TYPE: the compression method to use. Must
/// be one of #MSKWAJ_COMP_NONE, #MSKWAJ_COMP_XOR, #MSKWAJ_COMP_SZDD
/// or #MSKWAJ_COMP_LZH. The default is #MSKWAJ_COMP_LZH.
///
/// - #MSKWAJC_PARAM_INCLUDE_LENGTH: a boolean; should the compressed
/// output file include the uncompressed length of the input
/// file in the header? This adds 4 bytes to the size of the output
/// file. A value of zero says "no", non-zero says "yes". The default
/// is "no".
/// </summary>
/// <param name="param">The parameter to set</param>
/// <param name="value">The value to set the parameter to</param>
/// <returns>
/// MSPACK_ERR_OK if all is OK, or MSPACK_ERR_ARGS if there
/// is a problem with either parameter or value.
/// </returns>
/// <see cref="generate()"/>
public MSPACK_ERR set_param(MSKWAJC_PARAM param, int value) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Sets the original filename of the file before compression,
/// which will be stored in the header of the output file.
///
/// The filename should be a null-terminated string, it must be an
/// MS-DOS "8.3" type filename (up to 8 bytes for the filename, then
/// optionally a "." and up to 3 bytes for a filename extension).
///
/// If null is passed as the filename, no filename is included in the
/// header. This is the default.
/// </summary>
/// <param name="filename">The original filename to use</param>
/// <returns>
/// MSPACK_ERR_OK if all is OK, or MSPACK_ERR_ARGS if the
/// filename is too long
/// </returns>
public MSPACK_ERR set_filename(in string filename) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Sets arbitrary data that will be stored in the header of the
/// output file, uncompressed. It can be up to roughly 64 kilobytes,
/// as the overall size of the header must not exceed 65535 bytes.
/// The data can contain null bytes if desired.
///
/// If null is passed as the data pointer, or zero is passed as the
/// length, no extra data is included in the header. This is the
/// default.
/// </summary>
/// <param name="data">A pointer to the data to be stored in the header</param>
/// <param name="bytes">the length of the data in bytes</param>
/// <returns>
/// MSPACK_ERR_OK if all is OK, or MSPACK_ERR_ARGS if the extra data
/// is too long
/// </returns>
public MSPACK_ERR set_extra_data(void* data, int bytes) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Returns the error code set by the most recently called method.
/// </summary>
/// <returns>The most recent error code</returns>
/// <see cref="compress(in string, in string, long)"/>
public MSPACK_ERR last_error() => MSPACK_ERR.MSPACK_ERR_OK;
}
}

View File

@@ -1,37 +0,0 @@
namespace SabreTools.Compression.libmspack.KWAJ
{
public static class Constants
{
public const byte kwajh_Signature1 = 0x00;
public const byte kwajh_Signature2 = 0x04;
public const byte kwajh_CompMethod = 0x08;
public const byte kwajh_DataOffset = 0x0a;
public const byte kwajh_Flags = 0x0c;
public const byte kwajh_SIZEOF = 0x0e;
/// <summary>
/// Input buffer size during decompression - not worth parameterising IMHO
/// </summary>
public const int KWAJ_INPUT_SIZE = 2048;
/// <summary>
/// Huffman codes that are 9 bits or less are decoded immediately
/// </summary>
public const int KWAJ_TABLEBITS = 9;
// Number of codes in each huffman table
public const int KWAJ_MATCHLEN1_SYMS = 16;
public const int KWAJ_MATCHLEN2_SYMS = 16;
public const int KWAJ_LITLEN_SYMS = 32;
public const int KWAJ_OFFSET_SYMS = 64;
public const int KWAJ_LITERAL_SYMS = 256;
// Define decoding table sizes
public const int KWAJ_TABLESIZE = 1 << KWAJ_TABLEBITS;
public const int KWAJ_MATCHLEN1_TBLSIZE = KWAJ_TABLESIZE + (KWAJ_MATCHLEN1_SYMS * 2);
public const int KWAJ_MATCHLEN2_TBLSIZE = KWAJ_TABLESIZE + (KWAJ_MATCHLEN2_SYMS * 2);
public const int KWAJ_LITLEN_TBLSIZE = KWAJ_TABLESIZE + (KWAJ_LITLEN_SYMS * 2);
public const int KWAJ_OFFSET_TBLSIZE = KWAJ_TABLESIZE + (KWAJ_OFFSET_SYMS * 2);
public const int KWAJ_LITERAL_TBLSIZE = KWAJ_TABLESIZE + (KWAJ_LITERAL_SYMS * 2);
}
}

View File

@@ -1,375 +0,0 @@
using System;
using static SabreTools.Compression.libmspack.KWAJ.Constants;
using static SabreTools.Compression.libmspack.macros;
namespace SabreTools.Compression.libmspack.KWAJ
{
/// <summary>
/// A decompressor for KWAJ compressed files.
///
/// All fields are READ ONLY.
/// </summary>
public unsafe class Decompressor : BaseDecompressor
{
/// <summary>
/// Creates a new KWAJ decompressor.
/// </summary>
public Decompressor()
{
this.system = new mspack_default_system();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
/// <summary>
/// Destroys an existing KWAJ decompressor
/// </summary>
~Decompressor()
{
mspack_system sys = this.system;
//sys.free(this);
}
/// <summary>
/// Opens a KWAJ file and reads the header.
///
/// If the file opened is a valid KWAJ file, all headers will be read and
/// a mskwajd_header structure will be returned.
///
/// In the case of an error occurring, null is returned and the error code
/// is available from last_error().
///
/// The filename pointer should be considered "in use" until close() is
/// called on the KWAJ file.
/// </summary>
/// <param name="filename">
/// The filename of the KWAJ compressed file. This is
/// passed directly to mspack_system::open().
/// </param>
/// <returns>A pointer to a mskwajd_header structure, or null on failure</returns>
/// <see cref="Close(mskwajd_header)"/>
public mskwajd_header Open(in string filename)
{
mspack_system sys = this.system;
mspack_file fh = sys.open(filename, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ);
if (fh == null)
{
this.error = MSPACK_ERR.MSPACK_ERR_OPEN;
return null;
}
mskwajd_header hdr = new mskwajd_header();
hdr.fh = fh;
MSPACK_ERR err;
if ((err = ReadHeaders(sys, fh, hdr)) != MSPACK_ERR.MSPACK_ERR_OK)
{
Close(hdr);
this.error = err;
return null;
}
return hdr;
}
/// <summary>
/// Closes a previously opened KWAJ file.
///
/// This closes a KWAJ file and frees the mskwajd_header associated
/// with it. The KWAJ header pointer is now invalid and cannot be
/// used again.
/// </summary>
/// <param name="kwaj">The KWAJ file to close</param>
/// <see cref="Open(in string)"/>
public void Close(mskwajd_header kwaj)
{
if (this.system == null)
return;
// Close the file handle associated
this.system.close(kwaj.fh);
// Free the memory associated
//this.system.free(hdr.filename);
//this.system.free(hdr.extra);
//this.system.free(hdr);
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
/// <summary>
/// Reads the headers of a KWAJ format file
/// </summary>
private MSPACK_ERR ReadHeaders(mspack_system sys, mspack_file fh, mskwajd_header hdr)
{
FixedArray<byte> buf = new FixedArray<byte>(16);
int i;
// Read in the header
if (sys.read(fh, buf, kwajh_SIZEOF) != kwajh_SIZEOF)
{
return MSPACK_ERR.MSPACK_ERR_READ;
}
// Check for "KWAJ" signature
if ((BitConverter.ToUInt32(buf, kwajh_Signature1) != 0x4A41574B) ||
(BitConverter.ToUInt32(buf, kwajh_Signature2) != 0xD127F088))
{
return MSPACK_ERR.MSPACK_ERR_SIGNATURE;
}
// Basic header fields
hdr.comp_type = (MSKWAJ_COMP)BitConverter.ToUInt16(buf, kwajh_CompMethod);
hdr.data_offset = BitConverter.ToUInt16(buf, kwajh_DataOffset);
hdr.headers = (MSKWAJ_HDR)BitConverter.ToUInt16(buf, kwajh_Flags);
hdr.length = 0;
hdr.filename = null;
hdr.extra = null;
hdr.extra_length = 0;
// Optional headers
// 4 bytes: length of unpacked file
if (hdr.headers.HasFlag(MSKWAJ_HDR.MSKWAJ_HDR_HASLENGTH))
{
if (sys.read(fh, buf, 4) != 4)
return MSPACK_ERR.MSPACK_ERR_READ;
hdr.length = BitConverter.ToUInt32(buf, 0);
}
// 2 bytes: unknown purpose
if (hdr.headers.HasFlag(MSKWAJ_HDR.MSKWAJ_HDR_HASUNKNOWN1))
{
if (sys.read(fh, buf, 2) != 2)
return MSPACK_ERR.MSPACK_ERR_READ;
}
// 2 bytes: length of section, then [length] bytes: unknown purpose
if (hdr.headers.HasFlag(MSKWAJ_HDR.MSKWAJ_HDR_HASUNKNOWN2))
{
if (sys.read(fh, buf, 2) != 2)
return MSPACK_ERR.MSPACK_ERR_READ;
i = BitConverter.ToUInt16(buf, 0);
if (sys.seek(fh, i, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_CUR) != 0)
return MSPACK_ERR.MSPACK_ERR_SEEK;
}
// Filename and extension
// Either a filename or an extension (or both) triggers this block
if ((hdr.headers & (MSKWAJ_HDR.MSKWAJ_HDR_HASFILENAME | MSKWAJ_HDR.MSKWAJ_HDR_HASFILEEXT)) != 0)
{
int len;
// Allocate memory for maximum length filename
char* fn = (char*)sys.alloc(13);
if ((hdr.filename = fn) == null)
return MSPACK_ERR.MSPACK_ERR_NOMEMORY;
// Copy filename if present
if (hdr.headers.HasFlag(MSKWAJ_HDR.MSKWAJ_HDR_HASFILENAME))
{
// Read and copy up to 9 bytes of a null terminated string
if ((len = sys.read(fh, buf, 9)) < 2)
return MSPACK_ERR.MSPACK_ERR_READ;
for (i = 0; i < len; i++)
if ((*fn++ = (char)buf[i]) == '\0')
break;
// If string was 9 bytes with no null terminator, reject it
if (i == 9 && buf[8] != '\0')
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
// Seek to byte after string ended in file
if (sys.seek(fh, i + 1 - len, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_CUR) != 0)
return MSPACK_ERR.MSPACK_ERR_SEEK;
fn--; // Remove the null terminator
}
// Copy extension if present
if (hdr.headers.HasFlag(MSKWAJ_HDR.MSKWAJ_HDR_HASFILEEXT))
{
*fn++ = '.';
// Read and copy up to 4 bytes of a null terminated string
if ((len = sys.read(fh, buf, 4)) < 2)
return MSPACK_ERR.MSPACK_ERR_READ;
for (i = 0; i < len; i++)
if ((*fn++ = (char)buf[i]) == '\0')
break;
// If string was 4 bytes with no null terminator, reject it
if (i == 4 && buf[3] != '\0')
return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
// Seek to byte after string ended in file
if (sys.seek(fh, i + 1 - len, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_CUR) != 0)
return MSPACK_ERR.MSPACK_ERR_SEEK;
fn--; // Remove the null terminator
}
*fn = '\0';
}
// 2 bytes: extra text length then [length] bytes of extra text data
if (hdr.headers.HasFlag(MSKWAJ_HDR.MSKWAJ_HDR_HASEXTRATEXT))
{
if (sys.read(fh, buf, 2) != 2)
return MSPACK_ERR.MSPACK_ERR_READ;
i = EndGetI16(buf, 0);
hdr.extra = (char*)sys.alloc(i + 1);
if (hdr.extra == null)
return MSPACK_ERR.MSPACK_ERR_NOMEMORY;
if (sys.read(fh, hdr.extra, i) != i)
return MSPACK_ERR.MSPACK_ERR_READ;
hdr.extra[i] = '\0';
hdr.extra_length = (ushort)i;
}
return MSPACK_ERR.MSPACK_ERR_OK;
}
/// <summary>
/// Extracts the compressed data from a KWAJ file.
///
/// This decompresses the compressed KWAJ data stream and writes it to
/// an output file.
/// </summary>
/// <param name="kwaj">The KWAJ file to extract data from</param>
/// <param name="filename">
/// The filename to write the decompressed data to. This
/// is passed directly to mspack_system::open().
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
public MSPACK_ERR Extract(mskwajd_header kwaj, in string filename)
{
if (kwaj == null)
return this.error = MSPACK_ERR.MSPACK_ERR_ARGS;
mspack_system sys = this.system;
mspack_file fh = kwaj.fh;
// Seek to the compressed data
if (sys.seek(fh, kwaj.data_offset, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_START) != 0)
{
return this.error = MSPACK_ERR.MSPACK_ERR_SEEK;
}
// Open file for output
mspack_file outfh;
if ((outfh = sys.open(filename, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_WRITE)) == null)
{
return this.error = MSPACK_ERR.MSPACK_ERR_OPEN;
}
this.error = MSPACK_ERR.MSPACK_ERR_OK;
// Decompress based on format
if (kwaj.comp_type == MSKWAJ_COMP.MSKWAJ_COMP_NONE || kwaj.comp_type == MSKWAJ_COMP.MSKWAJ_COMP_XOR)
{
// NONE is a straight copy. XOR is a copy xored with 0xFF
byte* buf = (byte*)sys.alloc(KWAJ_INPUT_SIZE);
if (buf != null)
{
int read, i;
while ((read = sys.read(fh, buf, KWAJ_INPUT_SIZE)) > 0)
{
if (kwaj.comp_type == MSKWAJ_COMP.MSKWAJ_COMP_XOR)
{
for (i = 0; i < read; i++)
buf[i] ^= 0xFF;
}
if (sys.write(outfh, buf, read) != read)
{
this.error = MSPACK_ERR.MSPACK_ERR_WRITE;
break;
}
}
if (read < 0)
this.error = MSPACK_ERR.MSPACK_ERR_READ;
sys.free(buf);
}
else
{
this.error = MSPACK_ERR.MSPACK_ERR_NOMEMORY;
}
}
else if (kwaj.comp_type == MSKWAJ_COMP.MSKWAJ_COMP_SZDD)
{
this.error = lzss_decompress(sys, fh, outfh, KWAJ_INPUT_SIZE, LZSS_MODE.LZSS_MODE_QBASIC);
}
else if (kwaj.comp_type == MSKWAJ_COMP.MSKWAJ_COMP_LZH)
{
kwajd_stream lzh = lzh_init(sys, fh, outfh);
this.error = (lzh != null) ? lzh_decompress(lzh) : MSPACK_ERR.MSPACK_ERR_NOMEMORY;
lzh_free(lzh);
}
else if (kwaj.comp_type == MSKWAJ_COMP.MSKWAJ_COMP_MSZIP)
{
mszipd_stream zip = mszipd_init(sys, fh, outfh, KWAJ_INPUT_SIZE, 0);
this.error = (zip != null) ? mszipd_decompress_kwaj(zip) : MSPACK_ERR.MSPACK_ERR_NOMEMORY;
mszipd_free(zip);
}
else
{
this.error = MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
}
// Close output file
sys.close(outfh);
return this.error;
}
/// <summary>
/// Decompresses a KWAJ file to an output file in one step.
///
/// This opens a KWAJ file as input, reads the header, then decompresses
/// the compressed data immediately to an output file, finally closing
/// both the input and output file. It is more convenient to use than
/// open() then extract() then close(), if you do not need to know the
/// KWAJ output size or output filename.
/// </summary>
/// <param name="input">
/// The filename of the input KWAJ file. This is passed
/// directly to mspack_system::open().
/// </param>
/// <param name="output">
/// The filename to write the decompressed data to. This
/// is passed directly to mspack_system::open().
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
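/// <example>
/// A minimal one-step usage sketch (the paths are placeholders):
/// <code>
/// var kwajd = new Decompressor();
/// MSPACK_ERR err = kwajd.Decompress("SETUP.EX_", "SETUP.EXE");
/// if (err != MSPACK_ERR.MSPACK_ERR_OK)
///     System.Console.Error.WriteLine("KWAJ decompression failed: " + err);
/// </code>
/// </example>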
public MSPACK_ERR Decompress(in string input, in string output)
{
mskwajd_header hdr;
if ((hdr = Open(input)) == null)
return this.error;
MSPACK_ERR error = Extract(hdr, output);
Close(hdr);
return this.error = error;
}
/// <summary>
/// Returns the error code set by the most recently called method.
///
/// This is useful for Open(), which does not return an
/// error code directly.
/// </summary>
/// <returns>The most recent error code</returns>
/// <see cref="Open(in string)"/>
/// <see cref="search()"/>
public MSPACK_ERR LastError()
{
return this.error;
}
}
}

View File

@@ -1,60 +0,0 @@
using static SabreTools.Compression.libmspack.KWAJ.Constants;
using static SabreTools.Compression.libmspack.lzss;
namespace SabreTools.Compression.libmspack
{
public unsafe class kwajd_stream : readbits
{
#region Huffman code lengths
public byte[] MATCHLEN1_len { get; set; } = new byte[KWAJ_MATCHLEN1_SYMS];
public byte[] MATCHLEN2_len { get; set; } = new byte[KWAJ_MATCHLEN2_SYMS];
public byte[] LITLEN_len { get; set; } = new byte[KWAJ_LITLEN_SYMS];
public byte[] OFFSET_len { get; set; } = new byte[KWAJ_OFFSET_SYMS];
public byte[] LITERAL_len { get; set; } = new byte[KWAJ_LITERAL_SYMS];
#endregion
#region Huffman decoding tables
public ushort[] MATCHLEN1_table { get; set; } = new ushort[KWAJ_MATCHLEN1_TBLSIZE];
public ushort[] MATCHLEN2_table { get; set; } = new ushort[KWAJ_MATCHLEN2_TBLSIZE];
public ushort[] LITLEN_table { get; set; } = new ushort[KWAJ_LITLEN_TBLSIZE];
public ushort[] OFFSET_table { get; set; } = new ushort[KWAJ_OFFSET_TBLSIZE];
public ushort[] LITERAL_table { get; set; } = new ushort[KWAJ_LITERAL_TBLSIZE];
#endregion
#region Input buffer
public new byte[] inbuf { get; set; } = new byte[KWAJ_INPUT_SIZE];
#endregion
#region History window
public byte[] window { get; set; } = new byte[LZSS_WINDOW_SIZE];
#endregion
public override void READ_BYTES()
{
// The C original refills the buffer via lzh_read_input(), which feeds zero
// bytes once the real input runs out (KWAJ LZH has no end-of-stream marker).
// This is a minimal repair that leans on the shared READ_IF_NEEDED helper
// used by the other readbits subclasses; the zero-padding behaviour is not
// reproduced here.
READ_IF_NEEDED(ref i_ptr, ref i_end);
INJECT_BITS_MSB(*i_ptr++, 8);
}
}
}

View File

@@ -1,48 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents a KWAJ compressed file.
///
/// All fields are READ ONLY.
/// </summary>
public unsafe class mskwajd_header
{
/// <summary>
/// The compression type
/// </summary>
public MSKWAJ_COMP comp_type { get; set; }
/// <summary>
/// The offset in the file where the compressed data stream begins
/// </summary>
public long data_offset { get; set; }
/// <summary>
/// Flags indicating which optional headers were included.
/// </summary>
public MSKWAJ_HDR headers { get; set; }
/// <summary>
/// The amount of uncompressed data in the file, or 0 if not present.
/// </summary>
public long length { get; set; }
/// <summary>
/// Output filename, or null if not present
/// </summary>
public char* filename { get; set; }
/// <summary>
/// Extra uncompressed data (usually text) in the header.
/// This data can contain nulls so use extra_length to get the size.
/// </summary>
public char* extra { get; set; }
/// <summary>
/// Length of extra uncompressed data in the header
/// </summary>
public ushort extra_length { get; set; }
public mspack_file fh { get; set; }
}
}

View File

@@ -1,17 +0,0 @@
namespace SabreTools.Compression.libmspack.LIT
{
/// <summary>
/// TODO
/// </summary>
public class Compressor : BaseCompressor
{
/// <summary>
/// Creates a new LIT compressor
/// </summary>
public Compressor()
{
this.system = new mspack_default_system();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
}
}

View File

@@ -1,17 +0,0 @@
namespace SabreTools.Compression.libmspack.LIT
{
/// <summary>
/// TODO
/// </summary>
public class Decompressor : BaseDecompressor
{
/// <summary>
/// Creates a new LIT decompressor
/// </summary>
public Decompressor()
{
this.system = new mspack_default_system();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
}
}

View File

@@ -1,39 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public static class lzss
{
public const int LZSS_WINDOW_SIZE = 4096;
public const byte LZSS_WINDOW_FILL = 0x20;
/// <summary>
/// Decompresses an LZSS stream.
///
/// Input bytes will be read in as necessary using the system->read()
/// function with the input file handle given. This will continue until
/// system->read() returns 0 bytes, or an error. Errors will be passed
/// out of the function as MSPACK_ERR_READ errors. Input streams should
/// convey an "end of input stream" by refusing to supply all the bytes
/// that LZSS asks for when they reach the end of the stream, rather
/// than return an error code.
///
/// Output bytes will be passed to the system->write() function, using
/// the output file handle given. More than one call may be made to
/// system->write().
///
/// As EXPAND.EXE (SZDD/KWAJ), Microsoft Help and QBasic have slightly
/// different encodings for the control byte and matches, a "mode"
/// parameter is allowed, to choose the encoding.
/// </summary>
/// <param name="system">
/// An mspack_system structure used to read from
/// the input stream and write to the output
/// stream, also to allocate and free memory.
/// </param>
/// <param name="input">An input stream with the LZSS data.</param>
/// <param name="output">An output stream to write the decoded data to.</param>
/// <param name="input_buffer_size">The number of bytes to use as an input bitstream buffer.</param>
/// <param name="mode">One of <see cref="LZSS_MODE"/> values</param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
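/// <example>
/// A hedged sketch of driving the placeholder implementation above; the
/// filenames are placeholders and the mode is chosen only for illustration:
/// <code>
/// mspack_system sys = new mspack_default_system();
/// mspack_file input = sys.open("HELPFILE.HLP", MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ);
/// mspack_file output = sys.open("HELPFILE.OUT", MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_WRITE);
/// MSPACK_ERR err = lzss.lzss_decompress(sys, input, output, 2048, LZSS_MODE.LZSS_MODE_MSHELP);
/// sys.close(output);
/// sys.close(input);
/// </code>
/// </example>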
public static MSPACK_ERR lzss_decompress(mspack_system system, mspack_file input, mspack_file output, int input_buffer_size, LZSS_MODE mode) => MSPACK_ERR.MSPACK_ERR_OK;
}
}

View File

@@ -1,147 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public static class lzx
{
// Some constants defined by the LZX specification
public const int LZX_MIN_MATCH = 2;
public const int LZX_MAX_MATCH = 257;
public const int LZX_NUM_CHARS = 256;
public const int LZX_BLOCKTYPE_INVALID = 0; /* also blocktypes 4-7 invalid */
public const int LZX_BLOCKTYPE_VERBATIM = 1;
public const int LZX_BLOCKTYPE_ALIGNED = 2;
public const int LZX_BLOCKTYPE_UNCOMPRESSED = 3;
public const int LZX_PRETREE_NUM_ELEMENTS = 20;
public const int LZX_ALIGNED_NUM_ELEMENTS = 8; /* aligned offset tree #elements */
public const int LZX_NUM_PRIMARY_LENGTHS = 7; /* this one missing from spec! */
public const int LZX_NUM_SECONDARY_LENGTHS = 249; /* length tree #elements */
// LZX huffman defines: tweak tablebits as desired
public const int LZX_PRETREE_MAXSYMBOLS = LZX_PRETREE_NUM_ELEMENTS;
public const int LZX_PRETREE_TABLEBITS = 6;
public const int LZX_MAINTREE_MAXSYMBOLS = LZX_NUM_CHARS + 290 * 8;
public const int LZX_MAINTREE_TABLEBITS = 12;
public const int LZX_LENGTH_MAXSYMBOLS = LZX_NUM_SECONDARY_LENGTHS + 1;
public const int LZX_LENGTH_TABLEBITS = 12;
public const int LZX_ALIGNED_MAXSYMBOLS = LZX_ALIGNED_NUM_ELEMENTS;
public const int LZX_ALIGNED_TABLEBITS = 7;
public const int LZX_LENTABLE_SAFETY = 64; /* table decoding overruns are allowed */
public const int LZX_FRAME_SIZE = 32768; /* the size of a frame in LZX */
/// <summary>
/// Allocates and initialises LZX decompression state for decoding an LZX
/// stream.
///
/// This routine uses system->alloc() to allocate memory. If memory
/// allocation fails, or the parameters to this function are invalid,
/// null is returned.
/// </summary>
/// <param name="system">
/// An mspack_system structure used to read from
/// the input stream and write to the output
/// stream, also to allocate and free memory.
/// </param>
/// <param name="input">An input stream with the LZX data.</param>
/// <param name="output">An output stream to write the decoded data to.</param>
/// <param name="window_bits">
/// The size of the decoding window, which must be
/// between 15 and 21 inclusive for regular LZX
/// data, or between 17 and 25 inclusive for
/// LZX DELTA data.
/// </param>
/// <param name="reset_interval">
/// The interval at which the LZX bitstream is
/// reset, in multiples of LZX frames (32768
/// bytes), e.g. a value of 2 indicates the input
/// stream resets after every 65536 output bytes.
/// A value of 0 indicates that the bitstream never
/// resets, such as in CAB LZX streams.
/// </param>
/// <param name="input_buffer_size">The number of bytes to use as an input bitstream buffer.</param>
/// <param name="output_length">
/// The length in bytes of the entirely
/// decompressed output stream, if known in
/// advance. It is used to correctly perform the
/// Intel E8 transformation, which must stop 6
/// bytes before the very end of the
/// decompressed stream. It is not otherwise used
/// or adhered to. If the full decompressed
/// length is known in advance, set it here.
/// If it is NOT known, use the value 0, and call
/// lzxd_set_output_length() once it is
/// known. If never set, 4 of the final 6 bytes
/// of the output stream may be incorrect.
/// </param>
/// <param name="is_delta">
/// Should be zero for all regular LZX data,
/// non-zero for LZX DELTA encoded data.
/// </param>
/// <returns>
/// A pointer to an initialised lzxd_stream structure, or null if
/// there was not enough memory or parameters to the function were wrong.
/// </returns>
public static lzxd_stream lzxd_init(mspack_system system, mspack_file input, mspack_file output, int window_bits, int reset_interval, int input_buffer_size, long output_length, char is_delta) => null;
/// <summary>
/// See description of output_length in lzxd_init()
/// </summary>
public static void lzxd_set_output_length(lzxd_stream lzx, long output_length) { }
/// <summary>
/// Reads LZX DELTA reference data into the window and allows
/// lzxd_decompress() to reference it.
///
/// Call this before the first call to lzxd_decompress().
/// </summary>
/// <param name="lzx">The LZX stream to apply this reference data to</param>
/// <param name="system">
/// An mspack_system implementation to use with the
/// input param. Only read() will be called.
/// </param>
/// <param name="input">
/// An input file handle to read reference data using
/// system->read().
/// </param>
/// <param name="length">
/// The length of the reference data. Cannot be longer
/// than the LZX window size.
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
public static MSPACK_ERR lzxd_set_reference_data(lzxd_stream lzx, mspack_system system, mspack_file input, uint length) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Decompresses entire or partial LZX streams.
///
/// The number of bytes of data that should be decompressed is given as the
/// out_bytes parameter. If more bytes are decoded than are needed, they
/// will be kept over for a later invocation.
///
/// The output bytes will be passed to the system->write() function given in
/// lzxd_init(), using the output file handle given in lzxd_init(). More than
/// one call may be made to system->write().
///
/// Input bytes will be read in as necessary using the system->read()
/// function given in lzxd_init(), using the input file handle given in
/// lzxd_init(). This will continue until system->read() returns 0 bytes,
/// or an error. Errors will be passed out of the function as
/// MSPACK_ERR_READ errors. Input streams should convey an "end of input
/// stream" by refusing to supply all the bytes that LZX asks for when they
/// reach the end of the stream, rather than return an error code.
///
/// If any error code other than MSPACK_ERR_OK is returned, the stream
/// should be considered unusable and lzxd_decompress() should not be
/// called again on this stream.
/// </summary>
/// <param name="lzx">LZX decompression state, as allocated by lzxd_init().</param>
/// <param name="out_bytes">The number of bytes of data to decompress.</param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
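/// <example>
/// A sketch of the intended call sequence for a CAB-style stream (21-bit
/// window, never-resetting bitstream); sys, input and output stand in for
/// an mspack_system and two open mspack_file handles:
/// <code>
/// long totalBytes = 0x40000;   // decompressed length, if known in advance
/// lzxd_stream state = lzx.lzxd_init(sys, input, output, 21, 0, 4096, totalBytes, (char)0);
/// if (state != null)
/// {
///     MSPACK_ERR err = lzx.lzxd_decompress(state, totalBytes);
///     lzx.lzxd_free(state);
/// }
/// </code>
/// </example>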
public static MSPACK_ERR lzxd_decompress(lzxd_stream lzx, long out_bytes) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Frees all state associated with an LZX data stream. This will call
/// system->free() using the system pointer given in lzxd_init().
/// </summary>
/// <param name="lzx">LZX decompression state to free.</param>
public static void lzxd_free(lzxd_stream lzx) { }
}
}

View File

@@ -1,148 +0,0 @@
using static SabreTools.Compression.libmspack.lzx;
namespace SabreTools.Compression.libmspack
{
public unsafe class lzxd_stream : readbits
{
/// <summary>
/// Number of bytes actually output
/// </summary>
public long offset { get; set; }
/// <summary>
/// Overall decompressed length of stream
/// </summary>
public long length { get; set; }
/// <summary>
/// Decoding window
/// </summary>
public byte* window { get; set; }
/// <summary>
/// Window size
/// </summary>
public uint window_size { get; set; }
/// <summary>
/// LZX DELTA reference data size
/// </summary>
public uint ref_data_size { get; set; }
/// <summary>
/// Number of match_offset entries in table
/// </summary>
public uint num_offsets { get; set; }
/// <summary>
/// Current frame offset within the window
/// </summary>
public uint frame_posn { get; set; }
/// <summary>
/// The number of 32kb frames processed
/// </summary>
public uint frame { get; set; }
/// <summary>
/// Which frame do we reset the compressor?
/// </summary>
public uint reset_interval { get; set; }
/// <summary>
/// For the LRU offset system
/// </summary>
public uint R0 { get; set; }
/// <summary>
/// For the LRU offset system
/// </summary>
public uint R1 { get; set; }
/// <summary>
/// For the LRU offset system
/// </summary>
public uint R2 { get; set; }
/// <summary>
/// Uncompressed length of this LZX block
/// </summary>
public uint block_length { get; set; }
/// <summary>
/// Uncompressed bytes still left to decode
/// </summary>
public uint block_remaining { get; set; }
/// <summary>
/// Magic header value used for transform
/// </summary>
public int intel_filesize { get; set; }
/// <summary>
/// Has intel E8 decoding started?
/// </summary>
public byte intel_started { get; set; }
/// <summary>
/// Type of the current block
/// </summary>
public byte block_type { get; set; }
/// <summary>
/// Have we started decoding at all yet?
/// </summary>
public byte header_read { get; set; }
/// <summary>
/// Have we reached the end of input?
/// </summary>
public new byte input_end { get; set; }
/// <summary>
/// Does stream follow LZX DELTA spec?
/// </summary>
public byte is_delta { get; set; }
#region Huffman code lengths
public byte[] PRETREE_len { get; set; } = new byte[LZX_PRETREE_MAXSYMBOLS + LZX_LENTABLE_SAFETY];
public byte[] MAINTREE_len { get; set; } = new byte[LZX_MAINTREE_MAXSYMBOLS + LZX_LENTABLE_SAFETY];
public byte[] LENGTH_len { get; set; } = new byte[LZX_LENGTH_MAXSYMBOLS + LZX_LENTABLE_SAFETY];
public byte[] ALIGNED_len { get; set; } = new byte[LZX_ALIGNED_MAXSYMBOLS + LZX_LENTABLE_SAFETY];
#endregion
#region Huffman decoding tables
public ushort[] PRETREE_table { get; set; } = new ushort[(1 << LZX_PRETREE_TABLEBITS) + (LZX_PRETREE_MAXSYMBOLS * 2)];
public ushort[] MAINTREE_table { get; set; } = new ushort[(1 << LZX_MAINTREE_TABLEBITS) + (LZX_MAINTREE_MAXSYMBOLS * 2)];
public ushort[] LENGTH_table { get; set; } = new ushort[(1 << LZX_LENGTH_TABLEBITS) + (LZX_LENGTH_MAXSYMBOLS * 2)];
public ushort[] ALIGNED_table { get; set; } = new ushort[(1 << LZX_ALIGNED_TABLEBITS) + (LZX_ALIGNED_MAXSYMBOLS * 2)];
public byte LENGTH_empty { get; set; }
#endregion
/// <summary>
/// This is used purely for doing the intel E8 transform
/// </summary>
public byte[] e8_buf { get; set; } = new byte[LZX_FRAME_SIZE];
public override void READ_BYTES()
{
byte b0, b1;
READ_IF_NEEDED(ref i_ptr, ref i_end);
b0 = *i_ptr++;
READ_IF_NEEDED(ref i_ptr, ref i_end);
b1 = *i_ptr++;
INJECT_BITS_MSB((b1 << 8) | b0, 16);
}
}
}

View File

@@ -1,91 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public static class mszip
{
/// <summary>
/// Size of LZ history window
/// </summary>
public const int MSZIP_FRAME_SIZE = 32768;
/// <summary>
/// Literal/length huffman tree
/// </summary>
public const int MSZIP_LITERAL_MAXSYMBOLS = 288;
public const int MSZIP_LITERAL_TABLEBITS = 9;
/// <summary>
/// Distance huffman tree
/// </summary>
public const int MSZIP_DISTANCE_MAXSYMBOLS = 32;
public const int MSZIP_DISTANCE_TABLEBITS = 6;
// If there are fewer direct lookup entries than symbols, the longer
// code pointers will be <= maxsymbols. This must not happen, or we
// will decode entries badly
public const int MSZIP_LITERAL_TABLESIZE = MSZIP_LITERAL_MAXSYMBOLS * 4;
public const int MSZIP_DISTANCE_TABLESIZE = (1 << MSZIP_DISTANCE_TABLEBITS) + (MSZIP_DISTANCE_MAXSYMBOLS * 2);
/// <summary>
/// Allocates MS-ZIP decompression stream for decoding the given stream.
///
/// - uses system->alloc() to allocate memory
///
/// - returns null if not enough memory
///
/// - input_buffer_size is how many bytes to use as an input bitstream buffer
///
/// - if repair_mode is non-zero, errors in decompression will be skipped
/// and 'holes' left will be filled with zero bytes. This allows at least
/// a partial recovery of erroneous data.
/// </summary>
/// <param name="system"></param>
/// <param name="input"></param>
/// <param name="output"></param>
/// <param name="input_buffer_size"></param>
/// <param name="repair_mode"></param>
/// <returns></returns>
public static mszipd_stream mszipd_init(mspack_system system, mspack_file input, mspack_file output, int input_buffer_size, int repair_mode) => null;
/// <summary>
/// Decompresses, or decompresses more of, an MS-ZIP stream.
///
/// - out_bytes of data will be decompressed and the function will return
/// with an MSPACK_ERR_OK return code.
///
/// - decompressing will stop as soon as out_bytes is reached. if the true
/// amount of bytes decoded spills over that amount, they will be kept for
/// a later invocation of mszipd_decompress().
///
/// - the output bytes will be passed to the system->write() function given in
/// mszipd_init(), using the output file handle given in mszipd_init(). More
/// than one call may be made to system->write()
///
/// - MS-ZIP will read input bytes as necessary using the system->read()
/// function given in mszipd_init(), using the input file handle given in
/// mszipd_init(). This will continue until system->read() returns 0 bytes,
/// or an error.
/// </summary>
/// <param name="zip"></param>
/// <param name="out_bytes"></param>
/// <returns></returns>
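/// <example>
/// A sketch of the intended call sequence; sys, input and output stand in
/// for an mspack_system and two open mspack_file handles:
/// <code>
/// mszipd_stream zip = mszip.mszipd_init(sys, input, output, 2048, 0);
/// if (zip != null)
/// {
///     MSPACK_ERR err = mszip.mszipd_decompress(zip, 32768);
///     mszip.mszipd_free(zip);
/// }
/// </code>
/// </example>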
public static MSPACK_ERR mszipd_decompress(mszipd_stream zip, long out_bytes) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Decompresses an entire MS-ZIP stream in a KWAJ file. Acts very much
/// like mszipd_decompress(), but doesn't take an out_bytes parameter
/// </summary>
/// <param name="zip"></param>
/// <returns></returns>
public static MSPACK_ERR mszipd_decompress_kwaj(mszipd_stream zip) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Frees all stream associated with an MS-ZIP data stream
///
/// - calls system->free() using the system pointer given in mszipd_init()
/// </summary>
/// <param name="zip"></param>
public static void mszipd_free(mszipd_stream zip) { }
}
}

View File

@@ -1,46 +0,0 @@
using static SabreTools.Compression.libmspack.lzx;
using static SabreTools.Compression.libmspack.mszip;
namespace SabreTools.Compression.libmspack
{
public unsafe class mszipd_stream : readbits
{
/// <summary>
/// inflate() will call this whenever the window should be emptied.
/// </summary>
/// <param name="val"></param>
/// <returns></returns>
public int flush_window(uint val) => 0;
public int repair_mode { get; set; }
public int bytes_output { get; set; }
#region Huffman code lengths
public byte[] LITERAL_len { get; set; } = new byte[MSZIP_LITERAL_MAXSYMBOLS + LZX_LENTABLE_SAFETY];
public byte[] DISTANCE_len { get; set; } = new byte[MSZIP_DISTANCE_MAXSYMBOLS + LZX_LENTABLE_SAFETY];
#endregion
#region Huffman decoding tables
public ushort[] LITERAL_table { get; set; } = new ushort[MSZIP_LITERAL_TABLESIZE];
public ushort[] DISTANCE_table { get; set; } = new ushort[MSZIP_DISTANCE_TABLESIZE];
#endregion
/// <summary>
/// 32kb history window
/// </summary>
public byte[] window { get; set; } = new byte[MSZIP_FRAME_SIZE];
public override void READ_BYTES()
{
READ_IF_NEEDED(ref i_ptr, ref i_end);
INJECT_BITS_LSB(*i_ptr++, 8);
}
}
}

View File

@@ -1,47 +0,0 @@
namespace SabreTools.Compression.libmspack.None
{
public unsafe class DecompressState : mscabd_decompress_state
{
public DecompressState()
{
this.comp_type = MSCAB_COMP.MSCAB_COMP_NONE;
this.state = null;
}
public DecompressState(mscabd_decompress_state oldstate)
{
this.comp_type = MSCAB_COMP.MSCAB_COMP_NONE;
this.state = null;
if (oldstate != null)
{
this.folder = oldstate.folder;
this.data = oldstate.data;
this.offset = oldstate.offset;
this.block = oldstate.block;
this.outlen = oldstate.outlen;
this.sys = oldstate.sys;
this.incab = oldstate.incab;
this.infh = oldstate.infh;
this.outfh = oldstate.outfh;
this.i_ptr = oldstate.i_ptr;
this.i_end = oldstate.i_end;
this.input = oldstate.input;
}
}
/// <inheritdoc/>
public override unsafe MSPACK_ERR decompress(object data, long bytes)
{
State s = data as State;
if (s == null) return MSPACK_ERR.MSPACK_ERR_ARGS;
while (bytes > 0)
{
int run = (bytes > s.BufferSize) ? s.BufferSize : (int)bytes;
if (s.InternalSystem.read(s.Input, s.Buffer, run) != run) return MSPACK_ERR.MSPACK_ERR_READ;
if (s.InternalSystem.write(s.Output, s.Buffer, run) != run) return MSPACK_ERR.MSPACK_ERR_WRITE;
bytes -= run;
}
return MSPACK_ERR.MSPACK_ERR_OK;
}
}
}

View File

@@ -1,34 +0,0 @@
namespace SabreTools.Compression.libmspack.None
{
/// <summary>
/// The "not compressed" method decompressor
/// </summary>
public unsafe class State
{
public mspack_system InternalSystem { get; private set; }
public mspack_file Input { get; private set; }
public mspack_file Output { get; private set; }
public FixedArray<byte> Buffer { get; private set; }
public int BufferSize { get; private set; }
public State(mspack_system sys, mspack_file infh, mspack_file outfh, int bufsize)
{
this.InternalSystem = sys;
this.Input = infh;
this.Output = outfh;
this.Buffer = new FixedArray<byte>(bufsize);
this.BufferSize = bufsize;
}
~State()
{
mspack_system sys = this.InternalSystem;
sys.free(this.Buffer);
//sys.free(this);
}
}
}

View File

@@ -1,70 +0,0 @@
using System;
namespace SabreTools.Compression.libmspack.OAB
{
/// <summary>
/// A compressor for the Offline Address Book (OAB) format.
///
/// All fields are READ ONLY.
/// </summary>
public class Compressor : BaseCompressor
{
/// <summary>
/// Creates a new OAB compressor
/// </summary>
public Compressor()
{
throw new NotImplementedException();
}
/// <summary>
/// Destroys an existing OAB compressor
/// </summary>
~Compressor()
{
throw new NotImplementedException();
}
/// <summary>
/// Compress a full OAB file.
///
/// The input file will be read and the compressed contents written to the
/// output file.
/// </summary>
/// <param name="input">
/// The filename of the input file. This is passed
/// directly to mspack_system::open().
/// </param>
/// <param name="output">
/// The filename of the output file. This is passed
/// directly to mspack_system::open().
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
public MSPACK_ERR compress(in string input, in string output) => throw new NotImplementedException();
/// <summary>
/// Generate a compressed incremental OAB patch file.
///
/// The two uncompressed files "input" and "base" will be read, and an
/// incremental patch to generate "input" from "base" will be written to
/// the output file.
/// </summary>
/// <param name="input">
/// The filename of the input file containing the new
/// version of its contents. This is passed directly
/// to mspack_system::open().
/// </param>
/// <param name="base">
/// The filename of the original base file containing
/// the old version of its contents, against which the
/// incremental patch shall be generated. This is passed
/// directly to mspack_system::open().
/// </param>
/// <param name="output">
/// The filename of the output file. This is passed
/// directly to mspack_system::open().
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
public MSPACK_ERR compress_incremental(in string input, in string @base, in string output) => throw new NotImplementedException();
}
}
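
A hedged usage sketch for the API documented above. The constructor and both methods currently throw NotImplementedException, so this shows only the intended call pattern; the filenames are placeholders.

var oabc = new Compressor();
// Full compression: read "uncompressed.oab", write the LZX-compressed result
MSPACK_ERR err = oabc.compress("uncompressed.oab", "compressed.lzx");
// Incremental patch: write a patch that rebuilds "new.oab" from "old.oab"
err = oabc.compress_incremental("new.oab", "old.oab", "patch.lzx");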

View File

@@ -1,400 +0,0 @@
using static SabreTools.Compression.libmspack.macros;
using static SabreTools.Compression.libmspack.oab;
namespace SabreTools.Compression.libmspack.OAB
{
/// <summary>
/// A decompressor for .LZX (Offline Address Book) files
///
/// All fields are READ ONLY.
/// </summary>
/// <see cref="mspack_create_oab_decompressor()"/>
/// <see cref="mspack_destroy_oab_decompressor()"/>
public unsafe class Decompressor : BaseDecompressor
{
public int buf_size { get; private set; }
/// <summary>
/// Creates a new OAB decompressor
/// </summary>
public Decompressor()
{
this.system = new OABSystem();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
this.buf_size = 4096;
}
/// <summary>
/// Decompresses a full Offline Address Book file.
///
/// If the input file is a valid compressed Offline Address Book file,
/// it will be read and the decompressed contents will be written to
/// the output file.
/// </summary>
/// <param name="input">
/// The filename of the input file. This is passed
/// directly to mspack_system::open().
/// </param>
/// <param name="output">
/// The filename of the output file. This is passed
/// directly to mspack_system::open().
/// </param>
/// <returns>An error code, or MSPACK_ERR.MSPACK_ERR_OK if successful</returns>
public MSPACK_ERR decompress(in string input, in string output)
{
mspack_system sys;
mspack_file infh = null;
mspack_file outfh = null;
FixedArray<byte> buf = null;
FixedArray<byte> hdrbuf = new FixedArray<byte>(oabhead_SIZEOF);
uint block_max, target_size;
lzxd_stream lzx = null;
OABSystem oabd_sys;
uint window_bits;
MSPACK_ERR ret = MSPACK_ERR.MSPACK_ERR_OK;
sys = this.system;
infh = sys.open(input, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ);
if (infh == null)
{
ret = MSPACK_ERR.MSPACK_ERR_OPEN;
goto outlbl;
}
if (sys.read(infh, hdrbuf, oabhead_SIZEOF) != oabhead_SIZEOF)
{
ret = MSPACK_ERR.MSPACK_ERR_READ;
goto outlbl;
}
if (EndGetI32(hdrbuf, oabhead_VersionHi) != 3 ||
EndGetI32(hdrbuf, oabhead_VersionLo) != 1)
{
ret = MSPACK_ERR.MSPACK_ERR_SIGNATURE;
goto outlbl;
}
block_max = EndGetI32(hdrbuf, oabhead_BlockMax);
target_size = EndGetI32(hdrbuf, oabhead_TargetSize);
outfh = sys.open(output, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_WRITE);
if (outfh == null)
{
ret = MSPACK_ERR.MSPACK_ERR_OPEN;
goto outlbl;
}
buf = new FixedArray<byte>(this.buf_size);
if (buf == null)
{
ret = MSPACK_ERR.MSPACK_ERR_NOMEMORY;
goto outlbl;
}
oabd_sys = sys as OABSystem;
oabd_file in_ofh = new oabd_file();
in_ofh.orig_sys = sys;
in_ofh.orig_file = infh;
oabd_file out_ofh = new oabd_file();
out_ofh.orig_sys = sys;
out_ofh.orig_file = outfh;
while (target_size > 0)
{
uint blk_csize, blk_dsize, blk_crc, blk_flags;
if (sys.read(infh, buf, oabblk_SIZEOF) != oabblk_SIZEOF)
{
ret = MSPACK_ERR.MSPACK_ERR_READ;
goto outlbl;
}
blk_flags = EndGetI32(buf, oabblk_Flags);
blk_csize = EndGetI32(buf, oabblk_CompSize);
blk_dsize = EndGetI32(buf, oabblk_UncompSize);
blk_crc = EndGetI32(buf, oabblk_CRC);
if (blk_dsize > block_max || blk_dsize > target_size || blk_flags > 1)
{
ret = MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
goto outlbl;
}
if (blk_flags == 0)
{
// Uncompressed block
if (blk_dsize != blk_csize)
{
ret = MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
goto outlbl;
}
ret = copy_fh(sys, infh, outfh, (int)blk_dsize, buf, this.buf_size);
if (ret != MSPACK_ERR.MSPACK_ERR_OK) goto outlbl;
}
else
{
// LZX compressed block
window_bits = 17;
while (window_bits < 25 && (1 << (int)window_bits) < blk_dsize)
window_bits++;
in_ofh.available = (int)blk_csize;
out_ofh.crc = 0xffffffff;
lzx = lzxd_init(&oabd_sys, (void*)&in_ofh, (void*)&out_ofh, window_bits, 0, this.buf_size, blk_dsize, 1);
if (lzx == null)
{
ret = MSPACK_ERR.MSPACK_ERR_NOMEMORY;
goto outlbl;
}
ret = lzxd_decompress(lzx, blk_dsize);
if (ret != MSPACK_ERR.MSPACK_ERR_OK)
goto outlbl;
lzxd_free(lzx);
lzx = null;
// Consume any trailing padding bytes before the next block
ret = copy_fh(sys, infh, null, in_ofh.available, buf, this.buf_size);
if (ret != MSPACK_ERR.MSPACK_ERR_OK) goto outlbl;
if (out_ofh.crc != blk_crc)
{
ret = MSPACK_ERR.MSPACK_ERR_CHECKSUM;
goto outlbl;
}
}
target_size -= blk_dsize;
}
outlbl:
if (lzx != null) lzxd_free(lzx);
if (outfh != null) sys.close(outfh);
if (infh != null) sys.close(infh);
sys.free(buf);
return ret;
}
/// <summary>
/// Decompresses an Offline Address Book with an incremental patch file.
///
/// This requires both a full UNCOMPRESSED Offline Address Book file to
/// act as the "base", and a compressed incremental patch file as input.
/// If the input file is valid, it will be decompressed with reference to
/// the base file, and the decompressed contents will be written to the
/// output file.
///
/// There is no way to tell what the right base file is for the given
/// incremental patch, but if you get it wrong, this will usually result
/// in incorrect data being decompressed, which will then fail a checksum
/// test.
/// </summary>
/// <param name="input">
/// The filename of the input file. This is passed
/// directly to mspack_system::open().
/// </param>
/// <param name="base">
/// The filename of the base file to which the
/// incremental patch shall be applied. This is passed
/// directly to mspack_system::open().
/// </param>
/// <param name="output">
/// The filename of the output file. This is passed
/// directly to mspack_system::open().
/// </param>
/// <returns>An error code, or MSPACK_ERR.MSPACK_ERR_OK if successful</returns>
public MSPACK_ERR decompress_incremental(in string input, in string @base, in string output)
{
mspack_file infh = null, basefh = null, outfh = null;
lzxd_stream lzx = null;
FixedArray<byte> buf = null;
uint window_bits, window_size;
MSPACK_ERR ret = MSPACK_ERR.MSPACK_ERR_OK;
mspack_system sys = this.system;
infh = sys.open(input, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ);
if (infh == null)
{
ret = MSPACK_ERR.MSPACK_ERR_OPEN;
goto outlbl;
}
FixedArray<byte> hdrbuf = new FixedArray<byte>(patchhead_SIZEOF);
if (sys.read(infh, hdrbuf, patchhead_SIZEOF) != patchhead_SIZEOF)
{
ret = MSPACK_ERR.MSPACK_ERR_READ;
goto outlbl;
}
if (EndGetI32(hdrbuf, patchhead_VersionHi) != 3 ||
EndGetI32(hdrbuf, patchhead_VersionLo) != 2)
{
ret = MSPACK_ERR.MSPACK_ERR_SIGNATURE;
goto outlbl;
}
uint block_max = EndGetI32(hdrbuf, patchhead_BlockMax);
uint target_size = EndGetI32(hdrbuf, patchhead_TargetSize);
// We use it for reading block headers too
if (block_max < patchblk_SIZEOF)
block_max = patchblk_SIZEOF;
basefh = sys.open(@base, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ);
if (basefh == null)
{
ret = MSPACK_ERR.MSPACK_ERR_OPEN;
goto outlbl;
}
outfh = sys.open(output, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_WRITE);
if (outfh == null)
{
ret = MSPACK_ERR.MSPACK_ERR_OPEN;
goto outlbl;
}
buf = new FixedArray<byte>(this.buf_size);
if (buf == null)
{
ret = MSPACK_ERR.MSPACK_ERR_NOMEMORY;
goto outlbl;
}
OABSystem oabd_sys = sys as OABSystem;
oabd_file in_ofh = new oabd_file();
in_ofh.orig_sys = sys;
in_ofh.orig_file = infh;
oabd_file out_ofh = new oabd_file();
out_ofh.orig_sys = sys;
out_ofh.orig_file = outfh;
while (target_size > 0)
{
if (sys.read(infh, buf, patchblk_SIZEOF) != patchblk_SIZEOF)
{
ret = MSPACK_ERR.MSPACK_ERR_READ;
goto outlbl;
}
uint blk_csize = EndGetI32(buf, patchblk_PatchSize);
uint blk_dsize = EndGetI32(buf, patchblk_TargetSize);
uint blk_ssize = EndGetI32(buf, patchblk_SourceSize);
uint blk_crc = EndGetI32(buf, patchblk_CRC);
if (blk_dsize > block_max || blk_dsize > target_size ||
blk_ssize > block_max)
{
ret = MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
goto outlbl;
}
window_size = (uint)((blk_ssize + 32767) & ~32767);
window_size += blk_dsize;
window_bits = 17;
while (window_bits < 25 && (1 << (int)window_bits) < window_size)
window_bits++;
in_ofh.available = (int)blk_csize;
out_ofh.crc = 0xffffffff;
lzx = lzxd_init(&oabd_sys, (void*)&in_ofh, (void*)&out_ofh, window_bits, 0, 4096, blk_dsize, 1);
if (lzx == null)
{
ret = MSPACK_ERR.MSPACK_ERR_NOMEMORY;
goto outlbl;
}
ret = lzxd_set_reference_data(lzx, sys, basefh, blk_ssize);
if (ret != MSPACK_ERR.MSPACK_ERR_OK)
goto outlbl;
ret = lzxd_decompress(lzx, blk_dsize);
if (ret != MSPACK_ERR.MSPACK_ERR_OK)
goto outlbl;
lzxd_free(lzx);
lzx = null;
// Consume any trailing padding bytes before the next block
ret = copy_fh(sys, infh, null, in_ofh.available, buf, this.buf_size);
if (ret != MSPACK_ERR.MSPACK_ERR_OK) goto outlbl;
if (out_ofh.crc != blk_crc)
{
ret = MSPACK_ERR.MSPACK_ERR_CHECKSUM;
goto outlbl;
}
target_size -= blk_dsize;
}
outlbl:
if (lzx != null) lzxd_free(lzx);
if (outfh != null) sys.close(outfh);
if (basefh != null) sys.close(basefh);
if (infh != null) sys.close(infh);
//if (buf != null) sys.free(buf);
return ret;
}
private static MSPACK_ERR copy_fh(mspack_system sys, mspack_file infh, mspack_file outfh, int bytes_to_copy, byte* buf, int buf_size)
{
while (bytes_to_copy > 0)
{
int run = buf_size;
if (run > bytes_to_copy)
{
run = bytes_to_copy;
}
if (sys.read(infh, buf, run) != run)
{
return MSPACK_ERR.MSPACK_ERR_READ;
}
if (outfh != null && sys.write(outfh, buf, run) != run)
{
return MSPACK_ERR.MSPACK_ERR_WRITE;
}
bytes_to_copy -= run;
}
return MSPACK_ERR.MSPACK_ERR_OK;
}
/// <summary>
/// Sets an OAB decompression engine parameter. Available only in OAB
/// decompressor version 2 and above.
///
/// - #MSOABD_PARAM_DECOMPBUF: How many bytes should be used as an input
/// buffer by decompressors? The minimum value is 16. The default value
/// is 4096.
/// </summary>
/// <param name="param">The parameter to set</param>
/// <param name="value">The value to set the parameter to</param>
/// <returns>
/// MSPACK_ERR.MSPACK_ERR_OK if all is OK, or MSPACK_ERR.MSPACK_ERR_ARGS if there
/// is a problem with either parameter or value.
/// </returns>
public MSPACK_ERR set_param(MSOABD_PARAM param, int value)
{
if (param == MSOABD_PARAM.MSOABD_PARAM_DECOMPBUF && value >= 16)
{
// Must be at least 16 bytes (patchblk_SIZEOF, oabblk_SIZEOF)
this.buf_size = value;
return MSPACK_ERR.MSPACK_ERR_OK;
}
return MSPACK_ERR.MSPACK_ERR_ARGS;
}
}
}
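
A hedged usage sketch for the decompressor above; the filenames are placeholders.

var oabd = new Decompressor();
oabd.set_param(MSOABD_PARAM.MSOABD_PARAM_DECOMPBUF, 8192); // optional: larger input buffer
MSPACK_ERR err = oabd.decompress("udetails.lzx", "udetails.oab");
if (err == MSPACK_ERR.MSPACK_ERR_OK)
    err = oabd.decompress_incremental("patch.lzx", "udetails.oab", "udetails_new.oab");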

View File

@@ -1,34 +0,0 @@
namespace SabreTools.Compression.libmspack.OAB
{
public unsafe class OABSystem : mspack_default_system
{
/// <inheritdoc/>
public override unsafe int read(mspack_file base_file, void* buf, int size)
{
oabd_file file = (oabd_file)base_file;
int bytes_read;
if (size > file.available)
size = file.available;
bytes_read = file.orig_sys.read(file.orig_file, buf, size);
if (bytes_read < 0)
return bytes_read;
file.available -= bytes_read;
return bytes_read;
}
/// <inheritdoc/>
public override unsafe int write(mspack_file base_file, void* buf, int size)
{
oabd_file file = (oabd_file)base_file;
int bytes_written = file.orig_sys.write(file.orig_file, buf, size);
if (bytes_written > 0)
file.crc = mspack.crc32(file.crc, buf, bytes_written);
return bytes_written;
}
}
}

View File

@@ -1,32 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public static class oab
{
public const int oabhead_VersionHi = 0x0000;
public const int oabhead_VersionLo = 0x0004;
public const int oabhead_BlockMax = 0x0008;
public const int oabhead_TargetSize = 0x000c;
public const int oabhead_SIZEOF = 0x0010;
public const int oabblk_Flags = 0x0000;
public const int oabblk_CompSize = 0x0004;
public const int oabblk_UncompSize = 0x0008;
public const int oabblk_CRC = 0x000c;
public const int oabblk_SIZEOF = 0x0010;
public const int patchhead_VersionHi = 0x0000;
public const int patchhead_VersionLo = 0x0004;
public const int patchhead_BlockMax = 0x0008;
public const int patchhead_SourceSize = 0x000c;
public const int patchhead_TargetSize = 0x0010;
public const int patchhead_SourceCRC = 0x0014;
public const int patchhead_TargetCRC = 0x0018;
public const int patchhead_SIZEOF = 0x001c;
public const int patchblk_PatchSize = 0x0000;
public const int patchblk_TargetSize = 0x0004;
public const int patchblk_SourceSize = 0x0008;
public const int patchblk_CRC = 0x000c;
public const int patchblk_SIZEOF = 0x0010;
}
}

View File

@@ -1,13 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public class oabd_file : mspack_file
{
public mspack_system orig_sys { get; set; }
public mspack_file orig_file { get; set; }
public uint crc { get; set; }
public int available { get; set; }
}
}

View File

@@ -1,241 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public unsafe static class qtm
{
public const int QTM_FRAME_SIZE = 32768;
#region Quantum static data tables
/*
* Quantum uses 'position slots' to represent match offsets. For every
* match, a small 'position slot' number and a small offset from that slot
* are encoded instead of one large offset.
*
* position_base[] is an index to the position slot bases
*
* extra_bits[] states how many bits of offset-from-base data is needed.
*
* length_base[] and length_extra[] are equivalent in function, but are
* used for encoding selector 6 (variable length match) match lengths,
* instead of match offsets.
*
* They are generated with the following code:
* unsigned int i, offset;
* for (i = 0, offset = 0; i < 42; i++) {
* position_base[i] = offset;
* extra_bits[i] = ((i < 2) ? 0 : (i - 2)) >> 1;
* offset += 1 << extra_bits[i];
* }
* for (i = 0, offset = 0; i < 26; i++) {
* length_base[i] = offset;
* length_extra[i] = (i < 2 ? 0 : i - 2) >> 2;
* offset += 1 << length_extra[i];
* }
* length_base[26] = 254; length_extra[26] = 0;
*/
private static readonly uint[] position_base = new uint[42]
{
0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768,
1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576, 32768, 49152,
65536, 98304, 131072, 196608, 262144, 393216, 524288, 786432, 1048576, 1572864
};
private static readonly byte[] extra_bits = new byte[42]
{
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10,
11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19
};
private static readonly byte[] length_base = new byte[27]
{
0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 14, 18, 22, 26,
30, 38, 46, 54, 62, 78, 94, 110, 126, 158, 190, 222, 254
};
private static readonly byte[] length_extra = new byte[27]
{
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0
};
#endregion
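// A hedged sketch (not part of the original source): the C generator quoted in the
// comment above, rendered in C#, so the four hard-coded tables can be re-derived and
// sanity-checked at runtime.
private static void GenerateTables(uint[] positionBase, byte[] extraBits, byte[] lengthBase, byte[] lengthExtra)
{
uint offset = 0;
for (int i = 0; i < 42; i++)
{
positionBase[i] = offset;
extraBits[i] = (byte)(((i < 2) ? 0 : (i - 2)) >> 1);
offset += (uint)(1 << extraBits[i]);
}
offset = 0;
for (int i = 0; i < 26; i++)
{
lengthBase[i] = (byte)offset;
lengthExtra[i] = (byte)((i < 2 ? 0 : i - 2) >> 2);
offset += (uint)(1 << lengthExtra[i]);
}
lengthBase[26] = 254; lengthExtra[26] = 0;
}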
private static void qtmd_update_model(qtmd_model model)
{
qtmd_modelsym tmp;
int i, j;
if (--model.shiftsleft > 0)
{
for (i = model.entries - 1; i >= 0; i--)
{
/* -1, not -2; the 0 entry saves this */
model.syms[i].cumfreq >>= 1;
if (model.syms[i].cumfreq <= model.syms[i + 1].cumfreq)
{
model.syms[i].cumfreq = (ushort)(model.syms[i + 1].cumfreq + 1);
}
}
}
else
{
model.shiftsleft = 50;
for (i = 0; i < model.entries; i++)
{
/* no -1, want to include the 0 entry */
/* this converts cumfreqs into frequencies, then shifts right */
model.syms[i].cumfreq -= model.syms[i + 1].cumfreq;
model.syms[i].cumfreq++; /* avoid losing things entirely */
model.syms[i].cumfreq >>= 1;
}
/* now sort by frequencies, decreasing order -- this must be an
* inplace selection sort, or a sort with the same (in)stability
* characteristics */
for (i = 0; i < model.entries - 1; i++)
{
for (j = i + 1; j < model.entries; j++)
{
if (model.syms[i].cumfreq < model.syms[j].cumfreq)
{
tmp = model.syms[i];
model.syms[i] = model.syms[j];
model.syms[j] = tmp;
}
}
}
/* then convert frequencies back to cumfreq */
for (i = model.entries - 1; i >= 0; i--)
{
model.syms[i].cumfreq += model.syms[i + 1].cumfreq;
}
}
}
/// <summary>
/// Allocates Quantum decompression state for decoding the given stream.
///
/// - returns null if window_bits is outwith the range 10 to 21 (inclusive).
///
/// - uses system.alloc() to allocate memory
///
/// - returns null if not enough memory
///
/// - window_bits is the size of the Quantum window, from 1Kb (10) to 2Mb (21).
///
/// - input_buffer_size is the number of bytes to use to store bitstream data.
/// </summary>
/// <param name="system"></param>
/// <param name="input"></param>
/// <param name="output"></param>
/// <param name="window_bits"></param>
/// <param name="input_buffer_size"></param>
/// <returns></returns>
public static qtmd_stream qtmd_init(mspack_system system, mspack_file input, mspack_file output, int window_bits, int input_buffer_size)
{
uint window_size = (uint)(1 << window_bits);
int i;
if (system == null) return null;
// Quantum supports window sizes of 2^10 (1Kb) through 2^21 (2Mb)
if (window_bits < 10 || window_bits > 21) return null;
// Round up input buffer size to multiple of two
input_buffer_size = (input_buffer_size + 1) & -2;
if (input_buffer_size < 2) return null;
// Allocate decompression state
qtmd_stream qtm = new qtmd_stream();
// Allocate decompression window and input buffer
qtm.window = (byte*)system.alloc((int)window_size);
qtm.inbuf = (byte*)system.alloc((int)input_buffer_size);
if (qtm.window == null || qtm.inbuf == null)
{
system.free(qtm.window);
system.free(qtm.inbuf);
//system.free(qtm);
return null;
}
// Initialise decompression state
qtm.sys = system;
qtm.input = input;
qtm.output = output;
qtm.inbuf_size = (uint)input_buffer_size;
qtm.window_size = window_size;
qtm.window_posn = 0;
qtm.frame_todo = QTM_FRAME_SIZE;
qtm.header_read = 0;
qtm.error = MSPACK_ERR.MSPACK_ERR_OK;
qtm.i_ptr = qtm.i_end = &qtm.inbuf[0];
qtm.o_ptr = qtm.o_end = &qtm.window[0];
qtm.input_end = 0;
qtm.bits_left = 0;
qtm.bit_buffer = 0;
// Initialise arithmetic coding models
// - model 4 depends on window size, ranges from 20 to 24
// - model 5 depends on window size, ranges from 20 to 36
// - model 6pos depends on window size, ranges from 20 to 42
i = window_bits * 2;
qtm.model0 = new qtmd_model(qtm.m0sym, 0, 64);
qtm.model1 = new qtmd_model(qtm.m1sym, 64, 64);
qtm.model2 = new qtmd_model(qtm.m2sym, 128, 64);
qtm.model3 = new qtmd_model(qtm.m3sym, 192, 64);
qtm.model4 = new qtmd_model(qtm.m4sym, 0, (i > 24) ? 24 : i);
qtm.model5 = new qtmd_model(qtm.m5sym, 0, (i > 36) ? 36 : i);
qtm.model6 = new qtmd_model(qtm.m6sym, 0, i);
qtm.model6len = new qtmd_model(qtm.m6lsym, 0, 27);
qtm.model7 = new qtmd_model(qtm.m7sym, 0, 7);
// All ok
return qtm;
}
/// <summary>
/// Decompresses, or decompresses more of, a Quantum stream.
///
/// - out_bytes of data will be decompressed and the function will return
/// with an MSPACK_ERR_OK return code.
///
/// - decompressing will stop as soon as out_bytes is reached. if the true
/// amount of bytes decoded spills over that amount, they will be kept for
/// a later invocation of qtmd_decompress().
///
/// - the output bytes will be passed to the system.write() function given in
/// qtmd_init(), using the output file handle given in qtmd_init(). More
/// than one call may be made to system.write()
///
/// - Quantum will read input bytes as necessary using the system.read()
/// function given in qtmd_init(), using the input file handle given in
/// qtmd_init(). This will continue until system.read() returns 0 bytes,
/// or an error.
/// </summary>
/// <param name="qtm"></param>
/// <param name="out_bytes"></param>
/// <returns></returns>
public static MSPACK_ERR qtmd_decompress(qtmd_stream qtm, long out_bytes) => MSPACK_ERR.MSPACK_ERR_OK;
/// <summary>
/// Frees all state associated with a Quantum data stream
///
/// - calls system.free() using the system pointer given in qtmd_init()
/// </summary>
/// <param name="qtm"></param>
public static void qtmd_free(qtmd_stream qtm)
{
mspack_system sys;
if (qtm != null)
{
sys = qtm.sys;
//sys.free(qtm.window);
//sys.free(qtm.inbuf);
//sys.free(qtm);
}
}
}
}
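
A hedged usage sketch of the Quantum API above. Note that qtmd_decompress() is currently a stub that returns MSPACK_ERR_OK without producing output; the filenames are placeholders.

var sys = new mspack_default_system();
mspack_file input = sys.open("block.qtm", MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ);
mspack_file output = sys.open("block.bin", MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_WRITE);
qtmd_stream q = qtm.qtmd_init(sys, input, output, 20, 4096); // 1 MiB window, 4 KiB input buffer
if (q != null)
{
    MSPACK_ERR err = qtm.qtmd_decompress(q, qtm.QTM_FRAME_SIZE); // one 32 KiB frame
    qtm.qtmd_free(q);
}
sys.close(output);
sys.close(input);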

View File

@@ -1,27 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public unsafe class qtmd_model
{
public int shiftsleft { get; set; }
public int entries { get; set; }
public qtmd_modelsym[] syms { get; set; }
/// <summary>
/// Initialises a model to decode symbols from [start] to [start]+[len]-1
/// </summary>
public qtmd_model(qtmd_modelsym[] syms, int start, int len)
{
this.shiftsleft = 4;
this.entries = len;
this.syms = syms;
for (int i = 0; i <= len; i++)
{
// The symbol array holds class references, so make sure each slot is populated
if (syms[i] == null) syms[i] = new qtmd_modelsym();
syms[i].sym = (ushort)(start + i); // Actual symbol
syms[i].cumfreq = (ushort)(len - i); // Current frequency of that symbol
}
}
}
}
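
As a worked example of the constructor above (a sketch): the selector model, model7, is created with start = 0 and len = 7.

var syms = new qtmd_modelsym[7 + 1];
for (int i = 0; i < syms.Length; i++) syms[i] = new qtmd_modelsym();
var model7 = new qtmd_model(syms, 0, 7);
// Now syms[i].sym == i and syms[i].cumfreq == 7 - i, so symbol i initially owns the
// cumulative-frequency range between syms[i + 1].cumfreq and syms[i].cumfreq.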

View File

@@ -1,9 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public class qtmd_modelsym
{
public ushort sym { get; set; }
public ushort cumfreq { get; set; }
}
}

View File

@@ -1,126 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public unsafe class qtmd_stream : readbits
{
/// <summary>
/// Decoding window
/// </summary>
public byte* window { get; set; }
/// <summary>
/// Window size
/// </summary>
public uint window_size { get; set; }
/// <summary>
/// Bytes remaining for current frame
/// </summary>
public uint frame_todo { get; set; }
/// <summary>
/// High arith coding state
/// </summary>
public ushort H { get; set; }
/// <summary>
/// Low arith coding state
/// </summary>
public ushort L { get; set; }
/// <summary>
/// Current arith coding state
/// </summary>
public ushort C { get; set; }
/// <summary>
/// Have we started decoding a new frame?
/// </summary>
public byte header_read { get; set; }
#region Models
#region Four literal models, each representing 64 symbols
/// <summary>
/// model0 for literals from 0 to 63 (selector = 0)
/// </summary>
public qtmd_model model0 { get; set; }
/// <summary>
/// model1 for literals from 64 to 127 (selector = 1)
/// </summary>
public qtmd_model model1 { get; set; }
/// <summary>
/// model2 for literals from 128 to 191 (selector = 2)
/// </summary>
public qtmd_model model2 { get; set; }
/// <summary>
/// model3 for literals from 192 to 255 (selector = 3)
/// </summary>
public qtmd_model model3 { get; set; }
#endregion
#region Three match models
/// <summary>
/// model4 for match with fixed length of 3 bytes
/// </summary>
public qtmd_model model4 { get; set; }
/// <summary>
/// model5 for match with fixed length of 4 bytes
/// </summary>
public qtmd_model model5 { get; set; }
/// <summary>
/// model6 for variable length match, encoded with model6len model
/// </summary>
public qtmd_model model6 { get; set; }
public qtmd_model model6len { get; set; }
#endregion
/// <summary>
/// selector model. 0-6 to say literal (0,1,2,3) or match (4,5,6)
/// </summary>
public qtmd_model model7 { get; set; }
#endregion
#region Symbol arrays for all models
public qtmd_modelsym[] m0sym { get; set; } = new qtmd_modelsym[64 + 1];
public qtmd_modelsym[] m1sym { get; set; } = new qtmd_modelsym[64 + 1];
public qtmd_modelsym[] m2sym { get; set; } = new qtmd_modelsym[64 + 1];
public qtmd_modelsym[] m3sym { get; set; } = new qtmd_modelsym[64 + 1];
public qtmd_modelsym[] m4sym { get; set; } = new qtmd_modelsym[24 + 1];
public qtmd_modelsym[] m5sym { get; set; } = new qtmd_modelsym[36 + 1];
public qtmd_modelsym[] m6sym { get; set; } = new qtmd_modelsym[42 + 1];
public qtmd_modelsym[] m6lsym { get; set; } = new qtmd_modelsym[27 + 1];
public qtmd_modelsym[] m7sym { get; set; } = new qtmd_modelsym[7 + 1];
#endregion
public override void READ_BYTES()
{
// Work on locals; properties cannot be passed by ref (STORE_BITS writes them back)
byte* i_ptr = this.i_ptr, i_end = this.i_end;
uint bit_buffer = this.bit_buffer, bits_left = this.bits_left;
if (READ_IF_NEEDED(ref i_ptr, ref i_end) != MSPACK_ERR.MSPACK_ERR_OK) return;
byte b0 = *i_ptr++;
if (READ_IF_NEEDED(ref i_ptr, ref i_end) != MSPACK_ERR.MSPACK_ERR_OK) return;
byte b1 = *i_ptr++;
INJECT_BITS_MSB((uint)((b0 << 8) | b1), 16, ref bit_buffer, ref bits_left);
STORE_BITS(i_ptr, i_end, bit_buffer, bits_left);
}
}
}

View File

@@ -1,88 +0,0 @@
namespace SabreTools.Compression.libmspack.SZDD
{
/// <summary>
/// A compressor for the SZDD file format.
///
/// All fields are READ ONLY.
/// </summary>
public abstract class Compressor : BaseCompressor
{
/// <summary>
/// Creates a new SZDD compressor
/// </summary>
public Compressor()
{
this.system = new mspack_default_system();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
/// <summary>
/// Reads an input file and creates a compressed output file in the
/// SZDD compressed file format. The SZDD compression format is quick
/// but gives poor compression. It is possible for the compressed output
/// file to be larger than the input file.
///
/// Conventionally, SZDD compressed files have the final character in
/// their filename replaced with an underscore, to show they are
/// compressed. The missing character is stored in the compressed file
/// itself. This is due to the restricted filename conventions of MS-DOS;
/// most operating systems, such as UNIX, simply append another file
/// extension to the existing filename. As mspack does not deal with
/// filenames, this is left up to you. If you wish to set the missing
/// character stored in the file header, use set_param() with the
/// #MSSZDDC_PARAM_MISSINGCHAR parameter.
///
/// "Stream" compression (where the length of the input data is not
/// known) is not possible. The length of the input data is stored in the
/// header of the SZDD file and must therefore be known before any data
/// is compressed. Due to technical limitations of the file format, the
/// maximum size of uncompressed file that will be accepted is 2147483647
/// bytes.
/// </summary>
/// <param name="input">
/// The name of the file to be compressed. This is passed
/// directly to mspack_system::open()
/// </param>
/// <param name="output">
/// The name of the file to write compressed data to.
/// This is passed directly to mspack_system::open().
/// </param>
/// <param name="length">
/// The length of the uncompressed file, or -1 to indicate
/// that this should be determined automatically by using
/// mspack_system::seek() on the input file.
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
/// <see cref="set_param(int, int)"/>
public abstract MSPACK_ERR compress(in string input, in string output, long length);
/// <summary>
/// Sets an SZDD compression engine parameter.
///
/// The following parameters are defined:
///
/// - #MSSZDDC_PARAM_MISSINGCHAR: the "missing character", the last character
/// in the uncompressed file's filename, which is traditionally replaced
/// with an underscore to show the file is compressed. Traditionally,
/// this can only be a character that is a valid part of an MS-DOS
/// filename, but libmspack permits any character between 0x00 and 0xFF
/// to be stored. 0x00 is the default, and it represents "no character
/// stored".
/// </summary>
/// <param name="param">The parameter to set</param>
/// <param name="value">The value to set the parameter to</param>
/// <returns>
/// MSPACK_ERR_OK if all is OK, or MSPACK_ERR_ARGS if there
/// is a problem with either parameter or value.
/// </returns>
/// <see cref="compress(in string, in string, long)"/>
public abstract MSPACK_ERR set_param(MSSZDDC_PARAM param, int value);
/// <summary>
/// Returns the error code set by the most recently called method.
/// </summary>
/// <returns>The most recent error code</returns>
/// <see cref="compress(in string, in string, long)"/>
public abstract MSPACK_ERR last_error();
}
}
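
A hedged usage sketch: Compressor is abstract here, so this assumes a hypothetical concrete subclass (SzddCompressor) and assumes the MSSZDDC_PARAM_MISSINGCHAR enum member follows the naming pattern used elsewhere in this port; the filenames are placeholders.

Compressor szddc = new SzddCompressor(); // hypothetical concrete implementation
szddc.set_param(MSSZDDC_PARAM.MSSZDDC_PARAM_MISSINGCHAR, (int)'E'); // "SETUP.EX_" hides 'E'
MSPACK_ERR err = szddc.compress("SETUP.EXE", "SETUP.EX_", -1); // -1: determine length via seek()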

View File

@@ -1,19 +0,0 @@
/* This file is part of libmspack.
* (C) 2003-2004 Stuart Caie.
*
* libmspack is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License (LGPL) version 2.1
*
* For further details, see the file COPYING.LIB distributed with libmspack
*/
namespace SabreTools.Compression.libmspack.SZDD
{
public static class Constants
{
/// <summary>
/// Input buffer size during decompression - not worth parameterising IMHO
/// </summary>
public const int SZDD_INPUT_SIZE = 2048;
}
}

View File

@@ -1,231 +0,0 @@
using static SabreTools.Compression.libmspack.macros;
using static SabreTools.Compression.libmspack.SZDD.Constants;
namespace SabreTools.Compression.libmspack.SZDD
{
/// <summary>
/// A decompressor for SZDD compressed files.
///
/// All fields are READ ONLY.
/// </summary>
/// <see cref="mspack.mspack_create_szdd_decompressor()"/>
/// <see cref="mspack.mspack_destroy_szdd_decompressor(Decompressor)"/>
public unsafe class Decompressor : BaseDecompressor
{
/// <summary>
/// Creates a new SZDD decompressor
/// </summary>
public Decompressor()
{
this.system = new mspack_default_system();
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
/// <summary>
/// Destroys an existing SZDD decompressor
/// </summary>
~Decompressor()
{
mspack_system sys = this.system;
//sys.free(self);
}
/// <summary>
/// Opens a SZDD file and reads the header.
///
/// If the file opened is a valid SZDD file, all headers will be read and
/// a msszddd_header structure will be returned.
///
/// In the case of an error occurring, null is returned and the error code
/// is available from last_error().
///
/// The filename pointer should be considered "in use" until close() is
/// called on the SZDD file.
/// </summary>
/// <param name="filename">
/// The filename of the SZDD compressed file. This is
/// passed directly to mspack_system::open().
/// </param>
/// <returns>A pointer to a msszddd_header structure, or null on failure</returns>
/// <see cref="close(msszddd_header)"/>
public msszddd_header open(in string filename)
{
mspack_system sys = this.system;
mspack_file fh = sys.open(filename, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ);
if (fh == null)
{
// A failed open is reported as MSPACK_ERR_OPEN, matching the libmspack behaviour
this.error = MSPACK_ERR.MSPACK_ERR_OPEN;
return null;
}
msszddd_header hdr = new msszddd_header();
hdr.fh = fh;
this.error = ReadHeaders(sys, fh, hdr);
if (this.error != MSPACK_ERR.MSPACK_ERR_OK)
{
sys.close(fh);
//sys.free(hdr);
hdr = null;
}
return hdr;
}
/// <summary>
/// Closes a previously opened SZDD file.
///
/// This closes a SZDD file and frees the msszddd_header associated with
/// it.
///
/// The SZDD header pointer is now invalid and cannot be used again.
/// </summary>
/// <param name="szdd">The SZDD file to close</param>
public void close(msszddd_header szdd)
{
if (this.system == null) return;
// Close the file handle associated
this.system.close(szdd.fh);
// Free the memory associated
//this.system.free(hdr);
this.error = MSPACK_ERR.MSPACK_ERR_OK;
}
private static readonly byte[] szdd_signature_expand = new byte[8]
{
0x53, 0x5A, 0x44, 0x44, 0x88, 0xF0, 0x27, 0x33
};
private static readonly byte[] szdd_signature_qbasic = new byte[8]
{
0x53, 0x5A, 0x20, 0x88, 0xF0, 0x27, 0x33, 0xD1
};
/// <summary>
/// Reads the headers of an SZDD format file
/// </summary>
/// <param name="sys"></param>
/// <param name="fh"></param>
/// <param name="hdr"></param>
/// <returns></returns>
private static MSPACK_ERR ReadHeaders(mspack_system sys, mspack_file fh, msszddd_header hdr)
{
FixedArray<byte> buf = new FixedArray<byte>(8);
// Read and check signature
if (sys.read(fh, buf, 8) != 8) return MSPACK_ERR.MSPACK_ERR_READ;
if (buf.SequenceEqual(szdd_signature_expand))
{
// Common SZDD
hdr.format = MSSZDD_FMT.MSSZDD_FMT_NORMAL;
// Read the rest of the header
if (sys.read(fh, buf, 6) != 6) return MSPACK_ERR.MSPACK_ERR_READ;
if (buf[0] != 0x41) return MSPACK_ERR.MSPACK_ERR_DATAFORMAT;
hdr.missing_char = (char)buf[1];
hdr.length = EndGetI32(buf, 2);
}
else if (buf.SequenceEqual(szdd_signature_qbasic))
{
// Special QBasic SZDD
hdr.format = MSSZDD_FMT.MSSZDD_FMT_QBASIC;
if (sys.read(fh, buf, 4) != 4) return MSPACK_ERR.MSPACK_ERR_READ;
hdr.missing_char = '\0';
hdr.length = EndGetI32(buf, 0);
}
else
{
return MSPACK_ERR.MSPACK_ERR_SIGNATURE;
}
return MSPACK_ERR.MSPACK_ERR_OK;
}
/// <summary>
/// Extracts the compressed data from a SZDD file.
///
/// This decompresses the compressed SZDD data stream and writes it to
/// an output file.
/// </summary>
/// <param name="szdd">The SZDD file to extract data from</param>
/// <param name="filename">
/// The filename to write the decompressed data to. This
/// is passed directly to mspack_system::open().
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
public MSPACK_ERR extract(msszddd_header szdd, in string filename)
{
if (szdd == null) return this.error = MSPACK_ERR.MSPACK_ERR_ARGS;
mspack_system sys = this.system;
mspack_file fh = szdd.fh;
// Seek to the compressed data
long data_offset = (szdd.format == MSSZDD_FMT.MSSZDD_FMT_NORMAL) ? 14 : 12;
if (sys.seek(fh, data_offset, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_START) != 0)
{
return this.error = MSPACK_ERR.MSPACK_ERR_SEEK;
}
// Open file for output
mspack_file outfh;
if ((outfh = sys.open(filename, MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_WRITE)) == null)
{
return this.error = MSPACK_ERR.MSPACK_ERR_OPEN;
}
// Decompress the data
this.error = lzss_decompress(sys, fh, outfh, SZDD_INPUT_SIZE,
szdd.format == MSSZDD_FMT.MSSZDD_FMT_NORMAL
? LZSS_MODE.LZSS_MODE_EXPAND
: LZSS_MODE.LZSS_MODE_QBASIC);
// Close output file
sys.close(outfh);
return this.error;
}
/// <summary>
/// Decompresses an SZDD file to an output file in one step.
///
/// This opens an SZDD file as input, reads the header, then decompresses
/// the compressed data immediately to an output file, finally closing
/// both the input and output file. It is more convenient to use than
/// open() then extract() then close(), if you do not need to know the
/// SZDD output size or missing character.
/// </summary>
/// <param name="input">
/// The filename of the input SZDD file. This is passed
/// directly to mspack_system::open().
/// </param>
/// <param name="output">
/// The filename to write the decompressed data to. This
/// is passed directly to mspack_system::open().
/// </param>
/// <returns>An error code, or MSPACK_ERR_OK if successful</returns>
public MSPACK_ERR decompress(in string input, in string output)
{
msszddd_header hdr;
if ((hdr = open(input)) == null) return this.error;
MSPACK_ERR error = extract(hdr, output);
close(hdr);
return this.error = error;
}
/// <summary>
/// Returns the error code set by the most recently called method.
///
/// This is useful for open() which does not return an
/// error code directly.
/// </summary>
/// <returns>The most recent error code</returns>
/// <see cref="open(in string)"/>
/// <see cref="extract(msszddd_header, in string)"/>
/// <see cref="decompress(in string, in string)"/>
public MSPACK_ERR last_error()
{
return this.error;
}
}
}
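
A hedged usage sketch for the decompressor above; the filenames are placeholders.

var szddd = new Decompressor();
msszddd_header hdr = szddd.open("README.TX_");
if (hdr != null)
{
    // hdr.length and hdr.missing_char are available here if needed
    MSPACK_ERR err = szddd.extract(hdr, "README.TXT");
    szddd.close(hdr);
}
// Or, when the header details are not needed, in one step:
MSPACK_ERR quick = szddd.decompress("README.TX_", "README.TXT");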

View File

@@ -1,31 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents an SZDD compressed file.
///
/// All fields are READ ONLY.
/// </summary>
public class msszddd_header
{
public mspack_file fh { get; set; }
/// <summary>
/// The file format
/// </summary>
public MSSZDD_FMT format { get; set; }
/// <summary>
/// The amount of data in the SZDD file once uncompressed.
/// </summary>
public long length { get; set; }
/// <summary>
/// The last character in the filename, traditionally replaced with an
/// underscore to show the file is compressed. The null character is used
/// to show that this character has not been stored (e.g. because the
/// filename is not known). Generally, only characters that may appear in
/// an MS-DOS filename (except ".") are valid.
/// </summary>
public char missing_char { get; set; }
}
}

View File

@@ -1,118 +0,0 @@
/*
* COPYRIGHT (C) 1986 Gary S. Brown. You may use this program, or
* code or tables extracted from it, as desired without restriction.
*
* First, the polynomial itself and its table of feedback terms. The
* polynomial is
* X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0
*
* Note that we take it "backwards" and put the highest-order term in
* the lowest-order bit. The X^32 term is "implied"; the LSB is the
* X^31 term, etc. The X^0 term (usually shown as "+1") results in
* the MSB being 1
*
* Note that the usual hardware shift register implementation, which
* is what we're using (we're merely optimizing it by doing eight-bit
* chunks at a time) shifts bits into the lowest-order term. In our
* implementation, that means shifting towards the right. Why do we
* do it this way? Because the calculated CRC must be transmitted in
* order from highest-order term to lowest-order term. UARTs transmit
* characters in order from LSB to MSB. By storing the CRC this way
* we hand it to the UART in the order low-byte to high-byte; the UART
* sends each low-bit to high-bit; and the result is transmission bit
* by bit from highest- to lowest-order term without requiring any bit
* shuffling on our part. Reception works similarly
*
* The feedback terms table consists of 256, 32-bit entries. Notes
*
* The table can be generated at runtime if desired; code to do so
* is shown later. It might not be obvious, but the feedback
* terms simply represent the results of eight shift/xor operations
* for all combinations of data and CRC register values
*
* The values must be right-shifted by eight bits by the "updcrc"
* logic; the shift must be unsigned (bring in zeroes). On some
* hardware you could probably optimize the shift in assembler by
* using byte-swap instructions
* polynomial $edb88320
*/
namespace SabreTools.Compression.libmspack
{
public unsafe static partial class mspack
{
private static readonly uint[] crc32_table = new uint[256]
{
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
0x2d02ef8d
};
/// <summary>
/// Return a 32-bit CRC of the contents of the buffer.
/// </summary>
/// <param name="val"></param>
/// <param name="ss"></param>
/// <param name="len"></param>
/// <returns></returns>
public static uint crc32(uint val, void* ss, int len)
{
byte* s = (byte*)ss;
while (--len >= 0)
{
val = crc32_table[(val ^ *s++) & 0xff] ^ (val >> 8);
}
return val;
}
}
}
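
A hedged sketch of calling crc32() the way the OAB decompressor above does: the running value is seeded with 0xffffffff and compared directly, with no final XOR applied. This belongs inside a method in a project that allows unsafe code.

byte[] data = System.Text.Encoding.ASCII.GetBytes("123456789");
uint crc = 0xffffffff;
unsafe
{
    fixed (byte* p = data)
    {
        crc = mspack.crc32(crc, p, data.Length);
    }
}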

View File

@@ -1,35 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public static class macros
{
private static uint __egi32(FixedArray<byte> a, int n)
{
return (uint)((a[n + 3] << 24) | (a[n + 2] << 16) | (a[n + 1] << 8) | (a[n + 0]));
}
public static ulong EndGetI64(FixedArray<byte> a, int n)
{
// Cast to ulong before shifting; a 32-bit shift on a uint would be masked to 0
return ((ulong)__egi32(a, n + 4) << 32) | __egi32(a, n + 0);
}
public static uint EndGetI32(FixedArray<byte> a, int n)
{
return __egi32(a, n + 0);
}
public static ushort EndGetI16(FixedArray<byte> a, int n)
{
return (ushort)((a[n + 1] << 8) | a[n + 0]);
}
public static uint EndGetM32(FixedArray<byte> a, int n)
{
return (uint)((a[n + 0] << 24) | (a[n + 1] << 16) | (a[n + 2] << 8) | (a[n + 3]));
}
public static ushort EndGetM16(FixedArray<byte> a, int n)
{
return (ushort)((a[n + 0] << 8) | a[n + 1]);
}
}
}
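
A hedged sketch of the byte-order helpers above, assuming FixedArray&lt;byte&gt; exposes a writable indexer: the EndGetI* helpers read little-endian ("Intel") order, the EndGetM* helpers read big-endian ("Motorola") order from the same bytes.

var a = new FixedArray<byte>(4);
a[0] = 0x78; a[1] = 0x56; a[2] = 0x34; a[3] = 0x12;
uint le = macros.EndGetI32(a, 0); // 0x12345678
uint be = macros.EndGetM32(a, 0); // 0x78563412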

View File

@@ -1,140 +0,0 @@
using System;
using System.IO;
using System.Runtime.InteropServices;
namespace SabreTools.Compression.libmspack
{
public class mspack_default_system : mspack_system
{
/// <inheritdoc/>
public override mspack_file open(in string filename, MSPACK_SYS_OPEN mode)
{
FileMode fmode;
FileAccess faccess;
switch (mode)
{
case MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_READ: fmode = FileMode.Open; faccess = FileAccess.Read; break;
case MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_WRITE: fmode = FileMode.OpenOrCreate; faccess = FileAccess.Write; break;
case MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_UPDATE: fmode = FileMode.Open; faccess = FileAccess.ReadWrite; break;
case MSPACK_SYS_OPEN.MSPACK_SYS_OPEN_APPEND: fmode = FileMode.Append; faccess = FileAccess.Write; break; // FileMode.Append requires write-only access
default: return null;
}
try
{
var fh = new mspack_file_p
{
name = filename,
fh = File.Open(filename, fmode, faccess),
};
return fh;
}
catch
{
return null;
}
}
/// <inheritdoc/>
public override void close(mspack_file file)
{
if (file is mspack_file_p self)
self.fh?.Dispose();
}
/// <inheritdoc/>
public override unsafe int read(mspack_file file, void* buffer, int bytes)
{
try
{
if (file is mspack_file_p self && buffer != null && bytes >= 0)
{
// Read into a managed buffer, then copy out to the caller's pointer;
// return the number of bytes actually read (0 marks end of file)
byte[] temp = new byte[bytes];
int read = self.fh.Read(temp, 0, bytes);
if (read > 0)
Marshal.Copy(temp, 0, (IntPtr)buffer, read);
return read;
}
}
catch { }
return -1;
}
/// <inheritdoc/>
public override unsafe int write(mspack_file file, void* buffer, int bytes)
{
try
{
if (file is mspack_file_p self && buffer != null && bytes >= 0)
{
var ums = new UnmanagedMemoryStream((byte*)buffer, bytes);
ums.CopyTo(self.fh, bytes);
return bytes;
}
}
catch { }
return -1;
}
/// <inheritdoc/>
public override int seek(mspack_file file, long offset, MSPACK_SYS_SEEK mode)
{
try
{
if (file is mspack_file_p self)
{
SeekOrigin origin;
switch (mode)
{
case MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_START: origin = SeekOrigin.Begin; break;
case MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_CUR: origin = SeekOrigin.Current; break;
case MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_END: origin = SeekOrigin.End; break;
default: return -1;
}
self.fh.Seek(offset, origin);
return 0;
}
}
catch { }
return -1;
}
/// <inheritdoc/>
public override long tell(mspack_file file)
{
var self = file as mspack_file_p;
return self != null ? self.fh.Position : 0;
}
/// <inheritdoc/>
public override void message(mspack_file file, in string format, params string[] args)
{
if (file != null) Console.Write((file as mspack_file_p)?.name);
Console.Write(format, args);
Console.Write("\n");
}
/// <inheritdoc/>
public override unsafe void* alloc(int bytes)
{
return (void*)new FixedArray<byte>(bytes).Pointer;
}
/// <inheritdoc/>
public override unsafe void free(void* ptr)
{
Marshal.FreeCoTaskMem((IntPtr)ptr);
}
/// <inheritdoc/>
public override unsafe void copy(void* src, void* dest, int bytes)
{
byte[] temp = new byte[bytes];
Marshal.Copy((IntPtr)src, temp, 0, bytes);
Marshal.Copy(temp, 0, (IntPtr)dest, bytes);
}
}
}

View File

@@ -1,9 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which represents an open file handle. The contents of this
/// structure are determined by the implementation of the
/// mspack_system::open() method.
/// </summary>
public unsafe abstract class mspack_file { }
}

View File

@@ -1,11 +0,0 @@
using System.IO;
namespace SabreTools.Compression.libmspack
{
public class mspack_file_p : mspack_file
{
public Stream fh { get; set; }
public string name { get; set; }
}
}

View File

@@ -1,170 +0,0 @@
namespace SabreTools.Compression.libmspack
{
/// <summary>
/// A structure which abstracts file I/O and memory management.
///
/// The library always uses the mspack_system structure for interaction
/// with the file system and to allocate, free and copy all memory. It also
/// uses it to send literal messages to the library user.
///
/// When the library is compiled normally, passing null to a compressor or
/// decompressor constructor will result in a default mspack_system being
/// used, where all methods are implemented with the standard C library.
///
/// However, all constructors support being given a custom created
/// mspack_system structure, with the library user's own methods. This
/// allows for more abstract interaction, such as reading and writing files
/// directly to memory, or from a network socket or pipe.
///
/// Implementors of an mspack_system structure should read all
/// documentation entries for every structure member, and write methods
/// which conform to those standards.
/// </summary>
public unsafe abstract class mspack_system
{
/// <summary>
/// Opens a file for reading, writing, appending or updating
/// </summary>
/// <param name="filename">
/// The file to be opened. It is passed directly from the
/// library caller without being modified, so it is up to
/// the caller what this parameter actually represents.
/// </param>
/// <param name="mode">One of <see cref="MSPACK_SYS_OPEN"/> values</param>
/// <returns>
/// A pointer to a mspack_file structure. This structure officially
/// contains no members, its true contents are up to the
/// mspack_system implementor. It should contain whatever is needed
/// for other mspack_system methods to operate. Returning the null
/// pointer indicates an error condition.
/// </returns>
/// <see cref="close(mspack_file)"/>
/// <see cref="read(mspack_file, void*, int)"/>
/// <see cref="write(mspack_file, void*, int)"/>
/// <see cref="seek(mspack_file, int, MSPACK_SYS_SEEK)"/>
/// <see cref="tell(mspack_file)"/>
/// <see cref="message(mspack_file, in string, string[])"/>
public abstract mspack_file open(in string filename, MSPACK_SYS_OPEN mode);
/// <summary>
/// Closes a previously opened file. If any memory was allocated for this
/// particular file handle, it should be freed at this time.
/// </summary>
/// <param name="file">The file to close</param>
/// <see cref="open(in string, MSPACK_SYS_OPEN)"/>
public abstract void close(mspack_file file);
/// <summary>
/// Reads a given number of bytes from an open file.
/// </summary>
/// <param name="file">The file to read from</param>
/// <param name="buffer">The location where the read bytes should be stored</param>
/// <param name="bytes">The number of bytes to read from the file</param>
/// <returns>
/// The number of bytes successfully read (this can be less than
/// the number requested), zero to mark the end of file, or less
/// than zero to indicate an error. The library does not "retry"
/// reads and assumes short reads are due to EOF, so you should
/// avoid returning short reads because of transient errors.
/// </returns>
/// <see cref="open(in string, MSPACK_SYS_OPEN)"/>
/// <see cref="write(mspack_file, void*, int)"/>
public abstract int read(mspack_file file, void* buffer, int bytes);
/// <summary>
/// Writes a given number of bytes to an open file.
/// </summary>
/// <param name="file">The file to write to</param>
/// <param name="buffer">The location where the written bytes should be read from</param>
/// <param name="bytes">The number of bytes to write to the file</param>
/// <returns>
/// The number of bytes successfully written, this can be less
/// than the number requested. Zero or less can indicate an error
/// where no bytes at all could be written. All cases where less
/// bytes were written than requested are considered by the library
/// to be an error.
/// </returns>
/// <see cref="open(in string, MSPACK_SYS_OPEN)"/>
/// <see cref="read(mspack_file, void*, int)"/>
public abstract int write(mspack_file file, void* buffer, int bytes);
/// <summary>
/// Seeks to a specific file offset within an open file.
///
/// Sometimes the library needs to know the length of a file. It does
/// this by seeking to the end of the file with seek(file, 0,
/// MSPACK_SYS_SEEK_END), then calling tell(). Implementations may want
/// to make a special case for this.
///
/// Due to the potentially varying 32/64 bit datatype off_t on some
/// architectures, the #MSPACK_SYS_SELFTEST macro MUST be used before
/// using the library. If not, the error caused by the library passing an
/// inappropriate stackframe to seek() is subtle and hard to trace.
/// </summary>
/// <param name="file">The file to be seeked</param>
/// <param name="offset">An offset to seek, measured in bytes</param>
/// <param name="mode">One of <see cref="MSPACK_SYS_SEEK"/> values</param>
/// <returns>Zero for success, non-zero for an error</returns>
/// <see cref="open(in string, MSPACK_SYS_OPEN)"/>
/// <see cref="tell(mspack_file)"/>
public abstract int seek(mspack_file file, long offset, MSPACK_SYS_SEEK mode);
/// <summary>
/// Returns the current file position (in bytes) of the given file.
/// </summary>
/// <param name="file">The file whose file position is wanted</param>
/// <returns>The current file position of the file</returns>
/// <see cref="open(in string, MSPACK_SYS_OPEN)"/>
/// <see cref="seek(mspack_file, int, MSPACK_SYS_SEEK)"/>
public abstract long tell(mspack_file file);
/// <summary>
/// Used to send messages from the library to the user.
///
/// Occasionally, the library generates warnings or other messages in
/// plain english to inform the human user. These are informational only
/// and can be ignored if not wanted.
/// </summary>
/// <param name="file">
/// May be a file handle returned from open() if this message
/// pertains to a specific open file, or null if not related to
/// a specific file.
/// </param>
/// <param name="format">
/// a printf() style format string. It does NOT include a
/// trailing newline.
/// </param>
public abstract void message(mspack_file file, in string format, params string[] args);
/// <summary>
/// Allocates memory
/// </summary>
/// <param name="bytes">The number of bytes to allocate</param>
/// <returns>
/// A pointer to the requested number of bytes, or null if
/// not enough memory is available
/// </returns>
/// <see cref="free(void*)"/>
public abstract void* alloc(int bytes);
/// <summary>
/// Frees memory
/// </summary>
/// <param name="ptr">The memory to be freed. null is accepted and ignored.</param>
/// <see cref="alloc(int)"/>
public abstract void free(void* ptr);
/// <summary>
/// Copies from one region of memory to another.
///
/// The regions of memory are guaranteed not to overlap, are usually less
/// than 256 bytes, and may not be aligned. Please note that the source
/// parameter comes before the destination parameter, unlike the standard
/// C function memcpy().
/// </summary>
/// <param name="src">The region of memory to copy from</param>
/// <param name="dest">The region of memory to copy to</param>
/// <param name="bytes">The size of the memory region, in bytes</param>
public abstract void copy(void* src, void* dest, int bytes);
}
}
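
Since the documentation above is about supplying a custom mspack_system, here is a minimal hedged sketch: a wrapper in the same spirit as the OABSystem shown earlier, keeping the default file I/O but counting the bytes transferred.

namespace SabreTools.Compression.libmspack
{
    public unsafe class CountingSystem : mspack_default_system
    {
        public long BytesRead { get; private set; }
        public long BytesWritten { get; private set; }

        /// <inheritdoc/>
        public override int read(mspack_file file, void* buffer, int bytes)
        {
            int n = base.read(file, buffer, bytes);
            if (n > 0) BytesRead += n;
            return n;
        }

        /// <inheritdoc/>
        public override int write(mspack_file file, void* buffer, int bytes)
        {
            int n = base.write(file, buffer, bytes);
            if (n > 0) BytesWritten += n;
            return n;
        }
    }
}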

View File

@@ -1,245 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public unsafe abstract class readbits
{
/// <summary>
/// I/O routines
/// </summary>
public mspack_system sys { get; set; }
/// <summary>
/// Input file handle
/// </summary>
public mspack_file input { get; set; }
/// <summary>
/// Output file handle
/// </summary>
public mspack_file output { get; set; }
/// <summary>
/// Decompression offset within window
/// </summary>
public uint window_posn { get; set; }
#region I/O buffering
public byte* inbuf { get; set; }
public byte* i_ptr { get; set; }
public byte* i_end { get; set; }
public byte* o_ptr { get; set; }
public byte* o_end { get; set; }
public int input_end { get; set; }
public uint bit_buffer { get; set; }
public uint bits_left { get; set; }
public uint inbuf_size { get; set; }
#endregion
public MSPACK_ERR error { get; set; }
/// <see href="https://github.com/kyz/libmspack/blob/master/libmspack/mspack/readbits.h"/>
#region readbits.h
// Width of the bit buffer; bit_buffer is declared as a 32-bit uint
private const int BITBUF_WIDTH = 32;
private static readonly ushort[] lsb_bit_mask = new ushort[17]
{
0x0000, 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};
public void INIT_BITS()
{
this.i_ptr = inbuf;
this.i_end = inbuf;
this.bit_buffer = 0;
this.bits_left = 0;
this.input_end = 0;
}
public void STORE_BITS(byte* i_ptr, byte* i_end, uint bit_buffer, uint bits_left)
{
this.i_ptr = i_ptr;
this.i_end = i_end;
this.bit_buffer = bit_buffer;
this.bits_left = bits_left;
}
public void RESTORE_BITS(out byte* i_ptr, out byte* i_end, out uint bit_buffer, out uint bits_left)
{
i_ptr = this.i_ptr;
i_end = this.i_end;
bit_buffer = this.bit_buffer;
bits_left = this.bits_left;
}
public void ENSURE_BITS(byte nbits, ref uint bits_left)
{
while (bits_left < nbits)
{
this.READ_BYTES();
}
}
#region MSB
public void READ_BITS_MSB(out int val, byte nbits, ref uint bit_buffer, ref uint bits_left)
{
this.ENSURE_BITS(nbits, ref bits_left);
val = PEEK_BITS_MSB(nbits, bit_buffer);
REMOVE_BITS_MSB(nbits, ref bit_buffer, ref bits_left);
}
public void READ_MANY_BITS_MSB(out int val, byte bits, ref uint bit_buffer, ref uint bits_left)
{
byte needed = bits;
byte bitrun;
val = 0;
while (needed > 0)
{
if (bits_left < (int)(BITBUF_WIDTH - 16))
this.READ_BYTES();
bitrun = (bits_left < needed) ? (byte)bits_left : needed;
val = (val << bitrun) | PEEK_BITS_MSB(bitrun, bit_buffer);
REMOVE_BITS_MSB(bitrun, ref bit_buffer, ref bits_left);
needed -= bitrun;
}
}
// Peeks at the top nbits of the MSB-aligned bit buffer without consuming them
public int PEEK_BITS_MSB(byte nbits, uint bit_buffer)
{
return (int)(bit_buffer >> (BITBUF_WIDTH - nbits));
}
// Discards the top nbits of the MSB-aligned bit buffer
public void REMOVE_BITS_MSB(byte nbits, ref uint bit_buffer, ref uint bits_left)
{
bit_buffer <<= nbits;
bits_left -= nbits;
}
// Injects nbits of bitdata immediately below the bits already buffered
public void INJECT_BITS_MSB(uint bitdata, byte nbits, ref uint bit_buffer, ref uint bits_left)
{
bit_buffer |= bitdata << (int)(BITBUF_WIDTH - nbits - bits_left);
bits_left += nbits;
}
#endregion
#region LSB
public void READ_BITS_LSB(out int val, byte nbits, ref uint bit_buffer, ref uint bits_left)
{
this.ENSURE_BITS(nbits, ref bits_left);
val = PEEK_BITS_LSB(nbits, bit_buffer);
REMOVE_BITS_LSB(nbits, ref bit_buffer, ref bits_left);
}
public void READ_MANY_BITS_LSB(out int val, byte bits, ref uint bit_buffer, ref uint bits_left)
{
byte needed = bits;
byte bitrun;
val = 0;
while (needed > 0)
{
if (bits_left < (int)(BITBUF_WIDTH - 16))
this.READ_BYTES();
bitrun = (bits_left < needed) ? (byte)bits_left : needed;
val = (val << bitrun) | PEEK_BITS_LSB(bitrun, bit_buffer);
REMOVE_BITS_LSB(bitrun, ref bit_buffer, ref bits_left);
needed -= bitrun;
}
}
// Peeks at the bottom nbits of the LSB-aligned bit buffer without consuming them
public int PEEK_BITS_LSB(byte nbits, uint bit_buffer)
{
return (int)(bit_buffer & ((uint)(1 << nbits) - 1));
}
// Discards the bottom nbits of the LSB-aligned bit buffer
public void REMOVE_BITS_LSB(byte nbits, ref uint bit_buffer, ref uint bits_left)
{
bit_buffer >>= nbits;
bits_left -= nbits;
}
// Injects nbits of bitdata above the bits already buffered
public void INJECT_BITS_LSB(uint bitdata, byte nbits, ref uint bit_buffer, ref uint bits_left)
{
bit_buffer |= bitdata << (int)bits_left;
bits_left += nbits;
}
#endregion
#region LSB_T
// Peeks at the bottom nbits using a precomputed mask table instead of shifting
public int PEEK_BITS_LSB_T(byte nbits, uint bit_buffer)
{
return (int)(bit_buffer & lsb_bit_mask[nbits]);
}
public void READ_BITS_LSB_T(out int val, byte nbits, ref uint bit_buffer, ref uint bits_left)
{
this.ENSURE_BITS(nbits, ref bits_left);
val = PEEK_BITS_LSB_T(nbits, bit_buffer);
REMOVE_BITS_LSB(nbits, ref bit_buffer, ref bits_left);
}
#endregion
// Reads more input into the bit buffer; each concrete decompressor supplies its own implementation
public abstract void READ_BYTES();
// Refills the input buffer once the read pointer has reached its end
public MSPACK_ERR READ_IF_NEEDED(ref byte* i_ptr, ref byte* i_end)
{
if (i_ptr >= i_end)
{
if (ReadInput() != MSPACK_ERR.MSPACK_ERR_OK)
return this.error;
i_ptr = this.i_ptr;
i_end = this.i_end;
}
return MSPACK_ERR.MSPACK_ERR_OK;
}
private MSPACK_ERR ReadInput()
{
int read = this.sys.read(this.input, this.inbuf, (int)this.inbuf_size);
if (read < 0) return this.error = MSPACK_ERR.MSPACK_ERR_READ;
/* we might overrun the input stream by asking for bits we don't use,
* so fake 2 more bytes at the end of input */
if (read == 0)
{
if (this.input_end != 0)
{
System.Console.Error.WriteLine("Out of input bytes");
return this.error = MSPACK_ERR.MSPACK_ERR_READ;
}
else
{
read = 2;
this.inbuf[0] = this.inbuf[1] = 0;
this.input_end = 1;
}
}
// Update i_ptr and i_end
this.i_ptr = this.inbuf;
this.i_end = this.inbuf + read;
return MSPACK_ERR.MSPACK_ERR_OK;
}
#endregion
}
}
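
readbits is the shared bit-buffer base that the upstream decompressors drive through these macro-like helpers. The sketch below is hypothetical (class name, the one-byte LSB refill, and the 3-bit read are illustrative assumptions, not part of the port) and shows the intended flow: READ_BYTES() pulls input through READ_IF_NEEDED and INJECT_BITS_*, while callers peek and remove bits. It keeps the buffer state on the instance, since READ_BYTES() has no ref parameters through which to update a caller's local copies.

namespace SabreTools.Compression.libmspack
{
    // Hypothetical reader; assumes sys, input, inbuf and inbuf_size were set up
    // by the caller and INIT_BITS() was called before any reads
    public unsafe class ExampleBitReader : readbits
    {
        // Pull one more byte from the input into the LSB-ordered bit buffer
        public override void READ_BYTES()
        {
            byte* i_ptr = this.i_ptr;
            byte* i_end = this.i_end;
            uint bit_buffer = this.bit_buffer;
            uint bits_left = this.bits_left;

            // ReadInput() has already latched this.error if this fails
            if (READ_IF_NEEDED(ref i_ptr, ref i_end) != MSPACK_ERR.MSPACK_ERR_OK)
                return;

            INJECT_BITS_LSB(*i_ptr++, 8, ref bit_buffer, ref bits_left);
            STORE_BITS(i_ptr, i_end, bit_buffer, bits_left);
        }

        // Read a 3-bit field from the stream
        public int ReadThreeBits()
        {
            // Top up the cached bits, bailing out if the input has errored
            while (this.bits_left < 3 && this.error == MSPACK_ERR.MSPACK_ERR_OK)
                READ_BYTES();

            uint bit_buffer = this.bit_buffer;
            uint bits_left = this.bits_left;
            int value = PEEK_BITS_LSB(3, bit_buffer);
            REMOVE_BITS_LSB(3, ref bit_buffer, ref bits_left);

            this.bit_buffer = bit_buffer;
            this.bits_left = bits_left;
            return value;
        }
    }
}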


@@ -1,206 +0,0 @@
namespace SabreTools.Compression.libmspack
{
public unsafe static class readhuff
{
public const int HUFF_MAXBITS = 16;
#region MSB
/// <summary>
/// This function was originally coded by David Tritscher.
/// It builds a fast Huffman decoding table from
/// a canonical Huffman code lengths table.
/// </summary>
/// <param name="nsyms">Total number of symbols in this huffman tree.</param>
/// <param name="nbits">
/// Any symbols with a code length of nbits or less can be decoded
/// in one lookup of the table.
/// </param>
/// <param name="length">A table to get code lengths from [0 to nsyms-1]</param>
/// <param name="table">
/// The table to fill up with decoded symbols and pointers.
/// Should be ((1 &lt;&lt; nbits) + (nsyms * 2)) entries in length.
/// </param>
/// <returns>Returns 0 for OK or 1 for error</returns>
public static int make_decode_table_MSB(uint nsyms, uint nbits, byte* length, ushort* table)
{
ushort sym, next_symbol;
uint leaf, fill;
byte bit_num;
uint pos = 0; // The current position in the decode table
uint table_mask = (uint)(1 << (int)nbits);
uint bit_mask = table_mask >> 1; // Don't do 0 length codes
// Fill entries for codes short enough for a direct mapping
for (bit_num = 1; bit_num <= nbits; bit_num++)
{
for (sym = 0; sym < nsyms; sym++)
{
if (length[sym] != bit_num) continue;
leaf = pos;
if ((pos += bit_mask) > table_mask) return 1; // Table overrun
// Fill all possible lookups of this symbol with the symbol itself
for (fill = bit_mask; fill-- > 0;) table[leaf++] = sym;
}
bit_mask >>= 1;
}
// Exit with success if table is now complete
if (pos == table_mask) return 0;
// Mark all remaining table entries as unused
for (sym = (ushort)pos; sym < table_mask; sym++)
{
table[sym] = 0xFFFF;
}
// next_symbol = base of allocation for long codes
next_symbol = (ushort)(((table_mask >> 1) < nsyms) ? nsyms : (table_mask >> 1));
// Give ourselves room for codes to grow by up to 16 more bits.
// codes now start at bit nbits+16 and end at (nbits+16-codelength)
pos <<= 16;
table_mask <<= 16;
bit_mask = 1 << 15;
for (bit_num = (byte)(nbits + 1); bit_num <= HUFF_MAXBITS; bit_num++)
{
for (sym = 0; sym < nsyms; sym++)
{
if (length[sym] != bit_num) continue;
if (pos >= table_mask) return 1; // Table overflow
leaf = pos >> 16;
for (fill = 0; fill < (bit_num - nbits); fill++)
{
// If this path hasn't been taken yet, 'allocate' two entries
if (table[leaf] == 0xFFFF)
{
table[(next_symbol << 1)] = 0xFFFF;
table[(next_symbol << 1) + 1] = 0xFFFF;
table[leaf] = next_symbol++;
}
// Follow the path and select either left or right for next bit
leaf = (uint)(table[leaf] << 1);
if (((pos >> (int)(15 - fill)) & 1) != 0) leaf++;
}
table[leaf] = sym;
pos += bit_mask;
}
bit_mask >>= 1;
}
// Full table?
return (pos == table_mask) ? 0 : 1;
}
#endregion
#region LSB
/// <summary>
/// This function was originally coded by David Tritscher.
/// It builds a fast Huffman decoding table from
/// a canonical Huffman code lengths table.
/// </summary>
/// <param name="nsyms">Total number of symbols in this huffman tree.</param>
/// <param name="nbits">
/// Any symbols with a code length of nbits or less can be decoded
/// in one lookup of the table.
/// </param>
/// <param name="length">A table to get code lengths from [0 to nsyms-1]</param>
/// <param name="table">
/// The table to fill up with decoded symbols and pointers.
/// Should be ((1 &lt;&lt; nbits) + (nsyms * 2)) entries in length.
/// </param>
/// <returns>Returns 0 for OK or 1 for error</returns>
public static int make_decode_table_LSB(uint nsyms, uint nbits, byte* length, ushort* table)
{
ushort sym, next_symbol;
uint leaf, fill;
uint reverse;
byte bit_num;
uint pos = 0; // The current position in the decode table
uint table_mask = (uint)(1 << (int)nbits);
uint bit_mask = table_mask >> 1; // Don't do 0 length codes
// Fill entries for codes short enough for a direct mapping
for (bit_num = 1; bit_num <= nbits; bit_num++)
{
for (sym = 0; sym < nsyms; sym++)
{
if (length[sym] != bit_num) continue;
// Reverse the significant bits
fill = length[sym]; reverse = pos >> (int)(nbits - fill); leaf = 0;
do { leaf <<= 1; leaf |= reverse & 1; reverse >>= 1; } while (--fill > 0);
if ((pos += bit_mask) > table_mask) return 1; // Table overrun
// Fill all possible lookups of this symbol with the symbol itself
fill = bit_mask; next_symbol = (ushort)(1 << bit_num);
do { table[leaf] = sym; leaf += next_symbol; } while (--fill > 0);
}
bit_mask >>= 1;
}
// Exit with success if table is now complete
if (pos == table_mask) return 0;
// Mark all remaining table entries as unused
for (sym = (ushort)pos; sym < table_mask; sym++)
{
reverse = sym; leaf = 0; fill = nbits;
do { leaf <<= 1; leaf |= reverse & 1; reverse >>= 1; } while (--fill > 0);
table[leaf] = 0xFFFF;
}
// next_symbol = base of allocation for long codes
next_symbol = (ushort)(((table_mask >> 1) < nsyms) ? nsyms : (table_mask >> 1));
// Give ourselves room for codes to grow by up to 16 more bits.
// codes now start at bit nbits+16 and end at (nbits+16-codelength)
pos <<= 16;
table_mask <<= 16;
bit_mask = 1 << 15;
for (bit_num = (byte)(nbits + 1); bit_num <= HUFF_MAXBITS; bit_num++)
{
for (sym = 0; sym < nsyms; sym++)
{
if (length[sym] != bit_num) continue;
if (pos >= table_mask) return 1; // Table overflow
// leaf = the first nbits of the code, reversed
reverse = pos >> 16; leaf = 0; fill = nbits;
do { leaf <<= 1; leaf |= reverse & 1; reverse >>= 1; } while (--fill > 0);
for (fill = 0; fill < (bit_num - nbits); fill++)
{
// If this path hasn't been taken yet, 'allocate' two entries
if (table[leaf] == 0xFFFF)
{
table[(next_symbol << 1)] = 0xFFFF;
table[(next_symbol << 1) + 1] = 0xFFFF;
table[leaf] = next_symbol++;
}
// Follow the path and select either left or right for next bit
leaf = (uint)(table[leaf] << 1);
if (((pos >> (int)(15 - fill)) & 1) != 0) leaf++;
}
table[leaf] = sym;
pos += bit_mask;
}
bit_mask >>= 1;
}
// Full table?
return (pos == table_mask) ? 0 : 1;
}
#endregion
}
}
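
The doc comments above describe the table layout but not how it is traversed. The hypothetical helper below (the class and method names, and the "peeked bits" calling convention, are assumptions rather than part of the port) sketches the consuming side for the MSB variant, in the spirit of the upstream READ_HUFFSYM/HUFF_TRAVERSE macros: short codes resolve with one direct lookup of the top nbits, while entries at or above nsyms act as node pointers that are followed one extra bit at a time.

namespace SabreTools.Compression.libmspack
{
    public unsafe static class readhuff_example
    {
        // Decode one symbol from a table built by readhuff.make_decode_table_MSB.
        // 'peeked' holds the next HUFF_MAXBITS bits of input, first bit highest;
        // 'used' receives the number of bits the caller should remove.
        // Assumes nbits < HUFF_MAXBITS, as in the upstream decompressors.
        public static int DecodeSymbolMSB(ushort* table, byte* length, uint nsyms, uint nbits, uint peeked, out int used)
        {
            // Short codes resolve in a single lookup of the top nbits
            uint sym = table[peeked >> (int)(readhuff.HUFF_MAXBITS - nbits)];

            // Long codes: each entry >= nsyms is a node; follow one extra bit per step.
            // (make_decode_table_* only succeeds when the table is complete, so the
            // 0xFFFF placeholder entries are not expected to survive here.)
            uint mask = 1u << (int)(readhuff.HUFF_MAXBITS - nbits - 1);
            while (sym >= nsyms)
            {
                if (mask == 0) { used = 0; return -1; } // Ran past HUFF_MAXBITS: corrupt input
                sym = table[(sym << 1) | (((peeked & mask) != 0) ? 1u : 0u)];
                mask >>= 1;
            }

            // The symbol's code length tells the caller how many bits to remove
            used = length[sym];
            return (int)sym;
        }
    }
}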


@@ -1,42 +0,0 @@
/* This file is part of libmspack.
* (C) 2003-2018 Stuart Caie.
*
* libmspack is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License (LGPL) version 2.1
*
* For further details, see the file COPYING.LIB distributed with libmspack
*/
namespace SabreTools.Compression.libmspack
{
public unsafe static class system
{
/// <summary>
/// Determines the length of a file opened for reading, storing it in <paramref name="length"/>
/// and leaving the read position unchanged
/// </summary>
public static MSPACK_ERR mspack_sys_filelen(mspack_system system, mspack_file file, long* length)
{
if (system == null || file == null || length == null) return MSPACK_ERR.MSPACK_ERR_OPEN;
// Get current offset
long current = system.tell(file);
// Seek to end of file
if (system.seek(file, 0, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_END) != 0)
{
return MSPACK_ERR.MSPACK_ERR_SEEK;
}
// Get offset of end of file
*length = system.tell(file);
// Seek back to original offset
if (system.seek(file, current, MSPACK_SYS_SEEK.MSPACK_SYS_SEEK_START) != 0)
{
return MSPACK_ERR.MSPACK_ERR_SEEK;
}
return MSPACK_ERR.MSPACK_ERR_OK;
}
}
}
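
Because mspack_sys_filelen seeks back to the original offset, it can be called mid-read. A small hypothetical wrapper (the class name, method name, and "-1 on error" convention are assumptions) shows the intended calling pattern:

namespace SabreTools.Compression.libmspack
{
    public unsafe static class SystemUsageExample
    {
        // sys and file are assumed to come from a concrete mspack_system
        // implementation and an already-opened mspack_file
        public static long GetLengthOrNegativeOne(mspack_system sys, mspack_file file)
        {
            long length;
            // The original read position is restored by mspack_sys_filelen itself
            if (system.mspack_sys_filelen(sys, file, &length) != MSPACK_ERR.MSPACK_ERR_OK)
                return -1;
            return length;
        }
    }
}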