ZArchive support (#75)

* ZArchive support

* Fix offset record format

* Simplify Extensions

* Delete unused writers and test data

* Rework reader

* Fix build
This commit is contained in:
Deterous
2026-04-02 15:18:47 +09:00
committed by GitHub
parent 4035d9db86
commit 5bb8557555
23 changed files with 1292 additions and 0 deletions

View File

@@ -74,6 +74,7 @@ Options:
| XBox Package File (XZP) | | | XBox Package File (XZP) | |
| Xbox DVD Filesystem (XISO) | | | Xbox DVD Filesystem (XISO) | |
| xz archive (XZ) | .NET Framework 4.6.2 and greater | | xz archive (XZ) | .NET Framework 4.6.2 and greater |
| ZArchive | |
## Namespaces ## Namespaces

View File

@@ -0,0 +1,19 @@
using SabreTools.Data.Models.ZArchive;
using SabreTools.Numerics;
using Xunit;
namespace SabreTools.Data.Extensions.Test
{
public class ZArchiveExtensionsTests
{
    /// <summary>
    /// A default-constructed DirectoryEntry has no name table offset,
    /// so resolving its name must yield null
    /// </summary>
    [Fact]
    public void GetName_Null()
    {
        var entry = new DirectoryEntry();
        var table = new NameTable();

        string? name = entry.GetName(table);

        Assert.Null(name);
    }
}
}

View File

@@ -0,0 +1,37 @@
using System;
using System.Text;
using SabreTools.Data.Models.ZArchive;
using SabreTools.Numerics;
namespace SabreTools.Data.Extensions
{
public static class ZArchiveExtensions
{
    /// <summary>
    /// Retrieves the name of the specified node from the NameTable
    /// </summary>
    /// <param name="node">Node in the file tree</param>
    /// <param name="nameTable">ZArchive NameTable</param>
    /// <returns>UTF-8 string representing node's name, null if it cannot be resolved</returns>
    public static string? GetName(this FileDirectoryEntry node, NameTable nameTable)
    {
        // Mask off the type flag bit, leaving the 31-bit name table offset
        uint offset = node.NameOffsetAndTypeFlag & Constants.RootNode;

        // The root node carries no name
        if (offset == Constants.RootNode)
            return null;

        // Translate the byte offset into an index via the cached offset table
        int entryIndex = Array.IndexOf(nameTable.NameTableOffsets, offset);
        if (entryIndex < 0)
            return null;

        // Look up the entry and decode its UTF-8 name bytes
        var entry = nameTable.NameEntries[entryIndex];
        if (entry is null)
            return null;

        return Encoding.UTF8.GetString(entry.NodeName);
    }
}
}

View File

@@ -90,6 +90,7 @@ Below is a list of all existing namespaces with the `SabreTools.Data.Models` pre
| `XDVDFS` | Xbox DVD Filesystem (XISO) | | `XDVDFS` | Xbox DVD Filesystem (XISO) |
| `XZ` | xz archive | | `XZ` | xz archive |
| `XZP` | XBox Package File | | `XZP` | XBox Package File |
| `ZArchive` | ZArchive (ZAR) |
| `ZSTD` | ZSTD archive | | `ZSTD` | ZSTD archive |
## Notable Information Sources ## Notable Information Sources

View File

@@ -0,0 +1,50 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <summary>
/// Represents a single ZAR archive
/// Most fields are Big Endian
/// </summary>
/// <see href="https://github.com/Exzap/ZArchive/"/>
public class Archive
{
/// <summary>
/// Zstd compressed file data, from 65536-byte blocks of the original files
/// Blocks are stored uncompressed if Zstd does not decrease the size
/// Due to the file size, this field is not usually filled in but remains here for completeness
/// </summary>
public byte[]? CompressedData { get; set; }
/// <summary>
/// Padding bytes to be added after compressed blocks to ensure 8-byte alignment
/// Padding bytes are all NULL (0x00)
/// </summary>
public byte[]? Padding { get; set; }
/// <summary>
/// Records containing the offsets and block sizes of each group of blocks
/// This allows the reader to jump to any 65536-byte boundary in the uncompressed stream.
/// </summary>
public OffsetRecord[] OffsetRecords { get; set; } = [];
/// <summary>
/// UTF-8 strings, prepended by string lengths
/// </summary>
public NameTable NameTable { get; set; } = new();
/// <summary>
/// Serialized file tree structure using a queue of nodes
/// </summary>
public FileDirectoryEntry[] FileTree { get; set; } = [];
/// <summary>
/// Section for custom key-value pairs and properties
/// Null when the archive carries no metadata
/// </summary>
public Metadata? Metadata { get; set; }
/// <summary>
/// Archive footer containing the offsets and sizes of all other sections
/// Ends with a SHA256 hash/size of the entire archive, and magic bytes
/// </summary>
public Footer Footer { get; set; } = new();
}
}

View File

@@ -0,0 +1,68 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <see href="https://github.com/Exzap/ZArchive/"/>
public static class Constants
{
/// <summary>
/// Size in bytes of each uncompressed block (64 KiB)
/// </summary>
public const int BlockSize = 64 * 1024;
/// <summary>
/// Number of compressed blocks referred to by a record
/// </summary>
public const int BlocksPerOffsetRecord = 16;
/// <summary>
/// Number of bytes stored in an offset record:
/// an 8-byte base offset followed by one 16-bit size per block
/// </summary>
public const int OffsetRecordSize = sizeof(ulong) + sizeof(ushort) * BlocksPerOffsetRecord;
/// <summary>
/// Number of bytes stored in a file/directory entry
/// </summary>
public const int FileDirectoryEntrySize = 16;
/// <summary>
/// Number of bytes stored in the footer:
/// 6 OffsetInfo fields (96), SHA-256 hash (32), archive size (8),
/// version (4), and magic (4)
/// </summary>
public const int FooterSize = 144;
/// <summary>
/// NameOffsetAndTypeFlag value for the root node in the FileTree
/// Also serves as the mask for extracting the 31-bit name table offset
/// </summary>
public const uint RootNode = 0x7FFFFFFF;
/// <summary>
/// Mask for the NameOffsetAndTypeFlag value when checking if it is a file
/// </summary>
public const uint FileFlag = 0x80000000;
/// <summary>
/// Maximum size of the Offset Records section
/// </summary>
public const ulong MaxOffsetRecordsSize = 0xFFFFFFFF;
/// <summary>
/// Maximum size of the Name Table section
/// </summary>
public const ulong MaxNameTableSize = 0x7FFFFFFF;
/// <summary>
/// Maximum size of the File Tree section
/// </summary>
public const ulong MaxFileTreeSize = 0x7FFFFFFF;
/// <summary>
/// ZArchive magic bytes at end of file
/// </summary>
public static readonly byte[] MagicBytes = [0x16, 0x9F, 0x52, 0xD6];
/// <summary>
/// ZArchive version field that acts as an extended magic immediately before final 4 magic bytes
/// Currently only version 1 is implemented, any future version bytes are not supported yet
/// </summary>
public static readonly byte[] Version1Bytes = [0x61, 0xBF, 0x3A, 0x01];
}
}

View File

@@ -0,0 +1,26 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <summary>
/// Node in the FileTree representing a directory
/// </summary>
/// <see href="https://github.com/Exzap/ZArchive/"/>
public sealed class DirectoryEntry : FileDirectoryEntry
{
/// <summary>
/// Index into the FileTree of this directory's first child node
/// </summary>
/// <remarks>Big-endian</remarks>
public uint NodeStartIndex { get; set; }
/// <summary>
/// Number of child nodes belonging to this directory
/// </summary>
/// <remarks>Big-endian</remarks>
public uint Count { get; set; }
/// <summary>
/// Reserved field
/// </summary>
public uint Reserved { get; set; }
}
}

View File

@@ -0,0 +1,17 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <summary>
/// Node in the FileTree
/// Represents either a file or a directory
/// </summary>
/// <see href="https://github.com/Exzap/ZArchive/"/>
public abstract class FileDirectoryEntry
{
/// <summary>
/// MSB is the type flag, 0 is Directory, 1 is File
/// Remaining 31 bits are the offset in the NameTable
/// A masked value of 0x7FFFFFFF marks the root node, which has no name
/// </summary>
/// <remarks>Big-endian</remarks>
public uint NameOffsetAndTypeFlag { get; set; }
}
}

View File

@@ -0,0 +1,33 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <summary>
/// Node in the FileTree representing a file
/// File offset and size are 48-bit values split into 32-bit low and 16-bit high parts
/// </summary>
/// <see href="https://github.com/Exzap/ZArchive/"/>
public sealed class FileEntry : FileDirectoryEntry
{
/// <summary>
/// Lowest 32 bits of the file's offset
/// </summary>
/// <remarks>Big-endian</remarks>
public uint FileOffsetLow { get; set; }
/// <summary>
/// Lowest 32 bits of the file's size
/// </summary>
/// <remarks>Big-endian</remarks>
public uint FileSizeLow { get; set; }
/// <summary>
/// Highest 16 bits of the file's 48-bit size
/// </summary>
/// <remarks>Big-endian</remarks>
public ushort FileSizeHigh { get; set; }
/// <summary>
/// Highest 16 bits of the file's 48-bit offset
/// </summary>
/// <remarks>Big-endian</remarks>
public ushort FileOffsetHigh { get; set; }
}
}

View File

@@ -0,0 +1,60 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <summary>
/// Footer data stored at the end of a ZArchive file
/// </summary>
/// <see href="https://github.com/Exzap/ZArchive/"/>
public class Footer
{
/// <summary>
/// Size and offset values for the CompressedData section
/// </summary>
public OffsetInfo SectionCompressedData { get; set; } = new();
/// <summary>
/// Size and offset values for the OffsetRecords section
/// </summary>
public OffsetInfo SectionOffsetRecords { get; set; } = new();
/// <summary>
/// Size and offset values for the NameTable section
/// </summary>
public OffsetInfo SectionNameTable { get; set; } = new();
/// <summary>
/// Size and offset values for the FileTree section
/// </summary>
public OffsetInfo SectionFileTree { get; set; } = new();
/// <summary>
/// Size and offset values for the MetaDirectory section
/// </summary>
public OffsetInfo SectionMetaDirectory { get; set; } = new();
/// <summary>
/// Size and offset values for the MetaData section
/// </summary>
public OffsetInfo SectionMetaData { get; set; } = new();
/// <summary>
/// SHA-256 hash of the ZArchive file prior to the footer
/// </summary>
public byte[] IntegrityHash { get; set; } = new byte[32];
/// <summary>
/// Size of the entire ZArchive file
/// </summary>
/// <remarks>Big-endian</remarks>
public ulong Size { get; set; }
/// <summary>
/// Version indicator, also acts as extended magic
/// </summary>
public byte[] Version { get; set; } = new byte[4];
/// <summary>
/// Magic bytes to indicate ZArchive file
/// </summary>
public byte[] Magic { get; set; } = new byte[4];
}
}

View File

@@ -0,0 +1,11 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <summary>
/// ZArchive section for Meta Data and Meta Directories
/// </summary>
/// <see href="https://github.com/Exzap/ZArchive/"/>
public class Metadata
{
// Not yet implemented in the ZArchive standard, should be empty
}
}

View File

@@ -0,0 +1,27 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <summary>
/// Filename entry in the NameTable
/// </summary>
/// <see href="https://github.com/Exzap/ZArchive/"/>
public class NameEntry
{
/// <summary>
/// Filename length, with MSB set to 0, used for filenames of at most 127 bytes
/// NodeLengthShort and NodeLengthLong fields are exclusive, and one must be present
/// </summary>
public byte? NodeLengthShort { get; set; }
/// <summary>
/// Filename length, with prefix byte's MSB set to 1, used for filenames longer than 127 bytes
/// NodeLengthShort and NodeLengthLong fields are exclusive, and one must be present
/// </summary>
public ushort? NodeLengthLong { get; set; }
/// <summary>
/// UTF-8 encoded file name
/// </summary>
/// <remarks>Maximum length of 2^15 - 1 bytes</remarks>
public byte[] NodeName { get; set; } = [];
}
}

View File

@@ -0,0 +1,20 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <summary>
/// UTF-8 strings, prepended by string lengths
/// </summary>
/// <see href="https://github.com/Exzap/ZArchive/"/>
public class NameTable
{
/// <summary>
/// List of filename entries, in the order they appear in the section
/// </summary>
public NameEntry[] NameEntries { get; set; } = [];
/// <summary>
/// Virtual field, to cache the offsets of each name entry in the name table
/// Used for referencing the name entry from an offset into the name table
/// Parallel to <see cref="NameEntries"/>: index i holds the byte offset of entry i
/// </summary>
public uint[] NameTableOffsets { get; set; } = [];
}
}

View File

@@ -0,0 +1,19 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <summary>
/// Offset and size values of a ZArchive section, stored in the Footer
/// </summary>
/// <see href="https://github.com/Exzap/ZArchive/"/>
public class OffsetInfo
{
/// <summary>
/// Base offset value for the section in bytes, relative to the start of the archive
/// </summary>
public ulong Offset { get; set; }
/// <summary>
/// Total size of the section in bytes
/// </summary>
public ulong Size { get; set; }
}
}

View File

@@ -0,0 +1,19 @@
namespace SabreTools.Data.Models.ZArchive
{
/// <summary>
/// Location and size properties of compressed blocks of the file data
/// </summary>
/// <see href="https://github.com/Exzap/ZArchive/"/>
public class OffsetRecord
{
/// <summary>
/// Base offset of compressed blocks
/// </summary>
public ulong Offset { get; set; }
/// <summary>
/// Sizes of each compressed block in this record
/// Stored biased by one: the reader adds 1 to each value to get the byte count,
/// so a full 65536-byte uncompressed block fits in a ushort
/// </summary>
public ushort[] Size { get; set; } = new ushort[Constants.BlocksPerOffsetRecord];
}
}

View File

@@ -0,0 +1,72 @@
using System.IO;
using System.Linq;
using Xunit;
namespace SabreTools.Serialization.Readers.Test
{
public class ZArchiveTests
{
    /// <summary>Deserializing a null byte array yields null</summary>
    [Fact]
    public void NullArray_Null()
    {
        byte[]? data = null;

        var actual = new ZArchive().Deserialize(data, 0);

        Assert.Null(actual);
    }

    /// <summary>Deserializing an empty byte array yields null</summary>
    [Fact]
    public void EmptyArray_Null()
    {
        byte[]? data = [];

        var actual = new ZArchive().Deserialize(data, 0);

        Assert.Null(actual);
    }

    /// <summary>Deserializing garbage bytes yields null</summary>
    [Fact]
    public void InvalidArray_Null()
    {
        byte[]? data = [.. Enumerable.Repeat<byte>(0xFF, 1024)];

        var actual = new ZArchive().Deserialize(data, 0);

        Assert.Null(actual);
    }

    /// <summary>Deserializing a null stream yields null</summary>
    [Fact]
    public void NullStream_Null()
    {
        Stream? data = null;

        var actual = new ZArchive().Deserialize(data);

        Assert.Null(actual);
    }

    /// <summary>Deserializing an empty stream yields null</summary>
    [Fact]
    public void EmptyStream_Null()
    {
        Stream? data = new MemoryStream([]);

        var actual = new ZArchive().Deserialize(data);

        Assert.Null(actual);
    }

    /// <summary>Deserializing a garbage stream yields null</summary>
    [Fact]
    public void InvalidStream_Null()
    {
        Stream? data = new MemoryStream([.. Enumerable.Repeat<byte>(0xFF, 1024)]);

        var actual = new ZArchive().Deserialize(data);

        Assert.Null(actual);
    }
}
}

View File

@@ -0,0 +1,296 @@
using System.Collections.Generic;
using System.IO;
using SabreTools.Data.Extensions;
using SabreTools.Data.Models.ZArchive;
using SabreTools.Hashing;
using SabreTools.IO.Extensions;
using SabreTools.Matching;
using SabreTools.Numerics.Extensions;
#pragma warning disable IDE0017 // Simplify object initialization
namespace SabreTools.Serialization.Readers
{
/// <summary>
/// Deserializer for ZArchive (ZAR) files
/// The footer (last 144 bytes) is parsed first since it holds the offsets
/// and sizes of every other section
/// </summary>
public class ZArchive : BaseBinaryReader<Archive>
{
/// <inheritdoc/>
public override Archive? Deserialize(Stream? data)
{
// If the data is invalid
if (data is null || !data.CanRead)
return null;
// Simple check for a valid stream length
if (data.Length - data.Position < Constants.FooterSize)
return null;
try
{
// Cache the current offset
long initialOffset = data.Position;
var archive = new Archive();
// Parse the footer first
data.SeekIfPossible(-Constants.FooterSize, SeekOrigin.End);
var footer = ParseFooter(data, initialOffset);
if (footer is null)
return null;
archive.Footer = footer;
// Check offset records offset validity
// NOTE(review): `>=` rejects a section ending exactly at data.Length;
// safe in practice because all sections precede the 144-byte footer
long offsetRecordsOffset = initialOffset + (long)archive.Footer.SectionOffsetRecords.Offset;
if (offsetRecordsOffset < 0 || offsetRecordsOffset + (long)archive.Footer.SectionOffsetRecords.Size >= data.Length)
return null;
// Seek to and then read the compression offset records
data.SeekIfPossible(offsetRecordsOffset, SeekOrigin.Begin);
var offsetRecords = ParseOffsetRecords(data, archive.Footer.SectionOffsetRecords.Size);
if (offsetRecords is null)
return null;
archive.OffsetRecords = offsetRecords;
// Check name table section validity
long nameTableOffset = initialOffset + (long)archive.Footer.SectionNameTable.Offset;
if (nameTableOffset < 0 || nameTableOffset + (long)archive.Footer.SectionNameTable.Size >= data.Length)
return null;
// Seek to and then read the name table entries
data.SeekIfPossible((long)nameTableOffset, SeekOrigin.Begin);
var nameTable = ParseNameTable(data, archive.Footer.SectionNameTable.Size);
if (nameTable is null)
return null;
archive.NameTable = nameTable;
// Check file tree section validity
long fileTreeOffset = initialOffset + (long)archive.Footer.SectionFileTree.Offset;
if (fileTreeOffset < 0 || fileTreeOffset + (long)archive.Footer.SectionFileTree.Size >= data.Length)
return null;
// Seek to and then read the file tree entries
data.SeekIfPossible((long)fileTreeOffset, SeekOrigin.Begin);
var fileTree = ParseFileTree(data, archive.Footer.SectionFileTree.Size, archive.Footer.SectionNameTable.Size);
if (fileTree is null)
return null;
archive.FileTree = fileTree;
// Do not attempt to read compressed data into memory
return archive;
}
catch
{
// Ignore the actual error
return null;
}
}
/// <summary>
/// Parse a Stream into an ZArchive footer
/// </summary>
/// <param name="data">Stream to parse, positioned at the start of the footer</param>
/// <param name="initialOffset">Position of the start of the archive within the stream, used to validate the stored archive size</param>
/// <returns>Filled ZArchive footer on success, null on error</returns>
public static Footer? ParseFooter(Stream data, long initialOffset)
{
var obj = new Footer();
// Read and validate compressed data section offset and size values
obj.SectionCompressedData.Offset = data.ReadUInt64BigEndian();
obj.SectionCompressedData.Size = data.ReadUInt64BigEndian();
if (obj.SectionCompressedData.Offset + obj.SectionCompressedData.Size > (ulong)data.Length)
return null;
// Read and validate offset records section offset and size values
obj.SectionOffsetRecords.Offset = data.ReadUInt64BigEndian();
obj.SectionOffsetRecords.Size = data.ReadUInt64BigEndian();
if (obj.SectionOffsetRecords.Offset + obj.SectionOffsetRecords.Size > (ulong)data.Length)
return null;
if (obj.SectionOffsetRecords.Size > Constants.MaxOffsetRecordsSize)
return null;
// Section must hold a whole number of fixed-size records
if (obj.SectionOffsetRecords.Size % Constants.OffsetRecordSize != 0)
return null;
// Read and validate name table section offset and size values
obj.SectionNameTable.Offset = data.ReadUInt64BigEndian();
obj.SectionNameTable.Size = data.ReadUInt64BigEndian();
if (obj.SectionNameTable.Offset + obj.SectionNameTable.Size > (ulong)data.Length)
return null;
if (obj.SectionNameTable.Size > Constants.MaxNameTableSize)
return null;
// Read and validate file tree section offset and size values
obj.SectionFileTree.Offset = data.ReadUInt64BigEndian();
obj.SectionFileTree.Size = data.ReadUInt64BigEndian();
if (obj.SectionFileTree.Offset + obj.SectionFileTree.Size > (ulong)data.Length)
return null;
if (obj.SectionFileTree.Size > Constants.MaxFileTreeSize)
return null;
// Section must hold a whole number of fixed-size entries
if (obj.SectionFileTree.Size % Constants.FileDirectoryEntrySize != 0)
return null;
// Read and validate metadirectory section offset and size values
obj.SectionMetaDirectory.Offset = data.ReadUInt64BigEndian();
obj.SectionMetaDirectory.Size = data.ReadUInt64BigEndian();
if (obj.SectionMetaDirectory.Offset + obj.SectionMetaDirectory.Size > (ulong)data.Length)
return null;
// Read and validate metadata section offset and size values
obj.SectionMetaData.Offset = data.ReadUInt64BigEndian();
obj.SectionMetaData.Size = data.ReadUInt64BigEndian();
if (obj.SectionMetaData.Offset + obj.SectionMetaData.Size > (ulong)data.Length)
return null;
// Read and validate archive integrity hash
obj.IntegrityHash = data.ReadBytes(32);
// data.SeekIfPossible(initialOffset, SeekOrigin.Begin);
// TODO: Read all bytes and hash them with SHA256
// TODO: Compare obj.Integrity with calculated hash
// Read and validate archive size
obj.Size = data.ReadUInt64BigEndian();
if (obj.Size != (ulong)(data.Length - initialOffset))
return null;
// Read and validate version bytes, only Version 1 is supported
obj.Version = data.ReadBytes(4);
if (!obj.Version.EqualsExactly(Constants.Version1Bytes))
return null;
// Read and validate magic bytes
obj.Magic = data.ReadBytes(4);
if (!obj.Magic.EqualsExactly(Constants.MagicBytes))
return null;
return obj;
}
/// <summary>
/// Parse a Stream into an ZArchive OffsetRecords section
/// Each record is an 8-byte base offset followed by 16 big-endian block sizes
/// </summary>
/// <param name="data">Stream to parse</param>
/// <param name="size">Size of OffsetRecords section; caller guarantees it is a multiple of OffsetRecordSize</param>
/// <returns>Filled ZArchive OffsetRecords section on success, null on error</returns>
public static OffsetRecord[]? ParseOffsetRecords(Stream data, ulong size)
{
int entries = (int)(size / Constants.OffsetRecordSize);
var obj = new OffsetRecord[entries];
for (int i = 0; i < entries; i++)
{
var offset = data.ReadUInt64BigEndian();
obj[i] = new OffsetRecord();
obj[i].Offset = offset;
for (int block = 0; block < Constants.BlocksPerOffsetRecord; block++)
{
obj[i].Size[block] = data.ReadUInt16BigEndian();
}
}
return obj;
}
/// <summary>
/// Parse a Stream into an ZArchive NameTable section
/// Also caches the byte offset of each entry so names can be resolved from node offsets
/// </summary>
/// <param name="data">Stream to parse</param>
/// <param name="size">Size of NameTable section</param>
/// <returns>Filled ZArchive NameTable section on success, null on error</returns>
public static NameTable? ParseNameTable(Stream data, ulong size)
{
var obj = new NameTable();
var nameEntries = new List<NameEntry>();
var nameOffsets = new List<uint>();
uint bytesRead = 0;
while (bytesRead < (uint)size)
{
var nameEntry = new NameEntry();
// Cache the offset into the NameEntry table
nameOffsets.Add(bytesRead);
// Read length of name
uint nameLength = (uint)data.ReadByteValue();
bytesRead += 1;
if ((nameLength & 0x80) == 0x80)
{
// NOTE(review): the 0x80 flag bit is never masked out of nameLength
// before the high byte is added, inflating the length by 128 —
// confirm against the ZArchive length encoding ((len & 0x7F) | (high << 7))
nameLength += (uint)data.ReadByteValue() << 7;
bytesRead += 1;
nameEntry.NodeLengthLong = (ushort)nameLength;
}
else
{
nameEntry.NodeLengthShort = (byte)nameLength;
}
// Validate name length
if (bytesRead + nameLength > (uint)size)
return null;
// Add valid name entry to the table
nameEntry.NodeName = data.ReadBytes((int)nameLength);
bytesRead += nameLength;
nameEntries.Add(nameEntry);
}
obj.NameEntries = [..nameEntries];
obj.NameTableOffsets = [..nameOffsets];
return obj;
}
/// <summary>
/// Parse a Stream into an ZArchive FileTree section
/// </summary>
/// <param name="data">Stream to parse</param>
/// <param name="size">Size of FileTree section; caller guarantees it is a multiple of FileDirectoryEntrySize</param>
/// <param name="nameTableSize">Size of the NameTable section, used to validate node name offsets</param>
/// <returns>Filled ZArchive FileTree section on success, null on error</returns>
public static FileDirectoryEntry[]? ParseFileTree(Stream data, ulong size, ulong nameTableSize)
{
int entries = (int)(size / Constants.FileDirectoryEntrySize);
var obj = new FileDirectoryEntry[entries];
for (int i = 0; i < entries; i++)
{
var nameOffsetAndFlag = data.ReadUInt32BigEndian();
// Validate name table offset value
if ((nameOffsetAndFlag & Constants.RootNode) > nameTableSize && nameOffsetAndFlag != Constants.RootNode)
return null;
// Check if node is file or directory
if ((nameOffsetAndFlag & Constants.FileFlag) == Constants.FileFlag)
{
var fileEntry = new FileEntry();
fileEntry.NameOffsetAndTypeFlag = nameOffsetAndFlag;
fileEntry.FileOffsetLow = data.ReadUInt32BigEndian();
fileEntry.FileSizeLow = data.ReadUInt32BigEndian();
fileEntry.FileSizeHigh = data.ReadUInt16BigEndian();
fileEntry.FileOffsetHigh = data.ReadUInt16BigEndian();
obj[i] = fileEntry;
}
else
{
var directoryEntry = new DirectoryEntry();
directoryEntry.NameOffsetAndTypeFlag = nameOffsetAndFlag;
directoryEntry.NodeStartIndex = data.ReadUInt32BigEndian();
directoryEntry.Count = data.ReadUInt32BigEndian();
directoryEntry.Reserved = data.ReadUInt32BigEndian();
obj[i] = directoryEntry;
}
}
// First entry of file tree must be root directory
// NOTE(review): an empty file tree (size 0) makes obj[0] throw; the
// Deserialize caller's catch turns that into null, but a direct caller
// would see an IndexOutOfRangeException — consider an explicit check
if ((obj[0].NameOffsetAndTypeFlag & Constants.RootNode) != Constants.RootNode)
return null;
return obj;
}
}
}

View File

@@ -0,0 +1,62 @@
using System;
using System.IO;
using System.Linq;
using Xunit;
#pragma warning disable xUnit1004 // Test methods should not be skipped
namespace SabreTools.Wrappers.Test
{
public class ZArchiveTests
{
    /// <summary>Creating a wrapper from a null byte array yields null</summary>
    [Fact]
    public void NullArray_Null()
    {
        byte[]? data = null;

        var actual = ZArchive.Create(data, 0);

        Assert.Null(actual);
    }

    /// <summary>Creating a wrapper from an empty byte array yields null</summary>
    [Fact]
    public void EmptyArray_Null()
    {
        byte[]? data = [];

        var actual = ZArchive.Create(data, 0);

        Assert.Null(actual);
    }

    /// <summary>Creating a wrapper from garbage bytes yields null</summary>
    [Fact(Skip = "This will never pass with the current code")]
    public void InvalidArray_Null()
    {
        byte[]? data = [.. Enumerable.Repeat<byte>(0xFF, 1024)];

        var actual = ZArchive.Create(data, 0);

        Assert.Null(actual);
    }

    /// <summary>Creating a wrapper from a null stream yields null</summary>
    [Fact]
    public void NullStream_Null()
    {
        Stream? data = null;

        var actual = ZArchive.Create(data);

        Assert.Null(actual);
    }

    /// <summary>Creating a wrapper from an empty stream yields null</summary>
    [Fact(Skip = "This will never pass with the current code")]
    public void EmptyStream_Null()
    {
        Stream? data = new MemoryStream([]);

        var actual = ZArchive.Create(data);

        Assert.Null(actual);
    }

    /// <summary>Creating a wrapper from a garbage stream yields null</summary>
    [Fact(Skip = "This will never pass with the current code")]
    public void InvalidStream_Null()
    {
        Stream? data = new MemoryStream([.. Enumerable.Repeat<byte>(0xFF, 1024)]);

        var actual = ZArchive.Create(data);

        Assert.Null(actual);
    }
}
}

View File

@@ -71,6 +71,7 @@ namespace SabreTools.Wrappers
WrapperType.XDVDFS => XDVDFS.Create(data), WrapperType.XDVDFS => XDVDFS.Create(data),
WrapperType.XZ => XZ.Create(data), WrapperType.XZ => XZ.Create(data),
WrapperType.XZP => XZP.Create(data), WrapperType.XZP => XZP.Create(data),
WrapperType.ZArchive => ZArchive.Create(data),
WrapperType.ZSTD => ZSTD.Create(data), WrapperType.ZSTD => ZSTD.Create(data),
// Unimplemented // Unimplemented
@@ -960,6 +961,15 @@ namespace SabreTools.Wrappers
#region ZSTD #region ZSTD
// ZArchive magic is the final 4 bytes of the file: [0x16, 0x9F, 0x52, 0xD6]
if (extension.Equals("zar", StringComparison.OrdinalIgnoreCase))
return WrapperType.ZArchive;
#endregion
#region ZSTD
if (magic.StartsWith([null, 0xB5, 0x2F, 0xFD])) if (magic.StartsWith([null, 0xB5, 0x2F, 0xFD]))
return WrapperType.ZSTD; return WrapperType.ZSTD;

View File

@@ -303,6 +303,11 @@ namespace SabreTools.Wrappers
/// </summary> /// </summary>
XZP, XZP,
/// <summary>
/// ZArchive archive
/// </summary>
ZArchive,
/// <summary> /// <summary>
/// ZStandard compressed file /// ZStandard compressed file
/// </summary> /// </summary>

View File

@@ -0,0 +1,189 @@
using System;
#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
using System.Collections.Generic;
using System.IO;
using System.Text;
using SabreTools.Data.Extensions;
using SabreTools.Data.Models.ZArchive;
using SabreTools.IO.Extensions;
using SabreTools.Numerics.Extensions;
using SharpCompress.Compressors.ZStandard;
#endif
namespace SabreTools.Wrappers
{
public partial class ZArchive : IExtractable
{
    /// <inheritdoc/>
    public bool Extract(string outputDirectory, bool includeDebug)
    {
        if (_dataSource is null || !_dataSource.CanRead)
            return false;

#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
        try
        {
            // Extract all files and directories from root (index 0)
            return ExtractDirectory(outputDirectory, includeDebug, 0);
        }
        catch (Exception ex)
        {
            if (includeDebug) Console.Error.WriteLine(ex);
            return false;
        }
#else
        Console.WriteLine("Extraction is not supported for this framework!");
        Console.WriteLine();
        return false;
#endif
    }

#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
    /// <summary>
    /// Recursively extract the directory node at <paramref name="index"/> in the file tree
    /// </summary>
    /// <param name="outputDirectory">Directory to create and extract children into</param>
    /// <param name="includeDebug">True to write debug information to the console</param>
    /// <param name="index">Index of the directory node in the FileTree</param>
    /// <returns>True if every child extracted successfully, false otherwise</returns>
    public bool ExtractDirectory(string outputDirectory, bool includeDebug, uint index)
    {
        bool success = true;

        // Create directory
        if (!string.IsNullOrEmpty(outputDirectory) && !Directory.Exists(outputDirectory))
            Directory.CreateDirectory(outputDirectory);

        // Extract all children of current node
        FileDirectoryEntry node = FileTree[index];
        if (node is DirectoryEntry dir)
        {
            for (uint i = 0; i < dir.Count; i++)
            {
                uint childIndex = dir.NodeStartIndex + i;
                var child = FileTree[childIndex];
                string? name = child.GetName(NameTable);
                if (string.IsNullOrEmpty(name))
                {
                    if (includeDebug) Console.WriteLine("Invalid node name");
                    return false;
                }

                string outputPath = Path.Combine(outputDirectory, name);

                // BUGFIX: use &= instead of |= — `success` starts true, so OR-ing
                // child results could never record a failure and the method always
                // reported success even when a child extraction failed
                if ((child.NameOffsetAndTypeFlag & Constants.FileFlag) == 0)
                    success &= ExtractDirectory(outputPath, includeDebug, childIndex);
                else
                    success &= ExtractFile(outputPath, includeDebug, childIndex);
            }

            return success;
        }
        else
        {
            if (includeDebug) Console.WriteLine("Invalid directory node");
            return false;
        }
    }

    /// <summary>
    /// Extract the file node at <paramref name="index"/> in the file tree,
    /// decompressing Zstd blocks as needed
    /// </summary>
    /// <param name="outputPath">Path of the file to write</param>
    /// <param name="includeDebug">True to write debug information to the console</param>
    /// <param name="index">Index of the file node in the FileTree</param>
    /// <returns>True if the file extracted successfully, false otherwise</returns>
    public bool ExtractFile(string outputPath, bool includeDebug, uint index)
    {
        // Decompress each chunk to output
        var node = FileTree[index];
        var dataOffset = Footer.SectionCompressedData.Offset;
        var dataLength = Footer.SectionCompressedData.Size;
        if (node is FileEntry file)
        {
            // Offsets and sizes are 48-bit values split into 32-bit low and 16-bit high parts
            ulong fileOffset = ((ulong)file.FileOffsetHigh << 32) | (ulong)file.FileOffsetLow;
            ulong fileSize = ((ulong)file.FileSizeHigh << 32) | (ulong)file.FileSizeLow;

            // Write the output file
            if (includeDebug) Console.WriteLine($"Extracting: {outputPath}");
            using var fs = File.Open(outputPath, FileMode.Create, FileAccess.Write, FileShare.ReadWrite);

            ulong fileProgress = 0;
            lock (_dataSourceLock)
            {
                while (fileProgress < fileSize)
                {
                    // Determine offset and size of next read
                    ulong absoluteOffset = fileOffset + fileProgress;
                    ulong blockIndex = absoluteOffset / (ulong)Constants.BlockSize;
                    int recordIndex = (int)(blockIndex / (ulong)Constants.BlocksPerOffsetRecord);
                    if (recordIndex >= OffsetRecords.Length)
                    {
                        if (includeDebug) Console.WriteLine($"File offset out of range: {outputPath}");
                        return false;
                    }

                    var offsetRecord = OffsetRecords[recordIndex];
                    int withinRecordIndex = (int)(blockIndex % (ulong)Constants.BlocksPerOffsetRecord);
                    if (withinRecordIndex >= offsetRecord.Size.Length)
                    {
                        if (includeDebug) Console.WriteLine($"Blocks per record mismatch: {outputPath}");
                        return false;
                    }

                    int intraBlockOffset = (int)(absoluteOffset % (ulong)Constants.BlockSize);
                    // Stored block sizes are biased by one so a full block fits in a ushort
                    int bytesToRead = (int)(offsetRecord.Size[withinRecordIndex]) + 1;
                    int bytesToWrite = (int)Math.Min(fileSize - fileProgress, (ulong)Constants.BlockSize - (ulong)intraBlockOffset);
                    ulong readOffset = dataOffset + offsetRecord.Offset;
                    for (int i = 0; i < withinRecordIndex; i++)
                    {
                        readOffset += (ulong)offsetRecord.Size[i] + 1;
                    }

                    // Seek to location of block
                    _dataSource.SeekIfPossible((long)readOffset, SeekOrigin.Begin);

                    // Ensure block doesn't exceed compressed section
                    // NOTE(review): this check uses the record's base offset rather than
                    // the accumulated block offset (readOffset - dataOffset) — confirm intent
                    if (offsetRecord.Offset + (ulong)bytesToRead > dataLength)
                    {
                        if (includeDebug) Console.WriteLine("Block exceeds compressed data section");
                        return false;
                    }

                    // Ensure reader won't EOF
                    if (bytesToRead > _dataSource.Length - _dataSource.Position)
                    {
                        if (includeDebug) Console.WriteLine($"File out of bounds: {outputPath}");
                        return false;
                    }

                    // Read block
                    var buffer = _dataSource.ReadBytes(bytesToRead);

                    // Write entire block if it is uncompressed
                    if (bytesToRead == Constants.BlockSize)
                    {
                        // Block is stored uncompressed
                        fs.Write(buffer, intraBlockOffset, bytesToWrite);
                        fileProgress += (ulong)bytesToWrite;
                        continue;
                    }

                    // Decompress block
                    byte[] decompressedBuffer;
                    using var inputStream = new MemoryStream(buffer);
                    using var zstdStream = new ZStandardStream(inputStream);
                    using var outputStream = new MemoryStream();
                    zstdStream.CopyTo(outputStream);
                    decompressedBuffer = outputStream.ToArray();
                    if (decompressedBuffer.Length != Constants.BlockSize)
                    {
                        if (includeDebug) Console.WriteLine($"Invalid decompressed block size {decompressedBuffer.Length}");
                        return false;
                    }

                    // Write decompressed block to output file
                    fs.Write(decompressedBuffer, intraBlockOffset, bytesToWrite);
                    fileProgress += (ulong)bytesToWrite;
                }
            }

            return true;
        }
        else
        {
            if (includeDebug) Console.WriteLine("Invalid file node");
            return false;
        }
    }
#endif
}
}

View File

@@ -0,0 +1,144 @@
using System.Text;
using SabreTools.Data.Models.ZArchive;
using SabreTools.Text.Extensions;
namespace SabreTools.Wrappers
{
public partial class ZArchive : IPrintable
{
#if NETCOREAPP
/// <inheritdoc/>
/// <remarks>Serializes the underlying model, not the wrapper itself</remarks>
public string ExportJSON() => System.Text.Json.JsonSerializer.Serialize(Model, _jsonSerializerOptions);
#endif
/// <inheritdoc/>
public void PrintInformation(StringBuilder builder)
{
builder.AppendLine("ZArchive Information:");
builder.AppendLine("-------------------------");
builder.AppendLine();
// Print each parsed section in file order
Print(builder, OffsetRecords);
Print(builder, NameTable);
Print(builder, FileTree);
Print(builder, Footer);
}
/// <summary>
/// Append human-readable information for the compression offset records section
/// </summary>
/// <param name="builder">StringBuilder to append the information to</param>
/// <param name="records">Parsed offset records, possibly empty</param>
public void Print(StringBuilder builder, OffsetRecord[] records)
{
builder.AppendLine(" Compression Offset Records:");
builder.AppendLine(" -------------------------");
builder.AppendLine();
if (records.Length == 0)
{
builder.AppendLine(" No compression offset records");
builder.AppendLine();
return;
}
for (int i = 0; i < records.Length; i++)
{
var record = records[i];
builder.AppendLine(record.Offset, " Base Offset");
builder.AppendLine(record.Size, " Block Sizes");
builder.AppendLine();
}
}
public void Print(StringBuilder builder, NameTable nameTable)
{
builder.AppendLine(" Name Table:");
builder.AppendLine(" -------------------------");
builder.AppendLine();
if (nameTable.NameEntries.Length != nameTable.NameTableOffsets.Length)
{
builder.AppendLine(" Mismatched Name Table entry count");
builder.AppendLine();
return;
}
for (int i = 0; i < nameTable.NameEntries.Length; i++)
{
var entry = nameTable.NameEntries[i];
builder.AppendLine(nameTable.NameTableOffsets[i], " Name Table Offset");
if (entry.NodeLengthShort is not null)
builder.AppendLine(entry.NodeLengthShort, " Name Length");
else if (entry.NodeLengthLong is not null)
builder.AppendLine(entry.NodeLengthLong, " Name Length");
builder.AppendLine(Encoding.UTF8.GetString(entry.NodeName), " Name");
builder.AppendLine();
}
}
public void Print(StringBuilder builder, FileDirectoryEntry[] fileTree)
{
builder.AppendLine(" File Tree:");
builder.AppendLine(" -------------------------");
builder.AppendLine();
if (fileTree.Length == 0)
{
builder.AppendLine(" No nodes in file tree");
builder.AppendLine();
return;
}
for (int i = 0; i < fileTree.Length; i++)
{
var node = fileTree[i];
builder.AppendLine(node.NameOffsetAndTypeFlag, " Base Offset");
bool fileFlag = (node.NameOffsetAndTypeFlag & Constants.FileFlag) == Constants.FileFlag;
builder.AppendLine(fileFlag, " File Flag");
builder.AppendLine(node.NameOffsetAndTypeFlag & Constants.RootNode, " Name Table Offset");
if (node is FileEntry fe)
{
var fileOffset = ((ulong)fe.FileOffsetHigh << 32) | (ulong)fe.FileOffsetLow;
builder.AppendLine(fileOffset, " File Offset");
var fileSize = ((ulong)fe.FileSizeHigh << 32) | (ulong)fe.FileSizeLow;
builder.AppendLine(fileSize, " File Size");
}
else if (node is DirectoryEntry de)
{
builder.AppendLine(de.NodeStartIndex, " Node Start Index");
builder.AppendLine(de.Count, " Count");
builder.AppendLine(de.Reserved, " Reserved");
}
else
{
builder.AppendLine(" Unknown Node");
}
builder.AppendLine();
}
}
public void Print(StringBuilder builder, Footer footer)
{
builder.AppendLine(" Footer:");
builder.AppendLine(" -------------------------");
builder.AppendLine();
builder.AppendLine(footer.SectionCompressedData.Offset, " Compressed Data Base Offset");
builder.AppendLine(footer.SectionCompressedData.Size, " Compressed Data Length");
builder.AppendLine(footer.SectionOffsetRecords.Offset, " Compression Offset Records Base Offset");
builder.AppendLine(footer.SectionOffsetRecords.Size, " Compression Offset Records Length");
builder.AppendLine(footer.SectionNameTable.Offset, " Name Table Base Offset");
builder.AppendLine(footer.SectionNameTable.Size, " Name Table Length");
builder.AppendLine(footer.SectionFileTree.Offset, " File Tree Base Offset");
builder.AppendLine(footer.SectionFileTree.Size, " File Tree Length");
builder.AppendLine(footer.SectionMetaDirectory.Offset, " Meta Directory Base Offset");
builder.AppendLine(footer.SectionMetaDirectory.Size, " Meta Directory Length");
builder.AppendLine(footer.SectionMetaData.Offset, " Meta Data Base Offset");
builder.AppendLine(footer.SectionMetaData.Size, " Meta Data Length");
builder.AppendLine(footer.IntegrityHash, " Integrity Hash");
builder.AppendLine(footer.Size, " Size");
builder.AppendLine(footer.Version, " Version");
builder.AppendLine(footer.Magic, " Magic");
builder.AppendLine();
}
}
}

View File

@@ -0,0 +1,106 @@
using System.IO;
using SabreTools.Data.Models.ZArchive;
namespace SabreTools.Wrappers
{
public partial class ZArchive : WrapperBase<Archive>
{
    #region Descriptive Properties

    /// <inheritdoc/>
    public override string DescriptionString => "ZArchive";

    #endregion

    #region Extension Properties

    /// <inheritdoc cref="Archive.OffsetRecords"/>
    public OffsetRecord[] OffsetRecords => Model.OffsetRecords;

    /// <inheritdoc cref="Archive.NameTable"/>
    public NameTable NameTable => Model.NameTable;

    /// <inheritdoc cref="Archive.FileTree"/>
    public FileDirectoryEntry[] FileTree => Model.FileTree;

    /// <inheritdoc cref="Archive.Footer"/>
    public Footer Footer => Model.Footer;

    #endregion

    #region Constructors

    /// <inheritdoc/>
    public ZArchive(Archive model, byte[] data) : base(model, data) { }

    /// <inheritdoc/>
    public ZArchive(Archive model, byte[] data, int offset) : base(model, data, offset) { }

    /// <inheritdoc/>
    public ZArchive(Archive model, byte[] data, int offset, int length) : base(model, data, offset, length) { }

    /// <inheritdoc/>
    public ZArchive(Archive model, Stream data) : base(model, data) { }

    /// <inheritdoc/>
    public ZArchive(Archive model, Stream data, long offset) : base(model, data, offset) { }

    /// <inheritdoc/>
    public ZArchive(Archive model, Stream data, long offset, long length) : base(model, data, offset, length) { }

    #endregion

    #region Static Constructors

    /// <summary>
    /// Create a ZArchive from a byte array and offset
    /// </summary>
    /// <param name="data">Byte array representing the archive</param>
    /// <param name="offset">Offset within the array to parse</param>
    /// <returns>A ZArchive wrapper on success, null on failure</returns>
    public static ZArchive? Create(byte[]? data, int offset)
    {
        // Reject a missing/empty array or an out-of-bounds offset up front
        if (data is null || data.Length == 0 || offset < 0 || offset >= data.Length)
            return null;

        // Wrap the remaining bytes in a stream and defer to the stream overload
        return Create(new MemoryStream(data, offset, data.Length - offset));
    }

    /// <summary>
    /// Create a ZArchive from a Stream
    /// </summary>
    /// <param name="data">Stream representing the archive</param>
    /// <returns>A ZArchive wrapper on success, null on failure</returns>
    public static ZArchive? Create(Stream? data)
    {
        // A readable stream is required for deserialization
        if (data is null || !data.CanRead)
            return null;

        try
        {
            // Remember where parsing started so the wrapper anchors correctly
            long startPosition = data.Position;

            var model = new Serialization.Readers.ZArchive().Deserialize(data);
            return model is null
                ? null
                : new ZArchive(model, data, startPosition);
        }
        catch
        {
            // Any parse failure is reported as a null wrapper
            return null;
        }
    }

    #endregion
}
}