From 5bb855755505d2f7a606b04ab6633b6d270c19f9 Mon Sep 17 00:00:00 2001
From: Deterous <138427222+Deterous@users.noreply.github.com>
Date: Thu, 2 Apr 2026 15:18:47 +0900
Subject: [PATCH] ZArchive support (#75)
* ZArchive support
* Fix offset record format
* Simplify Extensions
* Delete unused writers and test data
* Rework reader
* Fix build
---
README.MD | 1 +
.../ZArchiveExtensionsTests.cs | 19 ++
.../ZArchiveExtensions.cs | 37 +++
SabreTools.Data.Models/README.MD | 1 +
SabreTools.Data.Models/ZArchive/Archive.cs | 50 +++
SabreTools.Data.Models/ZArchive/Constants.cs | 68 ++++
.../ZArchive/DirectoryEntry.cs | 26 ++
.../ZArchive/FileDirectoryEntry.cs | 17 +
SabreTools.Data.Models/ZArchive/FileEntry.cs | 33 ++
SabreTools.Data.Models/ZArchive/Footer.cs | 60 ++++
SabreTools.Data.Models/ZArchive/Metadata.cs | 11 +
SabreTools.Data.Models/ZArchive/NameEntry.cs | 27 ++
SabreTools.Data.Models/ZArchive/NameTable.cs | 20 ++
SabreTools.Data.Models/ZArchive/OffsetInfo.cs | 19 ++
.../ZArchive/OffsetRecord.cs | 19 ++
.../ZArchiveTests.cs | 72 +++++
SabreTools.Serialization.Readers/ZArchive.cs | 296 ++++++++++++++++++
SabreTools.Wrappers.Test/ZArchiveTests.cs | 62 ++++
SabreTools.Wrappers/WrapperFactory.cs | 10 +
SabreTools.Wrappers/WrapperType.cs | 5 +
SabreTools.Wrappers/ZArchive.Extraction.cs | 189 +++++++++++
SabreTools.Wrappers/ZArchive.Printing.cs | 144 +++++++++
SabreTools.Wrappers/ZArchive.cs | 106 +++++++
23 files changed, 1292 insertions(+)
create mode 100644 SabreTools.Data.Extensions.Test/ZArchiveExtensionsTests.cs
create mode 100644 SabreTools.Data.Extensions/ZArchiveExtensions.cs
create mode 100644 SabreTools.Data.Models/ZArchive/Archive.cs
create mode 100644 SabreTools.Data.Models/ZArchive/Constants.cs
create mode 100644 SabreTools.Data.Models/ZArchive/DirectoryEntry.cs
create mode 100644 SabreTools.Data.Models/ZArchive/FileDirectoryEntry.cs
create mode 100644 SabreTools.Data.Models/ZArchive/FileEntry.cs
create mode 100644 SabreTools.Data.Models/ZArchive/Footer.cs
create mode 100644 SabreTools.Data.Models/ZArchive/Metadata.cs
create mode 100644 SabreTools.Data.Models/ZArchive/NameEntry.cs
create mode 100644 SabreTools.Data.Models/ZArchive/NameTable.cs
create mode 100644 SabreTools.Data.Models/ZArchive/OffsetInfo.cs
create mode 100644 SabreTools.Data.Models/ZArchive/OffsetRecord.cs
create mode 100644 SabreTools.Serialization.Readers.Test/ZArchiveTests.cs
create mode 100644 SabreTools.Serialization.Readers/ZArchive.cs
create mode 100644 SabreTools.Wrappers.Test/ZArchiveTests.cs
create mode 100644 SabreTools.Wrappers/ZArchive.Extraction.cs
create mode 100644 SabreTools.Wrappers/ZArchive.Printing.cs
create mode 100644 SabreTools.Wrappers/ZArchive.cs
diff --git a/README.MD b/README.MD
index 5bdad96b..56f6bbf7 100644
--- a/README.MD
+++ b/README.MD
@@ -74,6 +74,7 @@ Options:
| XBox Package File (XZP) | |
| Xbox DVD Filesystem (XISO) | |
| xz archive (XZ) | .NET Framework 4.6.2 and greater |
+| ZArchive | |
## Namespaces
diff --git a/SabreTools.Data.Extensions.Test/ZArchiveExtensionsTests.cs b/SabreTools.Data.Extensions.Test/ZArchiveExtensionsTests.cs
new file mode 100644
index 00000000..0e8d3bd4
--- /dev/null
+++ b/SabreTools.Data.Extensions.Test/ZArchiveExtensionsTests.cs
@@ -0,0 +1,19 @@
+using SabreTools.Data.Models.ZArchive;
+using SabreTools.Numerics;
+using Xunit;
+
+namespace SabreTools.Data.Extensions.Test
+{
+ public class ZArchiveExtensionsTests
+ {
+ [Fact]
+ public void GetName_Null()
+ {
+ var de = new DirectoryEntry();
+ NameTable nt = new NameTable();
+ string? expected = null;
+ string? actual = de.GetName(nt);
+ Assert.Equal(expected, actual);
+ }
+ }
+}
diff --git a/SabreTools.Data.Extensions/ZArchiveExtensions.cs b/SabreTools.Data.Extensions/ZArchiveExtensions.cs
new file mode 100644
index 00000000..b63b9916
--- /dev/null
+++ b/SabreTools.Data.Extensions/ZArchiveExtensions.cs
@@ -0,0 +1,37 @@
+using System;
+using System.Text;
+using SabreTools.Data.Models.ZArchive;
+using SabreTools.Numerics;
+
+namespace SabreTools.Data.Extensions
+{
+ public static class ZArchiveExtensions
+ {
+ ///
+ /// Retrieves the name of the specified node from the NameTable
+ ///
+ /// Node in the file tree
+ /// ZArchive NameTable
+ /// UTF-8 string representing node's name
+ public static string? GetName(this FileDirectoryEntry node, NameTable nameTable)
+ {
+ // Check for a valid offset into the NameTable
+ uint nameOffset = node.NameOffsetAndTypeFlag & Constants.RootNode;
+ if (nameOffset == Constants.RootNode)
+ return null;
+
+ // Get the index into the name table
+ var index = Array.IndexOf(nameTable.NameTableOffsets, nameOffset);
+ if (index < 0)
+ return null;
+
+ // Get the name entry for the requested index
+ var nameEntry = nameTable.NameEntries[index];
+ if (nameEntry is null)
+ return null;
+
+ // Decode name to UTF-8
+ return Encoding.UTF8.GetString(nameEntry.NodeName);
+ }
+ }
+}
diff --git a/SabreTools.Data.Models/README.MD b/SabreTools.Data.Models/README.MD
index a4e1d2cf..99843c18 100644
--- a/SabreTools.Data.Models/README.MD
+++ b/SabreTools.Data.Models/README.MD
@@ -90,6 +90,7 @@ Below is a list of all existing namespaces with the `SabreTools.Data.Models` pre
| `XDVDFS` | Xbox DVD Filesystem (XISO) |
| `XZ` | xz archive |
| `XZP` | XBox Package File |
+| `ZArchive` | ZArchive (ZAR) |
| `ZSTD` | ZSTD archive |
## Notable Information Sources
diff --git a/SabreTools.Data.Models/ZArchive/Archive.cs b/SabreTools.Data.Models/ZArchive/Archive.cs
new file mode 100644
index 00000000..882c81b8
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/Archive.cs
@@ -0,0 +1,50 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ /// Represents a single ZAR archive
+ /// Most fields are Big Endian
+ ///
+ ///
+ public class Archive
+ {
+ ///
+ /// Zstd compressed file data, from 65536-byte blocks of the original files
+ /// Blocks are stored uncompressed if ZStd does not decrease the size
+ /// Due to the file size, this field is not usually filled in but remains here for completeness
+ ///
+ public byte[]? CompressedData { get; set; }
+
+ ///
+ /// Padding bytes to be added after compressed blocks to ensure 8-byte alignment
+ /// Padding bytes are all NULL (0x00)
+ ///
+ public byte[]? Padding { get; set; }
+
+ ///
+ /// Records containing the offsets and block sizes of each group of blocks
+ /// This allows the reader to jump to any 65536-byte boundary in the uncompressed stream.
+ ///
+ public OffsetRecord[] OffsetRecords { get; set; } = [];
+
+ ///
+ /// UTF-8 strings, prepended by string lengths
+ ///
+ public NameTable NameTable { get; set; } = new();
+
+ ///
+ /// Serialized file tree structure using a queue of nodes
+ ///
+ public FileDirectoryEntry[] FileTree { get; set; } = [];
+
+ ///
+ /// Section for custom key-value pairs and properties
+ ///
+ public Metadata? Metadata { get; set; }
+
+ ///
+ /// Archive footer containing the offsets and sizes of all other sections
+ /// Ends with a SHA256 hash/size of the entire archive, and magic bytes
+ ///
+ public Footer Footer { get; set; } = new();
+ }
+}
diff --git a/SabreTools.Data.Models/ZArchive/Constants.cs b/SabreTools.Data.Models/ZArchive/Constants.cs
new file mode 100644
index 00000000..1663ef27
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/Constants.cs
@@ -0,0 +1,68 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ public static class Constants
+ {
+ ///
+ /// Size in bytes of each uncompressed block of file data
+ ///
+ public const int BlockSize = 64 * 1024;
+
+ ///
+ /// Number of compressed blocks referred to by a record
+ ///
+ public const int BlocksPerOffsetRecord = 16;
+
+ ///
+ /// Number of bytes stored in an offset record
+ ///
+ public const int OffsetRecordSize = sizeof(ulong) + sizeof(ushort) * BlocksPerOffsetRecord;
+
+ ///
+ /// Number of bytes stored in a file/directory entry
+ ///
+ public const int FileDirectoryEntrySize = 16;
+
+ ///
+ /// Number of bytes stored in the footer
+ /// 6 OffsetInfo fields,
+ ///
+ public const int FooterSize = 144;
+
+ ///
+ /// NameOffsetAndTypeFlag value for the root node in the FileTree
+ ///
+ public const uint RootNode = 0x7FFFFFFF;
+
+ ///
+ /// Mask for the NameOffsetAndTypeFlag value when checking if it is a file
+ ///
+ public const uint FileFlag = 0x80000000;
+
+ ///
+ /// Maximum size of the Offset Records section
+ ///
+ public const ulong MaxOffsetRecordsSize = 0xFFFFFFFF;
+
+ ///
+ /// Maximum size of the Name Table section
+ ///
+ public const ulong MaxNameTableSize = 0x7FFFFFFF;
+
+ ///
+ /// Maximum size of the File Tree section
+ ///
+ public const ulong MaxFileTreeSize = 0x7FFFFFFF;
+
+ ///
+ /// ZArchive magic bytes at end of file
+ ///
+ public static readonly byte[] MagicBytes = [0x16, 0x9F, 0x52, 0xD6];
+
+ ///
+ /// ZArchive version field that acts as an extended magic immediately before final 4 magic bytes
+ /// Currently only version 1 is implemented, any future version bytes are not supported yet
+ ///
+ public static readonly byte[] Version1Bytes = [0x61, 0xBF, 0x3A, 0x01];
+ }
+}
diff --git a/SabreTools.Data.Models/ZArchive/DirectoryEntry.cs b/SabreTools.Data.Models/ZArchive/DirectoryEntry.cs
new file mode 100644
index 00000000..e3d49854
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/DirectoryEntry.cs
@@ -0,0 +1,26 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ /// Node in the FileTree representing a directory
+ ///
+ ///
+ public sealed class DirectoryEntry : FileDirectoryEntry
+ {
+ ///
+ /// Starting index of the directory node
+ ///
+ /// Big-endian
+ public uint NodeStartIndex { get; set; }
+
+ ///
+ /// Number of child nodes contained in this directory
+ ///
+ /// Big-endian
+ public uint Count { get; set; }
+
+ ///
+ /// Reserved field
+ ///
+ public uint Reserved { get; set; }
+ }
+}
diff --git a/SabreTools.Data.Models/ZArchive/FileDirectoryEntry.cs b/SabreTools.Data.Models/ZArchive/FileDirectoryEntry.cs
new file mode 100644
index 00000000..3d142a92
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/FileDirectoryEntry.cs
@@ -0,0 +1,17 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ /// Node in the FileTree
+ /// Represents either a file or a directory
+ ///
+ ///
+ public abstract class FileDirectoryEntry
+ {
+ ///
+ /// MSB is the type flag, 0 is Directory, 1 is File
+ /// Remaining 31 bits are the offset in the NameTable
+ ///
+ /// Big-endian
+ public uint NameOffsetAndTypeFlag { get; set; }
+ }
+}
diff --git a/SabreTools.Data.Models/ZArchive/FileEntry.cs b/SabreTools.Data.Models/ZArchive/FileEntry.cs
new file mode 100644
index 00000000..a3edf2f7
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/FileEntry.cs
@@ -0,0 +1,33 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ /// Node in the FileTree representing a file
+ ///
+ ///
+ public sealed class FileEntry : FileDirectoryEntry
+ {
+ ///
+ /// Lowest 32 bits of the file's offset
+ ///
+ /// Big-endian
+ public uint FileOffsetLow { get; set; }
+
+ ///
+ /// Lowest 32 bits of the file's size
+ ///
+ /// Big-endian
+ public uint FileSizeLow { get; set; }
+
+ ///
+ /// Highest 16 bits of the file's size
+ ///
+ /// Big-endian
+ public ushort FileSizeHigh { get; set; }
+
+ ///
+ /// Highest 16 bits of the file's offset
+ ///
+ /// Big-endian
+ public ushort FileOffsetHigh { get; set; }
+ }
+}
diff --git a/SabreTools.Data.Models/ZArchive/Footer.cs b/SabreTools.Data.Models/ZArchive/Footer.cs
new file mode 100644
index 00000000..2353a70f
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/Footer.cs
@@ -0,0 +1,60 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ /// Footer data stored at the end of a ZArchive file
+ ///
+ ///
+ public class Footer
+ {
+ ///
+ /// Size and offset values for the CompressedData section
+ ///
+ public OffsetInfo SectionCompressedData { get; set; } = new();
+
+ ///
+ /// Size and offset values for the OffsetRecords section
+ ///
+ public OffsetInfo SectionOffsetRecords { get; set; } = new();
+
+ ///
+ /// Size and offset values for the NameTable section
+ ///
+ public OffsetInfo SectionNameTable { get; set; } = new();
+
+ ///
+ /// Size and offset values for the FileTree section
+ ///
+ public OffsetInfo SectionFileTree { get; set; } = new();
+
+ ///
+ /// Size and offset values for the MetaDirectory section
+ ///
+ public OffsetInfo SectionMetaDirectory { get; set; } = new();
+
+ ///
+ /// Size and offset values for the MetaData section
+ ///
+ public OffsetInfo SectionMetaData { get; set; } = new();
+
+ ///
+ /// SHA-256 hash of the ZArchive file prior the footer
+ ///
+ public byte[] IntegrityHash { get; set; } = new byte[32];
+
+ ///
+ /// Size of the entire ZArchive file
+ ///
+ /// Big-endian
+ public ulong Size { get; set; }
+
+ ///
+ /// Version indicator, also acts as extended magic
+ ///
+ public byte[] Version { get; set; } = new byte[4];
+
+ ///
+ /// Magic bytes to indicate ZArchive file
+ ///
+ public byte[] Magic { get; set; } = new byte[4];
+ }
+}
diff --git a/SabreTools.Data.Models/ZArchive/Metadata.cs b/SabreTools.Data.Models/ZArchive/Metadata.cs
new file mode 100644
index 00000000..00ec8f15
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/Metadata.cs
@@ -0,0 +1,11 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ /// ZArchive section for Meta Data and Meta Directories
+ ///
+ ///
+ public class Metadata
+ {
+ // Not yet implemented in the ZArchive standard, should be empty
+ }
+}
diff --git a/SabreTools.Data.Models/ZArchive/NameEntry.cs b/SabreTools.Data.Models/ZArchive/NameEntry.cs
new file mode 100644
index 00000000..255c1c87
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/NameEntry.cs
@@ -0,0 +1,27 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ /// Filename entry in the NameTable
+ ///
+ ///
+ public class NameEntry
+ {
+ ///
+ /// Filename length, with MSB set to 0 for filenames 127 bytes or shorter
+ /// NodeLengthShort and NodeLengthLong fields are exclusive, and one must be present
+ ///
+ public byte? NodeLengthShort { get; set; }
+
+ ///
+ /// Filename length, with prefix byte's MSB set to 1 for filenames greater than 127 long
+ /// NodeLengthShort and NodeLengthLong fields are exclusive, and one must be present
+ ///
+ public ushort? NodeLengthLong { get; set; }
+
+ ///
+ /// UTF-8 encoded file name
+ ///
+ /// Maximum length of 2^15 - 1 bytes
+ public byte[] NodeName { get; set; } = [];
+ }
+}
diff --git a/SabreTools.Data.Models/ZArchive/NameTable.cs b/SabreTools.Data.Models/ZArchive/NameTable.cs
new file mode 100644
index 00000000..d5936ae1
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/NameTable.cs
@@ -0,0 +1,20 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ /// UTF-8 strings, prepended by string lengths
+ ///
+ ///
+ public class NameTable
+ {
+ ///
+ /// List of filename entries
+ ///
+ public NameEntry[] NameEntries { get; set; } = [];
+
+ ///
+ /// Virtual field, to cache the offsets of each name entry in the name table
+ /// Used for referencing the name entry from an offset into the name table
+ ///
+ public uint[] NameTableOffsets { get; set; } = [];
+ }
+}
diff --git a/SabreTools.Data.Models/ZArchive/OffsetInfo.cs b/SabreTools.Data.Models/ZArchive/OffsetInfo.cs
new file mode 100644
index 00000000..3fa1166e
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/OffsetInfo.cs
@@ -0,0 +1,19 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ /// Offset and size values of a ZArchive section, stored in the Footer
+ ///
+ ///
+ public class OffsetInfo
+ {
+ ///
+ /// Base offset value for the section in bytes
+ ///
+ public ulong Offset { get; set; }
+
+ ///
+ /// Total size of the section in bytes
+ ///
+ public ulong Size { get; set; }
+ }
+}
diff --git a/SabreTools.Data.Models/ZArchive/OffsetRecord.cs b/SabreTools.Data.Models/ZArchive/OffsetRecord.cs
new file mode 100644
index 00000000..c38cc9e3
--- /dev/null
+++ b/SabreTools.Data.Models/ZArchive/OffsetRecord.cs
@@ -0,0 +1,19 @@
+namespace SabreTools.Data.Models.ZArchive
+{
+ ///
+ /// Location and size properties of compressed blocks of the file data
+ ///
+ ///
+ public class OffsetRecord
+ {
+ ///
+ /// Base offset of compressed blocks
+ ///
+ public ulong Offset { get; set; }
+
+ ///
+ /// Sizes of each compressed block in this record
+ ///
+ public ushort[] Size { get; set; } = new ushort[Constants.BlocksPerOffsetRecord];
+ }
+}
diff --git a/SabreTools.Serialization.Readers.Test/ZArchiveTests.cs b/SabreTools.Serialization.Readers.Test/ZArchiveTests.cs
new file mode 100644
index 00000000..3484520a
--- /dev/null
+++ b/SabreTools.Serialization.Readers.Test/ZArchiveTests.cs
@@ -0,0 +1,72 @@
+using System.IO;
+using System.Linq;
+using Xunit;
+
+namespace SabreTools.Serialization.Readers.Test
+{
+ public class ZArchiveTests
+ {
+ [Fact]
+ public void NullArray_Null()
+ {
+ byte[]? data = null;
+ int offset = 0;
+ var deserializer = new ZArchive();
+
+ var actual = deserializer.Deserialize(data, offset);
+ Assert.Null(actual);
+ }
+
+ [Fact]
+ public void EmptyArray_Null()
+ {
+ byte[]? data = [];
+ int offset = 0;
+ var deserializer = new ZArchive();
+
+ var actual = deserializer.Deserialize(data, offset);
+ Assert.Null(actual);
+ }
+
+ [Fact]
+ public void InvalidArray_Null()
+ {
+ byte[]? data = [.. Enumerable.Repeat(0xFF, 1024)];
+ int offset = 0;
+ var deserializer = new ZArchive();
+
+ var actual = deserializer.Deserialize(data, offset);
+ Assert.Null(actual);
+ }
+
+ [Fact]
+ public void NullStream_Null()
+ {
+ Stream? data = null;
+ var deserializer = new ZArchive();
+
+ var actual = deserializer.Deserialize(data);
+ Assert.Null(actual);
+ }
+
+ [Fact]
+ public void EmptyStream_Null()
+ {
+ Stream? data = new MemoryStream([]);
+ var deserializer = new ZArchive();
+
+ var actual = deserializer.Deserialize(data);
+ Assert.Null(actual);
+ }
+
+ [Fact]
+ public void InvalidStream_Null()
+ {
+ Stream? data = new MemoryStream([.. Enumerable.Repeat(0xFF, 1024)]);
+ var deserializer = new ZArchive();
+
+ var actual = deserializer.Deserialize(data);
+ Assert.Null(actual);
+ }
+ }
+}
diff --git a/SabreTools.Serialization.Readers/ZArchive.cs b/SabreTools.Serialization.Readers/ZArchive.cs
new file mode 100644
index 00000000..a920d39b
--- /dev/null
+++ b/SabreTools.Serialization.Readers/ZArchive.cs
@@ -0,0 +1,296 @@
+using System.Collections.Generic;
+using System.IO;
+using SabreTools.Data.Extensions;
+using SabreTools.Data.Models.ZArchive;
+using SabreTools.Hashing;
+using SabreTools.IO.Extensions;
+using SabreTools.Matching;
+using SabreTools.Numerics.Extensions;
+
+#pragma warning disable IDE0017 // Simplify object initialization
+namespace SabreTools.Serialization.Readers
+{
+ public class ZArchive : BaseBinaryReader
+ {
+ ///
+ public override Archive? Deserialize(Stream? data)
+ {
+ // If the data is invalid
+ if (data is null || !data.CanRead)
+ return null;
+
+ // Simple check for a valid stream length
+ if (data.Length - data.Position < Constants.FooterSize)
+ return null;
+
+ try
+ {
+ // Cache the current offset
+ long initialOffset = data.Position;
+
+ var archive = new Archive();
+
+ // Parse the footer first
+ data.SeekIfPossible(-Constants.FooterSize, SeekOrigin.End);
+ var footer = ParseFooter(data, initialOffset);
+ if (footer is null)
+ return null;
+
+ archive.Footer = footer;
+
+ // Check offset records offset validity
+ long offsetRecordsOffset = initialOffset + (long)archive.Footer.SectionOffsetRecords.Offset;
+ if (offsetRecordsOffset < 0 || offsetRecordsOffset + (long)archive.Footer.SectionOffsetRecords.Size >= data.Length)
+ return null;
+
+ // Seek to and then read the compression offset records
+ data.SeekIfPossible(offsetRecordsOffset, SeekOrigin.Begin);
+ var offsetRecords = ParseOffsetRecords(data, archive.Footer.SectionOffsetRecords.Size);
+ if (offsetRecords is null)
+ return null;
+
+ archive.OffsetRecords = offsetRecords;
+
+ // Check name table section validity
+ long nameTableOffset = initialOffset + (long)archive.Footer.SectionNameTable.Offset;
+ if (nameTableOffset < 0 || nameTableOffset + (long)archive.Footer.SectionNameTable.Size >= data.Length)
+ return null;
+
+ // Seek to and then read the name table entries
+ data.SeekIfPossible((long)nameTableOffset, SeekOrigin.Begin);
+ var nameTable = ParseNameTable(data, archive.Footer.SectionNameTable.Size);
+ if (nameTable is null)
+ return null;
+
+ archive.NameTable = nameTable;
+
+ // Check file tree section validity
+ long fileTreeOffset = initialOffset + (long)archive.Footer.SectionFileTree.Offset;
+ if (fileTreeOffset < 0 || fileTreeOffset + (long)archive.Footer.SectionFileTree.Size >= data.Length)
+ return null;
+
+ // Seek to and then read the file tree entries
+ data.SeekIfPossible((long)fileTreeOffset, SeekOrigin.Begin);
+ var fileTree = ParseFileTree(data, archive.Footer.SectionFileTree.Size, archive.Footer.SectionNameTable.Size);
+ if (fileTree is null)
+ return null;
+
+ archive.FileTree = fileTree;
+
+ // Do not attempt to read compressed data into memory
+
+ return archive;
+ }
+ catch
+ {
+ // Ignore the actual error
+ return null;
+ }
+ }
+
+ ///
+ /// Parse a Stream into an ZArchive footer
+ ///
+ /// Stream to parse
+ /// Filled ZArchive footer on success, null on error
+ public static Footer? ParseFooter(Stream data, long initialOffset)
+ {
+ var obj = new Footer();
+
+ // Read and validate compressed data section offset and size values
+ obj.SectionCompressedData.Offset = data.ReadUInt64BigEndian();
+ obj.SectionCompressedData.Size = data.ReadUInt64BigEndian();
+ if (obj.SectionCompressedData.Offset + obj.SectionCompressedData.Size > (ulong)data.Length)
+ return null;
+
+ // Read and validate offset records section offset and size values
+ obj.SectionOffsetRecords.Offset = data.ReadUInt64BigEndian();
+ obj.SectionOffsetRecords.Size = data.ReadUInt64BigEndian();
+ if (obj.SectionOffsetRecords.Offset + obj.SectionOffsetRecords.Size > (ulong)data.Length)
+ return null;
+ if (obj.SectionOffsetRecords.Size > Constants.MaxOffsetRecordsSize)
+ return null;
+ if (obj.SectionOffsetRecords.Size % Constants.OffsetRecordSize != 0)
+ return null;
+
+ // Read and validate name table section offset and size values
+ obj.SectionNameTable.Offset = data.ReadUInt64BigEndian();
+ obj.SectionNameTable.Size = data.ReadUInt64BigEndian();
+ if (obj.SectionNameTable.Offset + obj.SectionNameTable.Size > (ulong)data.Length)
+ return null;
+ if (obj.SectionNameTable.Size > Constants.MaxNameTableSize)
+ return null;
+
+ // Read and validate file tree section offset and size values
+ obj.SectionFileTree.Offset = data.ReadUInt64BigEndian();
+ obj.SectionFileTree.Size = data.ReadUInt64BigEndian();
+ if (obj.SectionFileTree.Offset + obj.SectionFileTree.Size > (ulong)data.Length)
+ return null;
+ if (obj.SectionFileTree.Size > Constants.MaxFileTreeSize)
+ return null;
+ if (obj.SectionFileTree.Size % Constants.FileDirectoryEntrySize != 0)
+ return null;
+
+ // Read and validate metadirectory section offset and size values
+ obj.SectionMetaDirectory.Offset = data.ReadUInt64BigEndian();
+ obj.SectionMetaDirectory.Size = data.ReadUInt64BigEndian();
+ if (obj.SectionMetaDirectory.Offset + obj.SectionMetaDirectory.Size > (ulong)data.Length)
+ return null;
+
+ // Read and validate metadata section offset and size values
+ obj.SectionMetaData.Offset = data.ReadUInt64BigEndian();
+ obj.SectionMetaData.Size = data.ReadUInt64BigEndian();
+ if (obj.SectionMetaData.Offset + obj.SectionMetaData.Size > (ulong)data.Length)
+ return null;
+
+ // Read and validate archive integrity hash
+ obj.IntegrityHash = data.ReadBytes(32);
+ // data.SeekIfPossible(initialOffset, SeekOrigin.Begin);
+ // TODO: Read all bytes and hash them with SHA256
+ // TODO: Compare obj.Integrity with calculated hash
+
+ // Read and validate archive size
+ obj.Size = data.ReadUInt64BigEndian();
+ if (obj.Size != (ulong)(data.Length - initialOffset))
+ return null;
+
+ // Read and validate version bytes, only Version 1 is supported
+ obj.Version = data.ReadBytes(4);
+ if (!obj.Version.EqualsExactly(Constants.Version1Bytes))
+ return null;
+
+ // Read and validate magic bytes
+ obj.Magic = data.ReadBytes(4);
+ if (!obj.Magic.EqualsExactly(Constants.MagicBytes))
+ return null;
+
+ return obj;
+ }
+
+ ///
+ /// Parse a Stream into an ZArchive OffsetRecords section
+ ///
+ /// Stream to parse
+ /// Size of OffsetRecords section
+ /// Filled ZArchive OffsetRecords section on success, null on error
+ public static OffsetRecord[]? ParseOffsetRecords(Stream data, ulong size)
+ {
+ int entries = (int)(size / Constants.OffsetRecordSize);
+
+ var obj = new OffsetRecord[entries];
+
+ for (int i = 0; i < entries; i++)
+ {
+ var offset = data.ReadUInt64BigEndian();
+ obj[i] = new OffsetRecord();
+ obj[i].Offset = offset;
+ for (int block = 0; block < Constants.BlocksPerOffsetRecord; block++)
+ {
+ obj[i].Size[block] = data.ReadUInt16BigEndian();
+ }
+ }
+
+ return obj;
+ }
+
+ /// <summary>
+ /// Parse a Stream into a ZArchive NameTable section
+ /// </summary>
+ /// <param name="data">Stream to parse</param>
+ /// <param name="size">Size of NameTable section</param>
+ /// <returns>Filled ZArchive NameTable section on success, null on error</returns>
+ public static NameTable? ParseNameTable(Stream data, ulong size)
+ {
+ var obj = new NameTable();
+ var nameEntries = new List<NameEntry>();
+ var nameOffsets = new List<uint>();
+
+ uint bytesRead = 0;
+
+ while (bytesRead < (uint)size)
+ {
+ var nameEntry = new NameEntry();
+
+ // Cache the offset into the NameEntry table
+ nameOffsets.Add(bytesRead);
+
+ // Read length of name (low 7 bits; MSB flags a second, high-order length byte)
+ uint nameLength = (uint)data.ReadByteValue();
+ bytesRead += 1;
+ if ((nameLength & 0x80) == 0x80)
+ {
+ nameLength = (nameLength & 0x7F) | ((uint)data.ReadByteValue() << 7);
+ bytesRead += 1;
+ nameEntry.NodeLengthLong = (ushort)nameLength;
+ }
+ else
+ {
+ nameEntry.NodeLengthShort = (byte)nameLength;
+ }
+
+ // Validate name length
+ if (bytesRead + nameLength > (uint)size)
+ return null;
+
+ // Add valid name entry to the table
+ nameEntry.NodeName = data.ReadBytes((int)nameLength);
+ bytesRead += nameLength;
+ nameEntries.Add(nameEntry);
+ }
+
+ obj.NameEntries = [..nameEntries];
+ obj.NameTableOffsets = [..nameOffsets];
+
+ return obj;
+ }
+
+ ///
+ /// Parse a Stream into an ZArchive FileTree section
+ ///
+ /// Stream to parse
+ /// Size of FileTree section
+ /// Filled ZArchive FileTree section on success, null on error
+ public static FileDirectoryEntry[]? ParseFileTree(Stream data, ulong size, ulong nameTableSize)
+ {
+ int entries = (int)(size / Constants.FileDirectoryEntrySize);
+
+ var obj = new FileDirectoryEntry[entries];
+
+ for (int i = 0; i < entries; i++)
+ {
+ var nameOffsetAndFlag = data.ReadUInt32BigEndian();
+
+ // Validate name table offset value
+ if ((nameOffsetAndFlag & Constants.RootNode) > nameTableSize && nameOffsetAndFlag != Constants.RootNode)
+ return null;
+
+ // Check if node is file or directory
+ if ((nameOffsetAndFlag & Constants.FileFlag) == Constants.FileFlag)
+ {
+ var fileEntry = new FileEntry();
+ fileEntry.NameOffsetAndTypeFlag = nameOffsetAndFlag;
+ fileEntry.FileOffsetLow = data.ReadUInt32BigEndian();
+ fileEntry.FileSizeLow = data.ReadUInt32BigEndian();
+ fileEntry.FileSizeHigh = data.ReadUInt16BigEndian();
+ fileEntry.FileOffsetHigh = data.ReadUInt16BigEndian();
+ obj[i] = fileEntry;
+ }
+ else
+ {
+ var directoryEntry = new DirectoryEntry();
+ directoryEntry.NameOffsetAndTypeFlag = nameOffsetAndFlag;
+ directoryEntry.NodeStartIndex = data.ReadUInt32BigEndian();
+ directoryEntry.Count = data.ReadUInt32BigEndian();
+ directoryEntry.Reserved = data.ReadUInt32BigEndian();
+ obj[i] = directoryEntry;
+ }
+ }
+
+ // First entry of file tree must be root directory
+ if ((obj[0].NameOffsetAndTypeFlag & Constants.RootNode) != Constants.RootNode)
+ return null;
+
+ return obj;
+ }
+ }
+}
diff --git a/SabreTools.Wrappers.Test/ZArchiveTests.cs b/SabreTools.Wrappers.Test/ZArchiveTests.cs
new file mode 100644
index 00000000..17060ea8
--- /dev/null
+++ b/SabreTools.Wrappers.Test/ZArchiveTests.cs
@@ -0,0 +1,62 @@
+using System;
+using System.IO;
+using System.Linq;
+using Xunit;
+
+#pragma warning disable xUnit1004 // Test methods should not be skipped
+namespace SabreTools.Wrappers.Test
+{
+ public class ZArchiveTests
+ {
+ [Fact]
+ public void NullArray_Null()
+ {
+ byte[]? data = null;
+ int offset = 0;
+ var actual = ZArchive.Create(data, offset);
+ Assert.Null(actual);
+ }
+
+ [Fact]
+ public void EmptyArray_Null()
+ {
+ byte[]? data = [];
+ int offset = 0;
+ var actual = ZArchive.Create(data, offset);
+ Assert.Null(actual);
+ }
+
+ [Fact(Skip = "This will never pass with the current code")]
+ public void InvalidArray_Null()
+ {
+ byte[]? data = [.. Enumerable.Repeat(0xFF, 1024)];
+ int offset = 0;
+ var actual = ZArchive.Create(data, offset);
+ Assert.Null(actual);
+ }
+
+ [Fact]
+ public void NullStream_Null()
+ {
+ Stream? data = null;
+ var actual = ZArchive.Create(data);
+ Assert.Null(actual);
+ }
+
+ [Fact(Skip = "This will never pass with the current code")]
+ public void EmptyStream_Null()
+ {
+ Stream? data = new MemoryStream([]);
+ var actual = ZArchive.Create(data);
+ Assert.Null(actual);
+ }
+
+ [Fact(Skip = "This will never pass with the current code")]
+ public void InvalidStream_Null()
+ {
+ Stream? data = new MemoryStream([.. Enumerable.Repeat(0xFF, 1024)]);
+ var actual = ZArchive.Create(data);
+ Assert.Null(actual);
+ }
+ }
+}
diff --git a/SabreTools.Wrappers/WrapperFactory.cs b/SabreTools.Wrappers/WrapperFactory.cs
index a9e81627..77d15642 100644
--- a/SabreTools.Wrappers/WrapperFactory.cs
+++ b/SabreTools.Wrappers/WrapperFactory.cs
@@ -71,6 +71,7 @@ namespace SabreTools.Wrappers
WrapperType.XDVDFS => XDVDFS.Create(data),
WrapperType.XZ => XZ.Create(data),
WrapperType.XZP => XZP.Create(data),
+ WrapperType.ZArchive => ZArchive.Create(data),
WrapperType.ZSTD => ZSTD.Create(data),
// Unimplemented
@@ -960,6 +961,15 @@ namespace SabreTools.Wrappers
#region ZSTD
+ // ZArchive magic is the final 4 bytes of the file: [0x16, 0x9F, 0x52, 0xD6]
+
+ if (extension.Equals("zar", StringComparison.OrdinalIgnoreCase))
+ return WrapperType.ZArchive;
+
+ #endregion
+
+ #region ZSTD
+
if (magic.StartsWith([null, 0xB5, 0x2F, 0xFD]))
return WrapperType.ZSTD;
diff --git a/SabreTools.Wrappers/WrapperType.cs b/SabreTools.Wrappers/WrapperType.cs
index 78658869..0c32eebf 100644
--- a/SabreTools.Wrappers/WrapperType.cs
+++ b/SabreTools.Wrappers/WrapperType.cs
@@ -303,6 +303,11 @@ namespace SabreTools.Wrappers
///
XZP,
+ ///
+ /// ZArchive archive
+ ///
+ ZArchive,
+
///
/// ZStandard compressed file
///
diff --git a/SabreTools.Wrappers/ZArchive.Extraction.cs b/SabreTools.Wrappers/ZArchive.Extraction.cs
new file mode 100644
index 00000000..85b8f2a2
--- /dev/null
+++ b/SabreTools.Wrappers/ZArchive.Extraction.cs
@@ -0,0 +1,189 @@
+using System;
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using SabreTools.Data.Extensions;
+using SabreTools.Data.Models.ZArchive;
+using SabreTools.IO.Extensions;
+using SabreTools.Numerics.Extensions;
+using SharpCompress.Compressors.ZStandard;
+#endif
+
+namespace SabreTools.Wrappers
+{
+    public partial class ZArchive : IExtractable
+    {
+        /// <inheritdoc/>
+        public bool Extract(string outputDirectory, bool includeDebug)
+        {
+            if (_dataSource is null || !_dataSource.CanRead)
+                return false;
+
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+            try
+            {
+                // Extract all files and directories from the root node (index 0)
+                return ExtractDirectory(outputDirectory, includeDebug, 0);
+            }
+            catch (Exception ex)
+            {
+                if (includeDebug) Console.Error.WriteLine(ex);
+                return false;
+            }
+#else
+            Console.WriteLine("Extraction is not supported for this framework!");
+            Console.WriteLine();
+            return false;
+#endif
+        }
+
+#if NET462_OR_GREATER || NETCOREAPP || NETSTANDARD2_0_OR_GREATER
+        /// <summary>
+        /// Recursively extract all children of the directory node at <paramref name="index"/>
+        /// </summary>
+        /// <param name="outputDirectory">Directory to extract into; created if missing</param>
+        /// <param name="includeDebug">True to write debug information to the console</param>
+        /// <param name="index">Index of the directory node within the file tree</param>
+        /// <returns>True if every child extracted successfully, false otherwise</returns>
+        public bool ExtractDirectory(string outputDirectory, bool includeDebug, uint index)
+        {
+            bool success = true;
+
+            // Ensure the output directory exists
+            if (!string.IsNullOrEmpty(outputDirectory) && !Directory.Exists(outputDirectory))
+                Directory.CreateDirectory(outputDirectory);
+
+            // The node at <index> must be a directory entry
+            FileDirectoryEntry node = FileTree[index];
+            if (node is not DirectoryEntry dir)
+            {
+                if (includeDebug) Console.WriteLine("Invalid directory node");
+                return false;
+            }
+
+            // Extract all children of the current node
+            for (uint i = 0; i < dir.Count; i++)
+            {
+                uint childIndex = dir.NodeStartIndex + i;
+                if (childIndex >= FileTree.Length)
+                {
+                    if (includeDebug) Console.WriteLine("Child node index out of range");
+                    return false;
+                }
+
+                var child = FileTree[childIndex];
+                string? name = child.GetName(NameTable);
+                if (string.IsNullOrEmpty(name))
+                {
+                    if (includeDebug) Console.WriteLine("Invalid node name");
+                    return false;
+                }
+
+                // BUGFIX: previously `success |= ...`; starting from `true`, an OR can
+                // never flip the result to false, so failures were reported as success.
+                string outputPath = Path.Combine(outputDirectory, name);
+                if ((child.NameOffsetAndTypeFlag & Constants.FileFlag) == 0)
+                    success &= ExtractDirectory(outputPath, includeDebug, childIndex);
+                else
+                    success &= ExtractFile(outputPath, includeDebug, childIndex);
+            }
+
+            return success;
+        }
+
+        /// <summary>
+        /// Extract the file node at <paramref name="index"/> to <paramref name="outputPath"/>,
+        /// decompressing each ZStandard block as required
+        /// </summary>
+        /// <param name="outputPath">Path of the file to write</param>
+        /// <param name="includeDebug">True to write debug information to the console</param>
+        /// <param name="index">Index of the file node within the file tree</param>
+        /// <returns>True if the file was fully written, false otherwise</returns>
+        public bool ExtractFile(string outputPath, bool includeDebug, uint index)
+        {
+            var node = FileTree[index];
+            var dataOffset = Footer.SectionCompressedData.Offset;
+            var dataLength = Footer.SectionCompressedData.Size;
+            if (node is not FileEntry file)
+            {
+                if (includeDebug) Console.WriteLine("Invalid file node");
+                return false;
+            }
+
+            // 64-bit offset and size are stored as split 32-bit halves
+            ulong fileOffset = ((ulong)file.FileOffsetHigh << 32) | (ulong)file.FileOffsetLow;
+            ulong fileSize = ((ulong)file.FileSizeHigh << 32) | (ulong)file.FileSizeLow;
+
+            // Write the output file
+            if (includeDebug) Console.WriteLine($"Extracting: {outputPath}");
+            using var fs = File.Open(outputPath, FileMode.Create, FileAccess.Write, FileShare.ReadWrite);
+            ulong fileProgress = 0;
+
+            lock (_dataSourceLock)
+            {
+                while (fileProgress < fileSize)
+                {
+                    // Determine which block holds the next uncompressed byte
+                    ulong absoluteOffset = fileOffset + fileProgress;
+                    ulong blockIndex = absoluteOffset / (ulong)Constants.BlockSize;
+                    int recordIndex = (int)(blockIndex / (ulong)Constants.BlocksPerOffsetRecord);
+                    if (recordIndex >= OffsetRecords.Length)
+                    {
+                        if (includeDebug) Console.WriteLine($"File offset out of range: {outputPath}");
+                        return false;
+                    }
+
+                    var offsetRecord = OffsetRecords[recordIndex];
+                    int withinRecordIndex = (int)(blockIndex % (ulong)Constants.BlocksPerOffsetRecord);
+                    if (withinRecordIndex >= offsetRecord.Size.Length)
+                    {
+                        if (includeDebug) Console.WriteLine($"Blocks per record mismatch: {outputPath}");
+                        return false;
+                    }
+
+                    // Block sizes are stored minus one, so +1 recovers the stored length
+                    int intraBlockOffset = (int)(absoluteOffset % (ulong)Constants.BlockSize);
+                    int bytesToRead = (int)(offsetRecord.Size[withinRecordIndex]) + 1;
+                    int bytesToWrite = (int)Math.Min(fileSize - fileProgress, (ulong)Constants.BlockSize - (ulong)intraBlockOffset);
+
+                    // The block starts after all earlier blocks in the same record
+                    ulong readOffset = dataOffset + offsetRecord.Offset;
+                    for (int i = 0; i < withinRecordIndex; i++)
+                    {
+                        readOffset += (ulong)offsetRecord.Size[i] + 1;
+                    }
+
+                    // Ensure block doesn't exceed compressed section.
+                    // BUGFIX: the check previously used only offsetRecord.Offset and
+                    // ignored the accumulated sizes of earlier blocks in the record,
+                    // even though readOffset includes them.
+                    if (readOffset - dataOffset + (ulong)bytesToRead > dataLength)
+                    {
+                        if (includeDebug) Console.WriteLine("Block exceeds compressed data section");
+                        return false;
+                    }
+
+                    // Seek to the block and ensure the read won't EOF
+                    _dataSource.SeekIfPossible((long)readOffset, SeekOrigin.Begin);
+                    if (bytesToRead > _dataSource.Length - _dataSource.Position)
+                    {
+                        if (includeDebug) Console.WriteLine($"File out of bounds: {outputPath}");
+                        return false;
+                    }
+
+                    // Read block
+                    var buffer = _dataSource.ReadBytes(bytesToRead);
+
+                    // A full-size block is stored uncompressed; write it directly
+                    if (bytesToRead == Constants.BlockSize)
+                    {
+                        fs.Write(buffer, intraBlockOffset, bytesToWrite);
+                        fileProgress += (ulong)bytesToWrite;
+                        continue;
+                    }
+
+                    // Decompress the ZStandard block
+                    byte[] decompressedBuffer;
+                    using var inputStream = new MemoryStream(buffer);
+                    using var zstdStream = new ZStandardStream(inputStream);
+                    using var outputStream = new MemoryStream();
+                    zstdStream.CopyTo(outputStream);
+                    decompressedBuffer = outputStream.ToArray();
+
+                    // NOTE(review): this assumes every compressed block inflates to exactly
+                    // BlockSize; confirm the format pads the final block of the archive.
+                    if (decompressedBuffer.Length != Constants.BlockSize)
+                    {
+                        if (includeDebug) Console.WriteLine($"Invalid decompressed block size {decompressedBuffer.Length}");
+                        return false;
+                    }
+
+                    // Write decompressed block to output file
+                    fs.Write(decompressedBuffer, intraBlockOffset, bytesToWrite);
+                    fileProgress += (ulong)bytesToWrite;
+                }
+            }
+
+            return true;
+        }
+#endif
+    }
+}
diff --git a/SabreTools.Wrappers/ZArchive.Printing.cs b/SabreTools.Wrappers/ZArchive.Printing.cs
new file mode 100644
index 00000000..e2b02aa5
--- /dev/null
+++ b/SabreTools.Wrappers/ZArchive.Printing.cs
@@ -0,0 +1,144 @@
+using System.Text;
+using SabreTools.Data.Models.ZArchive;
+using SabreTools.Text.Extensions;
+
+namespace SabreTools.Wrappers
+{
+    public partial class ZArchive : IPrintable
+    {
+#if NETCOREAPP
+        /// <inheritdoc/>
+        public string ExportJSON() => System.Text.Json.JsonSerializer.Serialize(Model, _jsonSerializerOptions);
+#endif
+
+        /// <inheritdoc/>
+        public void PrintInformation(StringBuilder builder)
+        {
+            builder.AppendLine("ZArchive Information:");
+            builder.AppendLine("-------------------------");
+            builder.AppendLine();
+
+            Print(builder, OffsetRecords);
+            Print(builder, NameTable);
+            Print(builder, FileTree);
+            Print(builder, Footer);
+        }
+
+        /// <summary>Print the compression offset records section</summary>
+        public void Print(StringBuilder builder, OffsetRecord[] records)
+        {
+            builder.AppendLine(" Compression Offset Records:");
+            builder.AppendLine(" -------------------------");
+            builder.AppendLine();
+            if (records.Length == 0)
+            {
+                builder.AppendLine(" No compression offset records");
+                builder.AppendLine();
+                return;
+            }
+
+            for (int i = 0; i < records.Length; i++)
+            {
+                var record = records[i];
+
+                builder.AppendLine(record.Offset, " Base Offset");
+                builder.AppendLine(record.Size, " Block Sizes");
+                builder.AppendLine();
+            }
+        }
+
+        /// <summary>Print the name table section</summary>
+        public void Print(StringBuilder builder, NameTable nameTable)
+        {
+            builder.AppendLine(" Name Table:");
+            builder.AppendLine(" -------------------------");
+            builder.AppendLine();
+            if (nameTable.NameEntries.Length != nameTable.NameTableOffsets.Length)
+            {
+                builder.AppendLine(" Mismatched Name Table entry count");
+                builder.AppendLine();
+                return;
+            }
+
+            for (int i = 0; i < nameTable.NameEntries.Length; i++)
+            {
+                var entry = nameTable.NameEntries[i];
+
+                builder.AppendLine(nameTable.NameTableOffsets[i], " Name Table Offset");
+                // Exactly one of the short/long length fields is expected to be set
+                if (entry.NodeLengthShort is not null)
+                    builder.AppendLine(entry.NodeLengthShort, " Name Length");
+                else if (entry.NodeLengthLong is not null)
+                    builder.AppendLine(entry.NodeLengthLong, " Name Length");
+                builder.AppendLine(Encoding.UTF8.GetString(entry.NodeName), " Name");
+                builder.AppendLine();
+            }
+        }
+
+        /// <summary>Print the file tree section</summary>
+        public void Print(StringBuilder builder, FileDirectoryEntry[] fileTree)
+        {
+            builder.AppendLine(" File Tree:");
+            builder.AppendLine(" -------------------------");
+            builder.AppendLine();
+            if (fileTree.Length == 0)
+            {
+                builder.AppendLine(" No nodes in file tree");
+                builder.AppendLine();
+                return;
+            }
+
+            for (int i = 0; i < fileTree.Length; i++)
+            {
+                var node = fileTree[i];
+
+                // BUGFIX: this label previously read " Base Offset", a copy-paste of
+                // the offset record label; the value is the name offset / type flag.
+                builder.AppendLine(node.NameOffsetAndTypeFlag, " Name Offset And Type Flag");
+                bool fileFlag = (node.NameOffsetAndTypeFlag & Constants.FileFlag) == Constants.FileFlag;
+                builder.AppendLine(fileFlag, " File Flag");
+                // NOTE(review): masking with Constants.RootNode to recover the name
+                // table offset assumes RootNode doubles as the offset mask — confirm
+                builder.AppendLine(node.NameOffsetAndTypeFlag & Constants.RootNode, " Name Table Offset");
+
+                if (node is FileEntry fe)
+                {
+                    // 64-bit values are stored as split 32-bit halves
+                    var fileOffset = ((ulong)fe.FileOffsetHigh << 32) | (ulong)fe.FileOffsetLow;
+                    builder.AppendLine(fileOffset, " File Offset");
+                    var fileSize = ((ulong)fe.FileSizeHigh << 32) | (ulong)fe.FileSizeLow;
+                    builder.AppendLine(fileSize, " File Size");
+                }
+                else if (node is DirectoryEntry de)
+                {
+                    builder.AppendLine(de.NodeStartIndex, " Node Start Index");
+                    builder.AppendLine(de.Count, " Count");
+                    builder.AppendLine(de.Reserved, " Reserved");
+                }
+                else
+                {
+                    builder.AppendLine(" Unknown Node");
+                }
+
+                builder.AppendLine();
+            }
+        }
+
+        /// <summary>Print the footer section</summary>
+        public void Print(StringBuilder builder, Footer footer)
+        {
+            builder.AppendLine(" Footer:");
+            builder.AppendLine(" -------------------------");
+            builder.AppendLine();
+
+            builder.AppendLine(footer.SectionCompressedData.Offset, " Compressed Data Base Offset");
+            builder.AppendLine(footer.SectionCompressedData.Size, " Compressed Data Length");
+            builder.AppendLine(footer.SectionOffsetRecords.Offset, " Compression Offset Records Base Offset");
+            builder.AppendLine(footer.SectionOffsetRecords.Size, " Compression Offset Records Length");
+            builder.AppendLine(footer.SectionNameTable.Offset, " Name Table Base Offset");
+            builder.AppendLine(footer.SectionNameTable.Size, " Name Table Length");
+            builder.AppendLine(footer.SectionFileTree.Offset, " File Tree Base Offset");
+            builder.AppendLine(footer.SectionFileTree.Size, " File Tree Length");
+            builder.AppendLine(footer.SectionMetaDirectory.Offset, " Meta Directory Base Offset");
+            builder.AppendLine(footer.SectionMetaDirectory.Size, " Meta Directory Length");
+            builder.AppendLine(footer.SectionMetaData.Offset, " Meta Data Base Offset");
+            builder.AppendLine(footer.SectionMetaData.Size, " Meta Data Length");
+            builder.AppendLine(footer.IntegrityHash, " Integrity Hash");
+            builder.AppendLine(footer.Size, " Size");
+            builder.AppendLine(footer.Version, " Version");
+            builder.AppendLine(footer.Magic, " Magic");
+
+            builder.AppendLine();
+        }
+    }
+}
diff --git a/SabreTools.Wrappers/ZArchive.cs b/SabreTools.Wrappers/ZArchive.cs
new file mode 100644
index 00000000..e2157f49
--- /dev/null
+++ b/SabreTools.Wrappers/ZArchive.cs
@@ -0,0 +1,106 @@
+using System.IO;
+using SabreTools.Data.Models.ZArchive;
+
+namespace SabreTools.Wrappers
+{
+ public partial class ZArchive : WrapperBase
+ {
+ #region Descriptive Properties
+
+ /// <inheritdoc/>
+ public override string DescriptionString => "ZArchive";
+
+ #endregion
+
+ #region Extension Properties
+
+ /// <summary>Compression offset records from the underlying model</summary>
+ public OffsetRecord[] OffsetRecords => Model.OffsetRecords;
+
+ /// <summary>Name table from the underlying model</summary>
+ public NameTable NameTable => Model.NameTable;
+
+ /// <summary>File tree nodes from the underlying model</summary>
+ public FileDirectoryEntry[] FileTree => Model.FileTree;
+
+ /// <summary>Archive footer from the underlying model</summary>
+ public Footer Footer => Model.Footer;
+
+ #endregion
+
+ #region Constructors
+
+ /// <inheritdoc/>
+ public ZArchive(Archive model, byte[] data) : base(model, data) { }
+
+ /// <inheritdoc/>
+ public ZArchive(Archive model, byte[] data, int offset) : base(model, data, offset) { }
+
+ /// <inheritdoc/>
+ public ZArchive(Archive model, byte[] data, int offset, int length) : base(model, data, offset, length) { }
+
+ /// <inheritdoc/>
+ public ZArchive(Archive model, Stream data) : base(model, data) { }
+
+ /// <inheritdoc/>
+ public ZArchive(Archive model, Stream data, long offset) : base(model, data, offset) { }
+
+ /// <inheritdoc/>
+ public ZArchive(Archive model, Stream data, long offset, long length) : base(model, data, offset, length) { }
+
+ #endregion
+
+ #region Static Constructors
+
+ /// <summary>
+ /// Create a ZArchive from a byte array and offset
+ /// </summary>
+ /// <param name="data">Byte array representing the archive</param>
+ /// <param name="offset">Offset within the array to parse</param>
+ /// <returns>A ZArchive wrapper on success, null on failure</returns>
+ public static ZArchive? Create(byte[]? data, int offset)
+ {
+ // If the data is invalid
+ if (data is null || data.Length == 0)
+ return null;
+
+ // If the offset is out of bounds
+ if (offset < 0 || offset >= data.Length)
+ return null;
+
+ // Create a memory stream over the remaining bytes and defer to the stream overload
+ var dataStream = new MemoryStream(data, offset, data.Length - offset);
+ return Create(dataStream);
+ }
+
+ /// <summary>
+ /// Create a ZArchive from a Stream
+ /// </summary>
+ /// <param name="data">Stream representing the archive</param>
+ /// <returns>A ZArchive wrapper on success, null on failure</returns>
+ public static ZArchive? Create(Stream? data)
+ {
+ // If the data is invalid
+ if (data is null || !data.CanRead)
+ return null;
+
+ try
+ {
+ // Cache the current offset so the wrapper starts where parsing began
+ long currentOffset = data.Position;
+
+ var model = new Serialization.Readers.ZArchive().Deserialize(data);
+ if (model is null)
+ return null;
+
+ return new ZArchive(model, data, currentOffset);
+ }
+ catch
+ {
+ // Any reader failure (truncated footer, bad section bounds, etc.) maps to null
+ return null;
+ }
+ }
+
+ #endregion
+ }
+}