Compare commits


8 Commits
0.36.0...dmg

Author          SHA1        Message                                         Date
Adam Hathcock   83e8bf8462  Add test dmg                                    2021-06-04 13:38:09 +01:00
Adam Hathcock   65bcfadfde  Merge pull request #573 from Artentus/master    2021-06-04 13:25:32 +01:00
                            Add read-only support for Dmg archives
Adam Hathcock   5acc195cf7  Merge branch 'master' into master               2021-06-04 13:23:07 +01:00
Mathis Rech     d5cbe71cae  Revert global.json                              2021-02-17 11:05:19 +01:00
Mathis Rech     014ecd4fc1  Merge pull request #3 from adamhathcock/master  2021-02-17 11:00:20 +01:00
                            Merge master
Mathis Rech     9600709219  Add read-only support for DMG archives          2021-02-17 10:57:41 +01:00
Mathis Rech     d5e6c31a9f  Merge pull request #2 from adamhathcock/master  2021-02-09 16:44:48 +01:00
                            Merge master
Mathis Rech     5faa603d59  Merge pull request #1 from adamhathcock/master  2020-09-24 20:05:04 +02:00
                            Merge master
40 changed files with 3606 additions and 4 deletions

View File

@@ -1,5 +1,6 @@
using System;
using System.IO;
using SharpCompress.Archives.Dmg;
using SharpCompress.Archives.GZip;
using SharpCompress.Archives.Rar;
using SharpCompress.Archives.SevenZip;
@@ -44,6 +45,12 @@ namespace SharpCompress.Archives
return GZipArchive.Open(stream, readerOptions);
}
stream.Seek(0, SeekOrigin.Begin);
if (DmgArchive.IsDmgFile(stream))
{
stream.Seek(0, SeekOrigin.Begin);
return DmgArchive.Open(stream, readerOptions);
}
stream.Seek(0, SeekOrigin.Begin);
if (RarArchive.IsRarFile(stream, readerOptions))
{
stream.Seek(0, SeekOrigin.Begin);
@@ -55,7 +62,7 @@ namespace SharpCompress.Archives
stream.Seek(0, SeekOrigin.Begin);
return TarArchive.Open(stream, readerOptions);
}
- throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip");
+ throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip, Dmg");
}
public static IWritableArchive Create(ArchiveType type)
@@ -106,6 +113,12 @@ namespace SharpCompress.Archives
return GZipArchive.Open(fileInfo, options);
}
stream.Seek(0, SeekOrigin.Begin);
if (DmgArchive.IsDmgFile(stream))
{
stream.Seek(0, SeekOrigin.Begin);
return DmgArchive.Open(fileInfo, options);
}
stream.Seek(0, SeekOrigin.Begin);
if (RarArchive.IsRarFile(stream, options))
{
return RarArchive.Open(fileInfo, options);
@@ -115,7 +128,7 @@ namespace SharpCompress.Archives
{
return TarArchive.Open(fileInfo, options);
}
- throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip");
+ throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, Dmg");
}
/// <summary>

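For context, a minimal usage sketch of the detection path changed above; the factory probes each supported format in turn, rewinding the stream between probes, and DMG is now among them. The file name is illustrative, not part of this diff.

    using System;
    using System.IO;
    using SharpCompress.Archives;

    class OpenExample
    {
        static void Main()
        {
            // Open an archive of unknown type; ArchiveFactory.Open tries each
            // format's Is*File probe (now including DmgArchive.IsDmgFile).
            using var stream = File.OpenRead("sample.dmg"); // illustrative path
            using var archive = ArchiveFactory.Open(stream);
            Console.WriteLine(archive.Type); // prints "Dmg" for a DMG image
        }
    }
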
View File

@@ -0,0 +1,117 @@
using SharpCompress.Common;
using SharpCompress.Common.Dmg;
using SharpCompress.Common.Dmg.Headers;
using SharpCompress.Common.Dmg.HFS;
using SharpCompress.Readers;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
namespace SharpCompress.Archives.Dmg
{
public class DmgArchive : AbstractArchive<DmgArchiveEntry, DmgVolume>
{
private readonly string _fileName;
internal DmgArchive(FileInfo fileInfo, ReaderOptions readerOptions)
: base(ArchiveType.Dmg, fileInfo, readerOptions)
{
_fileName = fileInfo.FullName;
}
internal DmgArchive(Stream stream, ReaderOptions readerOptions)
: base(ArchiveType.Dmg, stream.AsEnumerable(), readerOptions)
{
_fileName = string.Empty;
}
protected override IReader CreateReaderForSolidExtraction()
=> new DmgReader(ReaderOptions, this, _fileName);
protected override IEnumerable<DmgArchiveEntry> LoadEntries(IEnumerable<DmgVolume> volumes)
=> volumes.Single().LoadEntries();
protected override IEnumerable<DmgVolume> LoadVolumes(FileInfo file)
=> new DmgVolume(this, file.OpenRead(), file.FullName, ReaderOptions).AsEnumerable();
protected override IEnumerable<DmgVolume> LoadVolumes(IEnumerable<Stream> streams)
=> new DmgVolume(this, streams.Single(), string.Empty, ReaderOptions).AsEnumerable();
public static bool IsDmgFile(FileInfo fileInfo)
{
if (!fileInfo.Exists) return false;
using var stream = fileInfo.OpenRead();
return IsDmgFile(stream);
}
public static bool IsDmgFile(Stream stream)
{
long headerPos = stream.Length - DmgHeader.HeaderSize;
if (headerPos < 0) return false;
stream.Position = headerPos;
return DmgHeader.TryRead(stream, out _);
}
/// <summary>
/// Constructor expects a filepath to an existing file.
/// </summary>
/// <param name="filePath"></param>
/// <param name="readerOptions"></param>
public static DmgArchive Open(string filePath, ReaderOptions? readerOptions = null)
{
filePath.CheckNotNullOrEmpty(nameof(filePath));
return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
}
/// <summary>
/// Constructor with a FileInfo object to an existing file.
/// </summary>
/// <param name="fileInfo"></param>
/// <param name="readerOptions"></param>
public static DmgArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
{
fileInfo.CheckNotNull(nameof(fileInfo));
return new DmgArchive(fileInfo, readerOptions ?? new ReaderOptions());
}
/// <summary>
/// Takes a seekable Stream as a source
/// </summary>
/// <param name="stream"></param>
/// <param name="readerOptions"></param>
public static DmgArchive Open(Stream stream, ReaderOptions? readerOptions = null)
{
stream.CheckNotNull(nameof(stream));
return new DmgArchive(stream, readerOptions ?? new ReaderOptions());
}
private sealed class DmgReader : AbstractReader<DmgEntry, DmgVolume>
{
private readonly DmgArchive _archive;
private readonly string _fileName;
private readonly Stream? _partitionStream;
public override DmgVolume Volume { get; }
internal DmgReader(ReaderOptions readerOptions, DmgArchive archive, string fileName)
: base(readerOptions, ArchiveType.Dmg)
{
_archive = archive;
_fileName = fileName;
Volume = archive.Volumes.Single();
using var compressedStream = DmgUtil.LoadHFSPartitionStream(Volume.Stream, Volume.Header);
_partitionStream = compressedStream?.Decompress();
}
protected override IEnumerable<DmgEntry> GetEntries(Stream stream)
{
if (_partitionStream is null) return Array.Empty<DmgArchiveEntry>();
else return HFSUtil.LoadEntriesFromPartition(_partitionStream, _fileName, _archive);
}
}
}
}

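A short sketch of the public surface this file adds (the path is a placeholder; the Entries collection comes from the AbstractArchive base class):

    using System;
    using System.IO;
    using SharpCompress.Archives.Dmg;

    class DmgExample
    {
        static void Main()
        {
            const string path = "image.dmg"; // placeholder
            if (!DmgArchive.IsDmgFile(new FileInfo(path)))
                return; // no valid DMG header at the end of the file

            using var archive = DmgArchive.Open(path);
            foreach (var entry in archive.Entries)
                Console.WriteLine($"{entry.Key}  dir={entry.IsDirectory}  size={entry.Size}");
        }
    }
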
View File

@@ -0,0 +1,32 @@
using SharpCompress.Common.Dmg;
using SharpCompress.Common.Dmg.HFS;
using System;
using System.IO;
namespace SharpCompress.Archives.Dmg
{
public sealed class DmgArchiveEntry : DmgEntry, IArchiveEntry
{
private readonly Stream? _stream;
public bool IsComplete { get; } = true;
public IArchive Archive { get; }
internal DmgArchiveEntry(Stream? stream, DmgArchive archive, HFSCatalogRecord record, string path, DmgFilePart part)
: base(record, path, stream?.Length ?? 0, part)
{
_stream = stream;
Archive = archive;
}
public Stream OpenEntryStream()
{
if (IsDirectory)
throw new NotSupportedException("Directories cannot be opened as a stream");
_stream!.Position = 0;
return _stream;
}
}
}

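Note that OpenEntryStream rewinds and returns the entry's backing stream directly rather than a wrapper, so a consumer should avoid disposing it between entries; a cautious extraction sketch (the output directory is illustrative):

    using System.IO;
    using SharpCompress.Archives.Dmg;

    static class ExtractOne
    {
        public static void Extract(DmgArchiveEntry entry, string outDir)
        {
            if (entry.IsDirectory)
                return; // directories cannot be opened as a stream

            // Deliberately not disposed: the same stream instance backs the entry.
            var src = entry.OpenEntryStream();
            using var dst = File.Create(Path.Combine(outDir, Path.GetFileName(entry.Key)));
            src.CopyTo(dst);
        }
    }
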
View File

@@ -8,5 +8,10 @@ namespace SharpCompress.Common
: base(message)
{
}
public ArchiveException(string message, Exception inner)
: base(message, inner)
{
}
}
}

View File

@@ -6,6 +6,7 @@
Zip,
Tar,
SevenZip,
- GZip
+ GZip,
+ Dmg
}
}

View File

@@ -0,0 +1,323 @@
using SharpCompress.Common.Dmg.Headers;
using SharpCompress.Compressors;
using SharpCompress.Compressors.ADC;
using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
using SharpCompress.IO;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
namespace SharpCompress.Common.Dmg
{
internal sealed class DmgBlockDataStream : Stream
{
private readonly Stream _baseStream;
private readonly DmgHeader _header;
private readonly BlkxTable _table;
private long _position;
private bool _isEnded;
private int _chunkIndex;
private Stream? _chunkStream;
private long _chunkPos;
public override bool CanRead => true;
public override bool CanWrite => false;
public override bool CanSeek => true;
public override long Length { get; }
public override long Position
{
get => _position;
set
{
if ((value < 0) || (value > Length)) throw new ArgumentOutOfRangeException(nameof(value));
if (value == Length)
{
// End of the stream
_position = Length;
_isEnded = true;
_chunkIndex = -1;
_chunkStream = null;
}
else if (value != _position)
{
_position = value;
// We can only seek over entire chunks at a time because some chunks may be compressed.
// So we first find the chunk that we are now in, then we read to the exact position inside that chunk.
for (int i = 0; i < _table.Chunks.Count; i++)
{
var chunk = _table.Chunks[i];
if (IsChunkValid(chunk) && (chunk.UncompressedOffset <= (ulong)_position)
&& ((chunk.UncompressedOffset + chunk.UncompressedLength) > (ulong)_position))
{
if (i == _chunkIndex)
{
// We are still in the same chunk, so if the new position is
// behind the previous one we can just read to the new position.
long offset = (long)chunk.UncompressedOffset + _chunkPos;
if (offset <= _position)
{
long skip = _position - offset;
_chunkStream!.Skip(skip);
_chunkPos += skip;
break;
}
}
_chunkIndex = i;
_chunkStream = GetChunkStream();
_chunkPos = 0;
// If the chunk happens to not be compressed this read will still result in a fast seek
if ((ulong)_position != chunk.UncompressedOffset)
{
long skip = _position - (long)chunk.UncompressedOffset;
_chunkStream.Skip(skip);
_chunkPos = skip;
}
break;
}
}
}
}
}
public DmgBlockDataStream(Stream baseStream, DmgHeader header, BlkxTable table)
{
if (!baseStream.CanRead) throw new ArgumentException("Requires a readable stream", nameof(baseStream));
if (!baseStream.CanSeek) throw new ArgumentException("Requires a seekable stream", nameof(baseStream));
_baseStream = baseStream;
_header = header;
_table = table;
Length = 0;
foreach (var chunk in table.Chunks)
{
if (IsChunkValid(chunk))
Length += (long)chunk.UncompressedLength;
}
_position = 0;
_chunkIndex = -1;
_chunkIndex = GetNextChunk();
_isEnded = _chunkIndex < 0;
if (!_isEnded) _chunkStream = GetChunkStream();
_chunkPos = 0;
}
private static bool IsChunkValid(BlkxChunk chunk)
{
return chunk.Type switch
{
BlkxChunkType.Zero => true,
BlkxChunkType.Uncompressed => true,
BlkxChunkType.Ignore => true,
BlkxChunkType.AdcCompressed => true,
BlkxChunkType.ZlibCompressed => true,
BlkxChunkType.Bz2Compressed => true,
_ => false
};
}
private int GetNextChunk()
{
int index = _chunkIndex;
bool isValid = false;
while (!isValid)
{
index++;
if (index >= _table.Chunks.Count) return -1;
var chunk = _table.Chunks[index];
if (chunk.Type == BlkxChunkType.Last) return -1;
isValid = IsChunkValid(chunk);
}
return index;
}
private Stream GetChunkStream()
{
if (_chunkIndex < 0)
throw new InvalidOperationException("Invalid chunk index");
var chunk = _table.Chunks[_chunkIndex];
// For our purposes, ignore behaves the same as zero
if ((chunk.Type == BlkxChunkType.Zero) || (chunk.Type == BlkxChunkType.Ignore))
return new ConstantStream(0, (long)chunk.UncompressedLength);
// We first create a sub-stream on the region of the base stream where the
// (possibly compressed) data is physically located.
var subStream = new SeekableSubStream(_baseStream,
(long)(_header.DataForkOffset + _table.DataOffset + chunk.CompressedOffset),
(long)chunk.CompressedLength);
// Then we nest that sub-stream into the appropriate compressed stream.
return chunk.Type switch
{
BlkxChunkType.Uncompressed => subStream,
BlkxChunkType.AdcCompressed => new ADCStream(subStream, CompressionMode.Decompress),
BlkxChunkType.ZlibCompressed => new ZlibStream(subStream, CompressionMode.Decompress),
BlkxChunkType.Bz2Compressed => new BZip2Stream(subStream, CompressionMode.Decompress, false),
_ => throw new InvalidOperationException("Invalid chunk type")
};
}
// Decompresses the entire stream in memory for faster extraction.
// This is about two orders of magnitude faster than decompressing
// on-the-fly while extracting, but also eats RAM for breakfast.
public Stream Decompress()
{
// We have to load all the chunks into separate memory streams first
// because otherwise the decompression threads would block each other
// and actually be slower than just a single decompression thread.
var rawStreams = new Stream?[_table.Chunks.Count];
for (int i = 0; i < rawStreams.Length; i++)
{
var chunk = _table.Chunks[i];
if (IsChunkValid(chunk))
{
if ((chunk.Type == BlkxChunkType.Zero) || (chunk.Type == BlkxChunkType.Ignore))
{
rawStreams[i] = new ConstantStream(0, (long)chunk.UncompressedLength);
}
else
{
var subStream = new SeekableSubStream(_baseStream,
(long)(_header.DataForkOffset + _table.DataOffset + chunk.CompressedOffset),
(long)chunk.CompressedLength);
var memStream = new MemoryStream();
subStream.CopyTo(memStream);
memStream.Position = 0;
rawStreams[i] = memStream;
}
}
else
{
rawStreams[i] = null;
}
}
// Now we can decompress the chunks multithreaded
var streams = new Stream?[_table.Chunks.Count];
Parallel.For(0, streams.Length, i =>
{
var rawStream = rawStreams[i];
if (rawStream is not null)
{
var chunk = _table.Chunks[i];
if ((chunk.Type == BlkxChunkType.Zero)
|| (chunk.Type == BlkxChunkType.Ignore)
|| (chunk.Type == BlkxChunkType.Uncompressed))
{
streams[i] = rawStream;
}
else
{
Stream compStream = chunk.Type switch
{
BlkxChunkType.AdcCompressed => new ADCStream(rawStream, CompressionMode.Decompress),
BlkxChunkType.ZlibCompressed => new ZlibStream(rawStream, CompressionMode.Decompress),
BlkxChunkType.Bz2Compressed => new BZip2Stream(rawStream, CompressionMode.Decompress, false),
_ => throw new InvalidOperationException("Invalid chunk type")
};
var memStream = new MemoryStream();
compStream.CopyTo(memStream);
compStream.Dispose();
memStream.Position = 0;
streams[i] = memStream;
}
rawStream.Dispose();
rawStreams[i] = null;
}
else
{
streams[i] = null;
}
});
return new CompositeStream((IEnumerable<Stream>)streams.Where(s => s is not null));
}
public override int Read(byte[] buffer, int offset, int count)
{
if (_isEnded) return 0;
int readCount = _chunkStream!.Read(buffer, offset, count);
_chunkPos += readCount;
while (readCount < count)
{
// Current chunk has ended, so we have to continue reading from the next chunk.
_chunkIndex = GetNextChunk();
if (_chunkIndex < 0)
{
// We have reached the last chunk
_isEnded = true;
_chunkPos = 0;
_position += readCount;
return readCount;
}
_chunkStream = GetChunkStream();
int rc = _chunkStream.Read(buffer, offset + readCount, count - readCount);
_chunkPos = rc;
readCount += rc;
}
_position += readCount;
return readCount;
}
public override void Flush()
{ }
public override long Seek(long offset, SeekOrigin origin)
{
switch (origin)
{
case SeekOrigin.Begin:
Position = offset;
break;
case SeekOrigin.Current:
Position += offset;
break;
case SeekOrigin.End:
Position = Length - offset;
break;
}
return Position;
}
public override void SetLength(long value)
=> throw new NotSupportedException();
public override void Write(byte[] buffer, int offset, int count)
=> throw new NotSupportedException();
protected override void Dispose(bool disposing)
{ }
}
}

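The Position setter above boils down to locating the chunk whose uncompressed range contains the target offset, then skipping into it; a standalone sketch of that mapping with an invented chunk table:

    using System;

    class ChunkMapExample
    {
        // (UncompressedOffset, UncompressedLength) pairs as carried by BlkxChunk;
        // the values here are invented.
        static readonly (ulong Offset, ulong Length)[] Chunks =
        {
            (0, 4096), (4096, 4096), (8192, 1024)
        };

        // Mirrors the linear scan in DmgBlockDataStream.Position: find the chunk
        // whose uncompressed range contains 'position' and how far to skip into it.
        static (int Index, long Skip) Locate(long position)
        {
            for (int i = 0; i < Chunks.Length; i++)
            {
                var c = Chunks[i];
                if (c.Offset <= (ulong)position && (ulong)position < c.Offset + c.Length)
                    return (i, position - (long)c.Offset);
            }
            return (-1, 0); // past the end
        }

        static void Main()
        {
            var (index, skip) = Locate(5000);
            Console.WriteLine($"chunk {index}, skip {skip}"); // chunk 1, skip 904
        }
    }
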
View File

@@ -0,0 +1,52 @@
using SharpCompress.Common.Dmg.HFS;
using System;
using System.Collections.Generic;
namespace SharpCompress.Common.Dmg
{
public abstract class DmgEntry : Entry
{
public override string Key { get; }
public override bool IsDirectory { get; }
public override long Size { get; }
public override long CompressedSize { get; }
public override CompressionType CompressionType { get; }
public override DateTime? LastModifiedTime { get; }
public override DateTime? CreatedTime { get; }
public override DateTime? LastAccessedTime { get; }
public override DateTime? ArchivedTime { get; }
public override long Crc { get; } = 0; // Not stored
public override string? LinkTarget { get; } = null;
public override bool IsEncrypted { get; } = false;
public override bool IsSplitAfter { get; } = false;
internal override IEnumerable<FilePart> Parts { get; }
internal DmgEntry(HFSCatalogRecord record, string path, long size, DmgFilePart part)
{
Key = path;
IsDirectory = record.Type == HFSCatalogRecordType.Folder;
Size = CompressedSize = size; // There is no way to get the actual compressed size or the compression type of
CompressionType = CompressionType.Unknown; // a file in a DMG archive since the files are nested inside the HFS partition.
Parts = part.AsEnumerable();
if (IsDirectory)
{
var folder = (HFSCatalogFolder)record;
LastModifiedTime = (folder.AttributeModDate > folder.ContentModDate) ? folder.AttributeModDate : folder.ContentModDate;
CreatedTime = folder.CreateDate;
LastAccessedTime = folder.AccessDate;
ArchivedTime = folder.BackupDate;
}
else
{
var file = (HFSCatalogFile)record;
LastModifiedTime = (file.AttributeModDate > file.ContentModDate) ? file.AttributeModDate : file.ContentModDate;
CreatedTime = file.CreateDate;
LastAccessedTime = file.AccessDate;
ArchivedTime = file.BackupDate;
}
}
}
}

View File

@@ -0,0 +1,21 @@
using System.IO;
namespace SharpCompress.Common.Dmg
{
internal sealed class DmgFilePart : FilePart
{
private readonly Stream _stream;
internal override string FilePartName { get; }
public DmgFilePart(Stream stream, string fileName)
: base(new ArchiveEncoding())
{
_stream = stream;
FilePartName = fileName;
}
internal override Stream GetCompressedStream() => _stream;
internal override Stream? GetRawStream() => null;
}
}

View File

@@ -0,0 +1,183 @@
using SharpCompress.Common.Dmg.Headers;
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Text;
using System.Xml.Linq;
namespace SharpCompress.Common.Dmg
{
internal static class DmgUtil
{
private const string MalformedXmlMessage = "Malformed XML block";
private static T[] ParseArray<T>(in XElement parent, in Func<XElement, T> parseElement)
{
var list = new List<T>();
foreach (var node in parent.Elements())
list.Add(parseElement(node));
return list.ToArray();
}
private static Dictionary<string, T> ParseDict<T>(in XElement parent, in Func<XElement, T> parseValue)
{
var dict = new Dictionary<string, T>();
string? key = null;
foreach (var node in parent.Elements())
{
if (string.Equals(node.Name.LocalName, "key", StringComparison.Ordinal))
{
key = node.Value;
}
else if (key is not null)
{
var value = parseValue(node);
dict.Add(key, value);
key = null;
}
}
return dict;
}
private static Dictionary<string, Dictionary<string, Dictionary<string, string>[]>> ParsePList(in XDocument doc)
{
var dictNode = doc.Root?.Element("dict");
if (dictNode is null) throw new InvalidFormatException(MalformedXmlMessage);
static Dictionary<string, string> ParseObject(XElement parent)
=> ParseDict(parent, node => node.Value);
static Dictionary<string, string>[] ParseObjectArray(XElement parent)
=> ParseArray(parent, ParseObject);
static Dictionary<string, Dictionary<string, string>[]> ParseSubDict(XElement parent)
=> ParseDict(parent, ParseObjectArray);
return ParseDict(dictNode, ParseSubDict);
}
private static BlkxData CreateDataFromDict(in Dictionary<string, string> dict)
{
static bool TryParseHex(string? s, out uint value)
{
value = 0;
if (string.IsNullOrEmpty(s)) return false;
if (s!.StartsWith("0x", StringComparison.OrdinalIgnoreCase))
s = s.Substring(2);
return uint.TryParse(s, NumberStyles.HexNumber, CultureInfo.InvariantCulture, out value);
}
if (!dict.TryGetValue("ID", out string? idStr) || !int.TryParse(idStr, out int id))
throw new InvalidFormatException(MalformedXmlMessage);
if (!dict.TryGetValue("Name", out string? name))
throw new InvalidFormatException(MalformedXmlMessage);
if (!dict.TryGetValue("Attributes", out string? attribStr) || !TryParseHex(attribStr, out uint attribs))
throw new InvalidFormatException(MalformedXmlMessage);
if (!dict.TryGetValue("Data", out string? base64Data) || string.IsNullOrEmpty(base64Data))
throw new InvalidFormatException(MalformedXmlMessage);
try
{
var data = Convert.FromBase64String(base64Data);
if (!BlkxTable.TryRead(data, out var table))
throw new InvalidFormatException("Invalid BLKX table");
return new BlkxData(id, name, attribs, table!);
}
catch (FormatException ex)
{
throw new InvalidFormatException(MalformedXmlMessage, ex);
}
}
public static DmgBlockDataStream? LoadHFSPartitionStream(Stream baseStream, DmgHeader header)
{
if ((header.XMLOffset + header.XMLLength) >= (ulong)baseStream.Length)
throw new IncompleteArchiveException("XML block incomplete");
if ((header.DataForkOffset + header.DataForkLength) >= (ulong)baseStream.Length)
throw new IncompleteArchiveException("Data block incomplete");
baseStream.Position = (long)header.XMLOffset;
var xmlBuffer = new byte[header.XMLLength];
baseStream.Read(xmlBuffer, 0, (int)header.XMLLength);
var xml = Encoding.ASCII.GetString(xmlBuffer);
var doc = XDocument.Parse(xml);
var pList = ParsePList(doc);
if (!pList.TryGetValue("resource-fork", out var resDict) || !resDict.TryGetValue("blkx", out var blkxDicts))
throw new InvalidFormatException(MalformedXmlMessage);
var objs = new BlkxData[blkxDicts.Length];
for (int i = 0; i < objs.Length; i++)
objs[i] = CreateDataFromDict(blkxDicts[i]);
// Index 0 is the protective MBR partition
// Index 1 is the GPT header
// Index 2 is the GPT partition table
try
{
var headerData = objs[1];
using var headerStream = new DmgBlockDataStream(baseStream, header, headerData.Table);
if (!GptHeader.TryRead(headerStream, out var gptHeader))
throw new InvalidFormatException("Invalid GPT header");
var tableData = objs[2];
using var tableStream = new DmgBlockDataStream(baseStream, header, tableData.Table);
var gptTable = new GptPartitionEntry[gptHeader!.EntriesCount];
for (int i = 0; i < gptHeader.EntriesCount; i++)
gptTable[i] = GptPartitionEntry.Read(tableStream);
foreach (var entry in gptTable)
{
if (entry.TypeGuid == PartitionFormat.AppleHFS)
{
BlkxData? partitionData = null;
for (int i = 3; i < objs.Length; i++)
{
if (objs[i].Name.StartsWith(entry.Name, StringComparison.Ordinal))
{
partitionData = objs[i];
break;
}
}
if (partitionData is null)
throw new InvalidFormatException($"Missing partition {entry.Name}");
return new DmgBlockDataStream(baseStream, header, partitionData.Table);
}
}
return null;
}
catch (EndOfStreamException ex)
{
throw new IncompleteArchiveException("Partition incomplete", ex);
}
}
private sealed class BlkxData
{
public int Id { get; }
public string Name { get; }
public uint Attributes { get; }
public BlkxTable Table { get; }
public BlkxData(int id, string name, uint attributes, BlkxTable table)
{
Id = id;
Name = name;
Attributes = attributes;
Table = table;
}
}
}
}

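For reference, the XML this parser expects is an Apple property list whose root dict maps "resource-fork" to a dict that maps "blkx" to an array of dicts carrying the ID/Name/Attributes/Data keys checked above. A minimal, self-contained sketch; the sample document is invented (a real Data blob is a full BLKX table, not just the "mish" magic):

    using System;
    using System.Linq;
    using System.Xml.Linq;

    class PListExample
    {
        const string Sample = @"<plist version=""1.0""><dict>
      <key>resource-fork</key>
      <dict>
        <key>blkx</key>
        <array>
          <dict>
            <key>ID</key><string>0</string>
            <key>Name</key><string>Protective Master Boot Record (MBR : 0)</string>
            <key>Attributes</key><string>0x0050</string>
            <key>Data</key><data>bWlzaA==</data>
          </dict>
        </array>
      </dict>
    </dict></plist>";

        static void Main()
        {
            var doc = XDocument.Parse(Sample);
            foreach (var entry in doc.Descendants("array").First().Elements("dict"))
            {
                // In a plist, <key> elements alternate with their value elements.
                string? pendingKey = null;
                foreach (var node in entry.Elements())
                {
                    if (node.Name.LocalName == "key") { pendingKey = node.Value; continue; }
                    if (pendingKey == "Name") Console.WriteLine(node.Value);
                    pendingKey = null;
                }
            }
        }
    }
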
View File

@@ -0,0 +1,38 @@
using SharpCompress.Archives.Dmg;
using SharpCompress.Common.Dmg.Headers;
using SharpCompress.Common.Dmg.HFS;
using System;
using System.Collections.Generic;
using System.IO;
namespace SharpCompress.Common.Dmg
{
public class DmgVolume : Volume
{
private readonly DmgArchive _archive;
private readonly string _fileName;
internal DmgHeader Header { get; }
public DmgVolume(DmgArchive archive, Stream stream, string fileName, Readers.ReaderOptions readerOptions)
: base(stream, readerOptions)
{
_archive = archive;
_fileName = fileName;
long pos = stream.Length - DmgHeader.HeaderSize;
if (pos < 0) throw new InvalidFormatException("Invalid DMG volume");
stream.Position = pos;
if (DmgHeader.TryRead(stream, out var header)) Header = header!;
else throw new InvalidFormatException("Invalid DMG volume");
}
internal IEnumerable<DmgArchiveEntry> LoadEntries()
{
var partitionStream = DmgUtil.LoadHFSPartitionStream(Stream, Header);
if (partitionStream is null) return Array.Empty<DmgArchiveEntry>();
else return HFSUtil.LoadEntriesFromPartition(partitionStream, _fileName, _archive);
}
}
}

View File

@@ -0,0 +1,336 @@
using System;
namespace SharpCompress.Common.Dmg.HFS
{
internal sealed class HFSCatalogKey : HFSStructBase, IEquatable<HFSCatalogKey>, IComparable<HFSCatalogKey>, IComparable
{
private readonly StringComparer _comparer;
public uint ParentId { get; }
public string Name { get; }
private static StringComparer GetComparer(HFSKeyCompareType compareType, bool isHFSX)
{
if (isHFSX)
{
return compareType switch
{
HFSKeyCompareType.CaseFolding => StringComparer.InvariantCultureIgnoreCase,
HFSKeyCompareType.BinaryCompare => StringComparer.Ordinal,
_ => StringComparer.InvariantCultureIgnoreCase
};
}
else
{
return StringComparer.InvariantCultureIgnoreCase;
}
}
public HFSCatalogKey(uint parentId, string name, HFSKeyCompareType compareType, bool isHFSX)
{
ParentId = parentId;
Name = name;
_comparer = GetComparer(compareType, isHFSX);
}
public HFSCatalogKey(byte[] key, HFSKeyCompareType compareType, bool isHFSX)
{
ReadOnlySpan<byte> data = key.AsSpan();
ParentId = ReadUInt32(ref data);
Name = ReadString(ref data, true);
_comparer = GetComparer(compareType, isHFSX);
}
public bool Equals(HFSCatalogKey? other)
{
if (other is null) return false;
else return (ParentId == other.ParentId) && _comparer.Equals(Name, other.Name);
}
public override bool Equals(object? obj)
{
if (obj is HFSCatalogKey other) return Equals(other);
else return false;
}
public int CompareTo(HFSCatalogKey? other)
{
if (other is null) return 1;
int result = ParentId.CompareTo(other.ParentId);
if (result == 0) result = _comparer.Compare(Name, other.Name);
return result;
}
public int CompareTo(object? obj)
{
if (obj is null) return 1;
else if (obj is HFSCatalogKey other) return CompareTo(other);
else throw new ArgumentException("Object is not of type CatalogKey", nameof(obj));
}
public override int GetHashCode()
=> ParentId.GetHashCode() ^ _comparer.GetHashCode(Name);
public static bool operator ==(HFSCatalogKey? left, HFSCatalogKey? right)
{
if (left is null) return right is null;
else return left.Equals(right);
}
public static bool operator !=(HFSCatalogKey? left, HFSCatalogKey? right)
{
if (left is null) return right is not null;
else return !left.Equals(right);
}
public static bool operator <(HFSCatalogKey? left, HFSCatalogKey? right)
{
if (left is null) return right is not null;
else return left.CompareTo(right) < 0;
}
public static bool operator >(HFSCatalogKey? left, HFSCatalogKey? right)
{
if (left is null) return false;
else return left.CompareTo(right) > 0;
}
public static bool operator <=(HFSCatalogKey? left, HFSCatalogKey? right)
{
if (left is null) return true;
else return left.CompareTo(right) <= 0;
}
public static bool operator >=(HFSCatalogKey? left, HFSCatalogKey? right)
{
if (left is null) return right is null;
else return left.CompareTo(right) >= 0;
}
}
internal enum HFSCatalogRecordType : ushort
{
Folder = 0x0001,
File = 0x0002,
FolderThread = 0x0003,
FileThread = 0x0004
}
internal abstract class HFSCatalogRecord : HFSStructBase
{
public HFSCatalogRecordType Type { get; }
protected HFSCatalogRecord(HFSCatalogRecordType type)
=> Type = type;
public static bool TryRead(ref ReadOnlySpan<byte> data, HFSKeyCompareType compareType, bool isHFSX, out HFSCatalogRecord? record)
{
record = null;
ushort rawType = ReadUInt16(ref data);
if (!Enum.IsDefined(typeof(HFSCatalogRecordType), rawType)) return false;
var type = (HFSCatalogRecordType)rawType;
switch (type)
{
case HFSCatalogRecordType.Folder:
record = HFSCatalogFolder.Read(ref data);
return true;
case HFSCatalogRecordType.File:
record = HFSCatalogFile.Read(ref data);
return true;
case HFSCatalogRecordType.FolderThread:
record = HFSCatalogThread.Read(ref data, false, compareType, isHFSX);
return true;
case HFSCatalogRecordType.FileThread:
record = HFSCatalogThread.Read(ref data, true, compareType, isHFSX);
return true;
}
return false;
}
}
internal sealed class HFSCatalogFolder : HFSCatalogRecord
{
public uint Valence { get; }
public uint FolderId { get; }
public DateTime CreateDate { get; }
public DateTime ContentModDate { get; }
public DateTime AttributeModDate { get; }
public DateTime AccessDate { get; }
public DateTime BackupDate { get; }
public HFSPermissions Permissions { get; }
public HFSFolderInfo Info { get; }
public uint TextEncoding { get; }
private HFSCatalogFolder(
uint valence,
uint folderId,
DateTime createDate,
DateTime contentModDate,
DateTime attributeModDate,
DateTime accessDate,
DateTime backupDate,
HFSPermissions permissions,
HFSFolderInfo info,
uint textEncoding)
: base(HFSCatalogRecordType.Folder)
{
Valence = valence;
FolderId = folderId;
CreateDate = createDate;
ContentModDate = contentModDate;
AttributeModDate = attributeModDate;
AccessDate = accessDate;
BackupDate = backupDate;
Permissions = permissions;
Info = info;
TextEncoding = textEncoding;
}
public static HFSCatalogFolder Read(ref ReadOnlySpan<byte> data)
{
_ = ReadUInt16(ref data); // reserved
uint valence = ReadUInt32(ref data);
uint folderId = ReadUInt32(ref data);
var createDate = ReadDate(ref data);
var contentModDate = ReadDate(ref data);
var attributeModDate = ReadDate(ref data);
var accessDate = ReadDate(ref data);
var backupDate = ReadDate(ref data);
var permissions = HFSPermissions.Read(ref data);
var info = HFSFolderInfo.Read(ref data);
uint textEncoding = ReadUInt32(ref data);
_ = ReadUInt32(ref data); // reserved
return new HFSCatalogFolder(
valence,
folderId,
createDate,
contentModDate,
attributeModDate,
accessDate,
backupDate,
permissions,
info,
textEncoding);
}
}
internal enum HFSFileFlags : ushort
{
LockedBit = 0x0000,
LockedMask = 0x0001,
ThreadExistsBit = 0x0001,
ThreadExistsMask = 0x0002
}
internal sealed class HFSCatalogFile : HFSCatalogRecord
{
public HFSFileFlags Flags { get; }
public uint FileId { get; }
public DateTime CreateDate { get; }
public DateTime ContentModDate { get; }
public DateTime AttributeModDate { get; }
public DateTime AccessDate { get; }
public DateTime BackupDate { get; }
public HFSPermissions Permissions { get; }
public HFSFileInfo Info { get; }
public uint TextEncoding { get; }
public HFSForkData DataFork { get; }
public HFSForkData ResourceFork { get; }
private HFSCatalogFile(
HFSFileFlags flags,
uint fileId,
DateTime createDate,
DateTime contentModDate,
DateTime attributeModDate,
DateTime accessDate,
DateTime backupDate,
HFSPermissions permissions,
HFSFileInfo info,
uint textEncoding,
HFSForkData dataFork,
HFSForkData resourceFork)
: base(HFSCatalogRecordType.File)
{
Flags = flags;
FileId = fileId;
CreateDate = createDate;
ContentModDate = contentModDate;
AttributeModDate = attributeModDate;
AccessDate = accessDate;
BackupDate = backupDate;
Permissions = permissions;
Info = info;
TextEncoding = textEncoding;
DataFork = dataFork;
ResourceFork = resourceFork;
}
public static HFSCatalogFile Read(ref ReadOnlySpan<byte> data)
{
var flags = (HFSFileFlags)ReadUInt16(ref data);
_ = ReadUInt32(ref data); // reserved
uint fileId = ReadUInt32(ref data);
var createDate = ReadDate(ref data);
var contentModDate = ReadDate(ref data);
var attributeModDate = ReadDate(ref data);
var accessDate = ReadDate(ref data);
var backupDate = ReadDate(ref data);
var permissions = HFSPermissions.Read(ref data);
var info = HFSFileInfo.Read(ref data);
uint textEncoding = ReadUInt32(ref data);
_ = ReadUInt32(ref data); // reserved
var dataFork = HFSForkData.Read(ref data);
var resourceFork = HFSForkData.Read(ref data);
return new HFSCatalogFile(
flags,
fileId,
createDate,
contentModDate,
attributeModDate,
accessDate,
backupDate,
permissions,
info,
textEncoding,
dataFork,
resourceFork);
}
}
internal sealed class HFSCatalogThread : HFSCatalogRecord
{
public uint ParentId { get; }
public string NodeName { get; }
public HFSCatalogKey CatalogKey { get; }
private HFSCatalogThread(uint parentId, string nodeName, bool isFile, HFSKeyCompareType compareType, bool isHFSX)
: base(isFile ? HFSCatalogRecordType.FileThread : HFSCatalogRecordType.FolderThread)
{
ParentId = parentId;
NodeName = nodeName;
CatalogKey = new HFSCatalogKey(ParentId, NodeName, compareType, isHFSX);
}
public static HFSCatalogThread Read(ref ReadOnlySpan<byte> data, bool isFile, HFSKeyCompareType compareType, bool isHFSX)
{
_ = ReadInt16(ref data); // reserved
uint parentId = ReadUInt32(ref data);
string nodeName = ReadString(ref data, true);
return new HFSCatalogThread(parentId, nodeName, isFile, compareType, isHFSX);
}
}
}

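The comparisons above give catalog keys a total order: parent folder ID first, then name, case-insensitive on plain HFS+ and only case-sensitive on HFSX with BinaryCompare. A toy illustration with invented values:

    using System;

    class CatalogOrderExample
    {
        // Mirrors HFSCatalogKey.CompareTo: parent folder ID first, then the name,
        // using a case-insensitive comparer as on plain HFS+.
        static int Compare((uint Parent, string Name) a, (uint Parent, string Name) b)
        {
            int r = a.Parent.CompareTo(b.Parent);
            return r != 0 ? r : StringComparer.InvariantCultureIgnoreCase.Compare(a.Name, b.Name);
        }

        static void Main()
        {
            Console.WriteLine(Compare((2, "Apps"), (2, "apps")));   // 0: equal keys on HFS+
            Console.WriteLine(Compare((2, "zzz"), (3, "aaa")) < 0); // True: parent ID wins
        }
    }
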
View File

@@ -0,0 +1,31 @@
using System;
using System.IO;
namespace SharpCompress.Common.Dmg.HFS
{
internal sealed class HFSExtentDescriptor : HFSStructBase
{
public uint StartBlock { get; }
public uint BlockCount { get; }
private HFSExtentDescriptor(uint startBlock, uint blockCount)
{
StartBlock = startBlock;
BlockCount = blockCount;
}
public static HFSExtentDescriptor Read(Stream stream)
{
return new HFSExtentDescriptor(
ReadUInt32(stream),
ReadUInt32(stream));
}
public static HFSExtentDescriptor Read(ref ReadOnlySpan<byte> data)
{
return new HFSExtentDescriptor(
ReadUInt32(ref data),
ReadUInt32(ref data));
}
}
}

View File

@@ -0,0 +1,115 @@
using System;
using System.Collections.Generic;
namespace SharpCompress.Common.Dmg.HFS
{
internal sealed class HFSExtentKey : HFSStructBase, IEquatable<HFSExtentKey>, IComparable<HFSExtentKey>, IComparable
{
public byte ForkType { get; }
public uint FileId { get; }
public uint StartBlock { get; }
public HFSExtentKey(byte forkType, uint fileId, uint startBlock)
{
ForkType = forkType;
FileId = fileId;
StartBlock = startBlock;
}
public HFSExtentKey(byte[] key)
{
ReadOnlySpan<byte> data = key.AsSpan();
ForkType = ReadUInt8(ref data);
_ = ReadUInt8(ref data); // padding
FileId = ReadUInt32(ref data);
StartBlock = ReadUInt32(ref data);
}
public bool Equals(HFSExtentKey? other)
{
if (other is null) return false;
else return (ForkType == other.ForkType) && (FileId == other.FileId) && (StartBlock == other.StartBlock);
}
public override bool Equals(object? obj)
{
if (obj is HFSExtentKey other) return Equals(other);
else return false;
}
public int CompareTo(HFSExtentKey? other)
{
if (other is null) return 1;
int result = FileId.CompareTo(other.FileId);
if (result == 0) result = ForkType.CompareTo(other.ForkType);
if (result == 0) result = StartBlock.CompareTo(other.StartBlock);
return result;
}
public int CompareTo(object? obj)
{
if (obj is null) return 1;
else if (obj is HFSExtentKey other) return CompareTo(other);
else throw new ArgumentException("Object is not of type ExtentKey", nameof(obj));
}
public override int GetHashCode()
=> ForkType.GetHashCode() ^ FileId.GetHashCode() ^ StartBlock.GetHashCode();
public static bool operator ==(HFSExtentKey? left, HFSExtentKey? right)
{
if (left is null) return right is null;
else return left.Equals(right);
}
public static bool operator !=(HFSExtentKey? left, HFSExtentKey? right)
{
if (left is null) return right is not null;
else return !left.Equals(right);
}
public static bool operator <(HFSExtentKey? left, HFSExtentKey? right)
{
if (left is null) return right is not null;
else return left.CompareTo(right) < 0;
}
public static bool operator >(HFSExtentKey? left, HFSExtentKey? right)
{
if (left is null) return false;
else return left.CompareTo(right) > 0;
}
public static bool operator <=(HFSExtentKey? left, HFSExtentKey? right)
{
if (left is null) return true;
else return left.CompareTo(right) <= 0;
}
public static bool operator >=(HFSExtentKey? left, HFSExtentKey? right)
{
if (left is null) return right is null;
else return left.CompareTo(right) >= 0;
}
}
internal sealed class HFSExtentRecord : HFSStructBase
{
private const int ExtentCount = 8;
public IReadOnlyList<HFSExtentDescriptor> Extents { get; }
private HFSExtentRecord(IReadOnlyList<HFSExtentDescriptor> extents)
=> Extents = extents;
public static HFSExtentRecord Read(ref ReadOnlySpan<byte> data)
{
var extents = new HFSExtentDescriptor[ExtentCount];
for (int i = 0; i < ExtentCount; i++)
extents[i] = HFSExtentDescriptor.Read(ref data);
return new HFSExtentRecord(extents);
}
}
}

View File

@@ -0,0 +1,145 @@
using System;
namespace SharpCompress.Common.Dmg.HFS
{
internal struct HFSPoint
{
public short V;
public short H;
}
internal struct HFSRect
{
public short Top;
public short Left;
public short Bottom;
public short Right;
}
[Flags]
internal enum HFSFinderFlags : ushort
{
None = 0x0000,
IsOnDesk = 0x0001, /* Files and folders (System 6) */
Color = 0x000E, /* Files and folders */
IsShared = 0x0040, /* Files only (Applications only) If */
/* clear, the application needs */
/* to write to its resource fork, */
/* and therefore cannot be shared */
/* on a server */
HasNoINITs = 0x0080, /* Files only (Extensions/Control */
/* Panels only) */
/* This file contains no INIT resource */
HasBeenInited = 0x0100, /* Files only. Clear if the file */
/* contains desktop database resources */
/* ('BNDL', 'FREF', 'open', 'kind'...) */
/* that have not been added yet. Set */
/* only by the Finder. */
/* Reserved for folders */
HasCustomIcon = 0x0400, /* Files and folders */
IsStationery = 0x0800, /* Files only */
NameLocked = 0x1000, /* Files and folders */
HasBundle = 0x2000, /* Files only */
IsInvisible = 0x4000, /* Files and folders */
IsAlias = 0x8000 /* Files only */
}
[Flags]
internal enum HFSExtendedFinderFlags : ushort
{
None = 0x0000,
ExtendedFlagsAreInvalid = 0x8000, /* The other extended flags */
/* should be ignored */
HasCustomBadge = 0x0100, /* The file or folder has a */
/* badge resource */
HasRoutingInfo = 0x0004 /* The file contains routing */
/* info resource */
}
internal sealed class HFSFileInfo : HFSStructBase
{
public string FileType { get; } /* The type of the file */
public string FileCreator { get; } /* The file's creator */
public HFSFinderFlags FinderFlags { get; }
public HFSPoint Location { get; } /* File's location in the folder. */
public HFSExtendedFinderFlags ExtendedFinderFlags { get; }
public int PutAwayFolderId { get; }
private HFSFileInfo(
string fileType,
string fileCreator,
HFSFinderFlags finderFlags,
HFSPoint location,
HFSExtendedFinderFlags extendedFinderFlags,
int putAwayFolderId)
{
FileType = fileType;
FileCreator = fileCreator;
FinderFlags = finderFlags;
Location = location;
ExtendedFinderFlags = extendedFinderFlags;
PutAwayFolderId = putAwayFolderId;
}
public static HFSFileInfo Read(ref ReadOnlySpan<byte> data)
{
string fileType = ReadOSType(ref data);
string fileCreator = ReadOSType(ref data);
var finderFlags = (HFSFinderFlags)ReadUInt16(ref data);
var location = ReadPoint(ref data);
_ = ReadUInt16(ref data); // reserved
data = data.Slice(4 * sizeof(short)); // reserved
var extendedFinderFlags = (HFSExtendedFinderFlags)ReadUInt16(ref data);
_ = ReadInt16(ref data); // reserved
int putAwayFolderId = ReadInt32(ref data);
return new HFSFileInfo(fileType, fileCreator, finderFlags, location, extendedFinderFlags, putAwayFolderId);
}
}
internal sealed class HFSFolderInfo : HFSStructBase
{
public HFSRect WindowBounds { get; } /* The position and dimension of the */
/* folder's window */
public HFSFinderFlags FinderFlags { get; }
public HFSPoint Location { get; } /* Folder's location in the parent */
/* folder. If set to {0, 0}, the Finder */
/* will place the item automatically */
public HFSPoint ScrollPosition { get; } /* Scroll position (for icon views) */
public HFSExtendedFinderFlags ExtendedFinderFlags { get; }
public int PutAwayFolderId { get; }
private HFSFolderInfo(
HFSRect windowBounds,
HFSFinderFlags finderFlags,
HFSPoint location,
HFSPoint scrollPosition,
HFSExtendedFinderFlags extendedFinderFlags,
int putAwayFolderId)
{
WindowBounds = windowBounds;
FinderFlags = finderFlags;
Location = location;
ScrollPosition = scrollPosition;
ExtendedFinderFlags = extendedFinderFlags;
PutAwayFolderId = putAwayFolderId;
}
public static HFSFolderInfo Read(ref ReadOnlySpan<byte> data)
{
var windowBounds = ReadRect(ref data);
var finderFlags = (HFSFinderFlags)ReadUInt16(ref data);
var location = ReadPoint(ref data);
_ = ReadUInt16(ref data); // reserved
var scrollPosition = ReadPoint(ref data);
_ = ReadInt32(ref data); // reserved
var extendedFinderFlags = (HFSExtendedFinderFlags)ReadUInt16(ref data);
_ = ReadInt16(ref data); // reserved
int putAwayFolderId = ReadInt32(ref data);
return new HFSFolderInfo(windowBounds, finderFlags, location, scrollPosition, extendedFinderFlags, putAwayFolderId);
}
}
}

View File

@@ -0,0 +1,50 @@
using System;
using System.Collections.Generic;
using System.IO;
namespace SharpCompress.Common.Dmg.HFS
{
internal sealed class HFSForkData : HFSStructBase
{
private const int ExtentCount = 8;
public ulong LogicalSize { get; }
public uint ClumpSize { get; }
public uint TotalBlocks { get; }
public IReadOnlyList<HFSExtentDescriptor> Extents { get; }
private HFSForkData(ulong logicalSize, uint clumpSize, uint totalBlocks, IReadOnlyList<HFSExtentDescriptor> extents)
{
LogicalSize = logicalSize;
ClumpSize = clumpSize;
TotalBlocks = totalBlocks;
Extents = extents;
}
public static HFSForkData Read(Stream stream)
{
ulong logicalSize = ReadUInt64(stream);
uint clumpSize = ReadUInt32(stream);
uint totalBlocks = ReadUInt32(stream);
var extents = new HFSExtentDescriptor[ExtentCount];
for (int i = 0; i < ExtentCount; i++)
extents[i] = HFSExtentDescriptor.Read(stream);
return new HFSForkData(logicalSize, clumpSize, totalBlocks, extents);
}
public static HFSForkData Read(ref ReadOnlySpan<byte> data)
{
ulong logicalSize = ReadUInt64(ref data);
uint clumpSize = ReadUInt32(ref data);
uint totalBlocks = ReadUInt32(ref data);
var extents = new HFSExtentDescriptor[ExtentCount];
for (int i = 0; i < ExtentCount; i++)
extents[i] = HFSExtentDescriptor.Read(ref data);
return new HFSForkData(logicalSize, clumpSize, totalBlocks, extents);
}
}
}

View File

@@ -0,0 +1,196 @@
using SharpCompress.IO;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
namespace SharpCompress.Common.Dmg.HFS
{
internal sealed class HFSForkStream : Stream
{
private readonly Stream _baseStream;
private readonly HFSVolumeHeader _volumeHeader;
private readonly IReadOnlyList<HFSExtentDescriptor> _extents;
private long _position;
private bool _isEnded;
private int _extentIndex;
private Stream? _extentStream;
public override bool CanRead => true;
public override bool CanWrite => false;
public override bool CanSeek => true;
public override long Length { get; }
public override long Position
{
get => _position;
set
{
if ((value < 0) || (value > Length)) throw new ArgumentOutOfRangeException(nameof(value));
if (value == Length)
{
// End of the stream
_position = Length;
_isEnded = true;
_extentIndex = -1;
_extentStream = null;
}
else if (value != _position)
{
_position = value;
// We first have to determine in which extent we are now, then we seek to the exact position in that extent.
long offsetInExtent = _position;
for (int i = 0; i < _extents.Count; i++)
{
var extent = _extents[i];
long extentSize = extent.BlockCount * _volumeHeader.BlockSize;
if (offsetInExtent < extentSize) // target position falls inside this extent
{
if (i == _extentIndex)
{
// We are in the same extent so just seek to the correct position
_extentStream!.Position = offsetInExtent;
}
else
{
_extentIndex = i;
_extentStream = GetExtentStream();
_extentStream.Position = offsetInExtent;
}
break;
}
else
{
offsetInExtent -= extentSize;
}
}
}
}
}
public HFSForkStream(Stream baseStream, HFSVolumeHeader volumeHeader, HFSForkData forkData)
{
_baseStream = baseStream;
_volumeHeader = volumeHeader;
_extents = forkData.Extents;
Length = (long)forkData.LogicalSize;
_position = 0;
_extentIndex = -1;
_extentIndex = GetNextExtent();
_isEnded = _extentIndex < 0;
if (!_isEnded) _extentStream = GetExtentStream();
}
public HFSForkStream(
Stream baseStream, HFSVolumeHeader volumeHeader, HFSForkData forkData, uint fileId,
IReadOnlyDictionary<HFSExtentKey, HFSExtentRecord> extents)
{
_baseStream = baseStream;
_volumeHeader = volumeHeader;
Length = (long)forkData.LogicalSize;
uint blocks = (uint)forkData.Extents.Sum(e => e.BlockCount);
var totalExtents = new List<HFSExtentDescriptor>(forkData.Extents);
_extents = totalExtents;
var nextKey = new HFSExtentKey(0, fileId, blocks);
while (extents.TryGetValue(nextKey, out var record))
{
blocks += (uint)record.Extents.Sum(e => e.BlockCount);
totalExtents.AddRange(record.Extents);
nextKey = new HFSExtentKey(0, fileId, blocks);
}
_position = 0;
_extentIndex = -1;
_extentIndex = GetNextExtent();
_isEnded = _extentIndex < 0;
if (!_isEnded) _extentStream = GetExtentStream();
}
private int GetNextExtent()
{
int index = _extentIndex + 1;
if (index >= _extents.Count) return -1;
var extent = _extents[index];
if ((extent.StartBlock == 0) && (extent.BlockCount == 0)) return -1;
return index;
}
private Stream GetExtentStream()
{
if (_extentIndex < 0)
throw new InvalidOperationException("Invalid extent index");
var extent = _extents[_extentIndex];
return new HFSExtentStream(_baseStream, _volumeHeader, extent);
}
public override void Flush()
{ }
public override int Read(byte[] buffer, int offset, int count)
{
if (_isEnded) return 0;
count = (int)Math.Min(count, Length - Position);
int readCount = _extentStream!.Read(buffer, offset, count);
while (readCount < count)
{
_extentIndex = GetNextExtent();
if (_extentIndex < 0)
{
_isEnded = true;
_position += readCount;
return readCount;
}
_extentStream = GetExtentStream();
readCount += _extentStream.Read(buffer, offset + readCount, count - readCount);
}
_position += readCount;
return readCount;
}
public override long Seek(long offset, SeekOrigin origin)
{
switch (origin)
{
case SeekOrigin.Begin:
Position = offset;
break;
case SeekOrigin.Current:
Position += offset;
break;
case SeekOrigin.End:
Position = Length - offset;
break;
}
return Position;
}
public override void SetLength(long value)
=> throw new NotSupportedException();
public override void Write(byte[] buffer, int offset, int count)
=> throw new NotSupportedException();
private sealed class HFSExtentStream : SeekableSubStream
{
public HFSExtentStream(Stream stream, HFSVolumeHeader volumeHeader, HFSExtentDescriptor extent)
: base(stream, (long)extent.StartBlock * volumeHeader.BlockSize, (long)extent.BlockCount * volumeHeader.BlockSize)
{ }
}
}
}

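A fork stores its first eight extents inline (see HFSForkData); anything beyond that is chained through the extents overflow B-tree, keyed by the number of blocks already mapped, which is exactly the loop in the second constructor above. A standalone sketch with invented numbers:

    using System;
    using System.Collections.Generic;
    using System.Linq;

    class ExtentChainExample
    {
        static void Main()
        {
            // Eight inline extents covering 80 blocks (block counts are invented).
            var blockCounts = new List<int> { 10, 10, 10, 10, 10, 10, 10, 10 };

            // Overflow records keyed by the number of blocks already mapped --
            // the same role HFSExtentKey.StartBlock plays above.
            var overflow = new Dictionary<int, int[]> { [80] = new[] { 16, 16 } };

            int covered = blockCounts.Sum();
            while (overflow.TryGetValue(covered, out var more))
            {
                blockCounts.AddRange(more);
                covered += more.Sum();
            }
            Console.WriteLine(covered); // 112 blocks across 10 extents
        }
    }
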
View File

@@ -0,0 +1,91 @@
using System;
namespace SharpCompress.Common.Dmg.HFS
{
internal abstract class HFSKeyedRecord : HFSStructBase
{
private readonly HFSKeyCompareType _compareType;
private readonly bool _isHFSX;
private HFSCatalogKey? _catalogKey;
private HFSExtentKey? _extentKey;
public byte[] Key { get; }
public HFSCatalogKey GetCatalogKey() => _catalogKey ??= new HFSCatalogKey(Key, _compareType, _isHFSX);
public HFSExtentKey GetExtentKey() => _extentKey ??= new HFSExtentKey(Key);
protected HFSKeyedRecord(byte[] key, HFSKeyCompareType compareType, bool isHFSX)
{
Key = key;
_compareType = compareType;
_isHFSX = isHFSX;
}
}
internal sealed class HFSPointerRecord : HFSKeyedRecord
{
public uint NodeNumber { get; }
private HFSPointerRecord(byte[] key, uint nodeNumber, HFSKeyCompareType compareType, bool isHFSX)
: base(key, compareType, isHFSX)
{
NodeNumber = nodeNumber;
}
public static HFSPointerRecord Read(ref ReadOnlySpan<byte> data, HFSTreeHeaderRecord headerRecord, bool isHFSX)
{
bool isBigKey = headerRecord.Attributes.HasFlag(HFSTreeAttributes.BigKeys);
ushort keyLength = isBigKey ? ReadUInt16(ref data) : ReadUInt8(ref data);
if (!headerRecord.Attributes.HasFlag(HFSTreeAttributes.VariableIndexKeys)) keyLength = headerRecord.MaxKeyLength;
int keySize = (isBigKey ? 2 : 1) + keyLength;
var key = new byte[keyLength];
data.Slice(0, keyLength).CopyTo(key);
data = data.Slice(keyLength);
// data is always aligned to 2 bytes
if (keySize % 2 == 1) data = data.Slice(1);
uint nodeNumber = ReadUInt32(ref data);
return new HFSPointerRecord(key, nodeNumber, headerRecord.KeyCompareType, isHFSX);
}
}
internal sealed class HFSDataRecord : HFSKeyedRecord
{
public byte[] Data { get; }
private HFSDataRecord(byte[] key, byte[] data, HFSKeyCompareType compareType, bool isHFSX)
: base(key, compareType, isHFSX)
{
Data = data;
}
public static HFSDataRecord Read(ref ReadOnlySpan<byte> data, int size, HFSTreeHeaderRecord headerRecord, bool isHFSX)
{
bool isBigKey = headerRecord.Attributes.HasFlag(HFSTreeAttributes.BigKeys);
ushort keyLength = isBigKey ? ReadUInt16(ref data) : ReadUInt8(ref data);
int keySize = (isBigKey ? 2 : 1) + keyLength;
size -= keySize;
var key = new byte[keyLength];
data.Slice(0, keyLength).CopyTo(key);
data = data.Slice(keyLength);
// data is always aligned to 2 bytes
if (keySize % 2 == 1)
{
data = data.Slice(1);
size--;
}
var structData = new byte[size];
data.Slice(0, size).CopyTo(structData);
data = data.Slice(size);
return new HFSDataRecord(key, structData, headerRecord.KeyCompareType, isHFSX);
}
}
}

View File

@@ -0,0 +1,35 @@
using System;
namespace SharpCompress.Common.Dmg.HFS
{
internal sealed class HFSPermissions : HFSStructBase
{
public uint OwnerID { get; }
public uint GroupID { get; }
public byte AdminFlags { get; }
public byte OwnerFlags { get; }
public ushort FileMode { get; }
public uint Special { get; }
private HFSPermissions(uint ownerID, uint groupID, byte adminFlags, byte ownerFlags, ushort fileMode, uint special)
{
OwnerID = ownerID;
GroupID = groupID;
AdminFlags = adminFlags;
OwnerFlags = ownerFlags;
FileMode = fileMode;
Special = special;
}
public static HFSPermissions Read(ref ReadOnlySpan<byte> data)
{
return new HFSPermissions(
ReadUInt32(ref data),
ReadUInt32(ref data),
ReadUInt8(ref data),
ReadUInt8(ref data),
ReadUInt16(ref data),
ReadUInt32(ref data));
}
}
}

View File

@@ -0,0 +1,187 @@
using System;
using System.Buffers.Binary;
using System.IO;
using System.Text;
namespace SharpCompress.Common.Dmg.HFS
{
internal abstract class HFSStructBase
{
private const int StringSize = 510;
private const int OSTypeSize = 4;
private static readonly DateTime Epoch = new DateTime(1904, 1, 1, 0, 0, 0, DateTimeKind.Utc);
private static readonly byte[] _buffer = new byte[StringSize];
protected static byte ReadUInt8(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(byte)) != sizeof(byte))
throw new EndOfStreamException();
return _buffer[0];
}
protected static ushort ReadUInt16(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(ushort)) != sizeof(ushort))
throw new EndOfStreamException();
return BinaryPrimitives.ReadUInt16BigEndian(_buffer);
}
protected static short ReadInt16(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(short)) != sizeof(short))
throw new EndOfStreamException();
return BinaryPrimitives.ReadInt16BigEndian(_buffer);
}
protected static uint ReadUInt32(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(uint)) != sizeof(uint))
throw new EndOfStreamException();
return BinaryPrimitives.ReadUInt32BigEndian(_buffer);
}
protected static int ReadInt32(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(int)) != sizeof(int))
throw new EndOfStreamException();
return BinaryPrimitives.ReadInt32BigEndian(_buffer);
}
protected static ulong ReadUInt64(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(ulong)) != sizeof(ulong))
throw new EndOfStreamException();
return BinaryPrimitives.ReadUInt64BigEndian(_buffer);
}
protected static long ReadInt64(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(long)) != sizeof(long))
throw new EndOfStreamException();
return BinaryPrimitives.ReadInt64BigEndian(_buffer);
}
protected static string ReadString(Stream stream)
{
ushort length = ReadUInt16(stream);
if (stream.Read(_buffer, 0, StringSize) != StringSize)
throw new EndOfStreamException();
return Encoding.BigEndianUnicode.GetString(_buffer, 0, Math.Min(length * 2, StringSize)); // HFS+ strings are big-endian UTF-16
}
protected static DateTime ReadDate(Stream stream)
{
uint seconds = ReadUInt32(stream);
var span = TimeSpan.FromSeconds(seconds);
return Epoch + span;
}
protected static byte ReadUInt8(ref ReadOnlySpan<byte> data)
{
byte val = data[0];
data = data.Slice(sizeof(byte));
return val;
}
protected static ushort ReadUInt16(ref ReadOnlySpan<byte> data)
{
ushort val = BinaryPrimitives.ReadUInt16BigEndian(data);
data = data.Slice(sizeof(ushort));
return val;
}
protected static short ReadInt16(ref ReadOnlySpan<byte> data)
{
short val = BinaryPrimitives.ReadInt16BigEndian(data);
data = data.Slice(sizeof(short));
return val;
}
protected static uint ReadUInt32(ref ReadOnlySpan<byte> data)
{
uint val = BinaryPrimitives.ReadUInt32BigEndian(data);
data = data.Slice(sizeof(uint));
return val;
}
protected static int ReadInt32(ref ReadOnlySpan<byte> data)
{
int val = BinaryPrimitives.ReadInt32BigEndian(data);
data = data.Slice(sizeof(int));
return val;
}
protected static ulong ReadUInt64(ref ReadOnlySpan<byte> data)
{
ulong val = BinaryPrimitives.ReadUInt64BigEndian(data);
data = data.Slice(sizeof(ulong));
return val;
}
protected static long ReadInt64(ref ReadOnlySpan<byte> data)
{
long val = BinaryPrimitives.ReadInt64BigEndian(data);
data = data.Slice(sizeof(long));
return val;
}
protected static string ReadString(ref ReadOnlySpan<byte> data, bool truncate)
{
int length = ReadUInt16(ref data);
if (truncate)
{
length = Math.Min(length * 2, StringSize);
data.Slice(0, length).CopyTo(_buffer);
data = data.Slice(length);
return Encoding.BigEndianUnicode.GetString(_buffer, 0, length);
}
else
{
data.Slice(0, StringSize).CopyTo(_buffer);
data = data.Slice(StringSize);
return Encoding.BigEndianUnicode.GetString(_buffer, 0, Math.Min(length * 2, StringSize));
}
}
protected static DateTime ReadDate(ref ReadOnlySpan<byte> data)
{
uint seconds = ReadUInt32(ref data);
var span = TimeSpan.FromSeconds(seconds);
return Epoch + span;
}
protected static string ReadOSType(ref ReadOnlySpan<byte> data)
{
data.Slice(0, OSTypeSize).CopyTo(_buffer);
data = data.Slice(OSTypeSize);
return Encoding.ASCII.GetString(_buffer, 0, OSTypeSize).NullTerminate();
}
protected static HFSPoint ReadPoint(ref ReadOnlySpan<byte> data)
{
return new HFSPoint()
{
V = ReadInt16(ref data),
H = ReadInt16(ref data)
};
}
protected static HFSRect ReadRect(ref ReadOnlySpan<byte> data)
{
return new HFSRect()
{
Top = ReadInt16(ref data),
Left = ReadInt16(ref data),
Bottom = ReadInt16(ref data),
Right = ReadInt16(ref data)
};
}
}
}

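ReadDate interprets timestamps as unsigned seconds since the classic Mac OS epoch of 1904-01-01 UTC (an unsigned 32-bit field, so valid until early 2040); a quick sanity check:

    using System;

    class HfsDateExample
    {
        static readonly DateTime Epoch = new DateTime(1904, 1, 1, 0, 0, 0, DateTimeKind.Utc);

        static void Main()
        {
            // Three billion seconds after the HFS epoch.
            Console.WriteLine(Epoch + TimeSpan.FromSeconds(3_000_000_000)); // 1999-01-24 05:20:00 UTC
        }
    }
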
View File

@@ -0,0 +1,108 @@
using System;
using System.IO;
namespace SharpCompress.Common.Dmg.HFS
{
internal enum HFSTreeType : byte
{
HFS = 0, // control file
User = 128, // user btree type starts from 128
Reserved = 255
}
internal enum HFSKeyCompareType : byte
{
CaseFolding = 0xCF, // case-insensitive
BinaryCompare = 0xBC // case-sensitive
}
[Flags]
internal enum HFSTreeAttributes : uint
{
None = 0x00000000,
BadClose = 0x00000001,
BigKeys = 0x00000002,
VariableIndexKeys = 0x00000004
}
internal sealed class HFSTreeHeaderRecord : HFSStructBase
{
public ushort TreeDepth;
public uint RootNode;
public uint LeafRecords;
public uint FirstLeafNode;
public uint LastLeafNode;
public ushort NodeSize;
public ushort MaxKeyLength;
public uint TotalNodes;
public uint FreeNodes;
public uint ClumpSize;
public HFSTreeType TreeType;
public HFSKeyCompareType KeyCompareType;
public HFSTreeAttributes Attributes;
private HFSTreeHeaderRecord(
ushort treeDepth,
uint rootNode,
uint leafRecords,
uint firstLeafNode,
uint lastLeafNode,
ushort nodeSize,
ushort maxKeyLength,
uint totalNodes,
uint freeNodes,
uint clumpSize,
HFSTreeType treeType,
HFSKeyCompareType keyCompareType,
HFSTreeAttributes attributes)
{
TreeDepth = treeDepth;
RootNode = rootNode;
LeafRecords = leafRecords;
FirstLeafNode = firstLeafNode;
LastLeafNode = lastLeafNode;
NodeSize = nodeSize;
MaxKeyLength = maxKeyLength;
TotalNodes = totalNodes;
FreeNodes = freeNodes;
ClumpSize = clumpSize;
TreeType = treeType;
KeyCompareType = keyCompareType;
Attributes = attributes;
}
public static HFSTreeHeaderRecord Read(Stream stream)
{
ushort treeDepth = ReadUInt16(stream);
uint rootNode = ReadUInt32(stream);
uint leafRecords = ReadUInt32(stream);
uint firstLeafNode = ReadUInt32(stream);
uint lastLeafNode = ReadUInt32(stream);
ushort nodeSize = ReadUInt16(stream);
ushort maxKeyLength = ReadUInt16(stream);
uint totalNodes = ReadUInt32(stream);
uint freeNodes = ReadUInt32(stream);
_ = ReadUInt16(stream); // reserved
uint clumpSize = ReadUInt32(stream);
var treeType = (HFSTreeType)ReadUInt8(stream);
var keyCompareType = (HFSKeyCompareType)ReadUInt8(stream);
var attributes = (HFSTreeAttributes)ReadUInt32(stream);
for (int i = 0; i < 16; i++) _ = ReadUInt32(stream); // reserved
return new HFSTreeHeaderRecord(
treeDepth,
rootNode,
leafRecords,
firstLeafNode,
lastLeafNode,
nodeSize,
maxKeyLength,
totalNodes,
freeNodes,
clumpSize,
treeType,
keyCompareType,
attributes);
}
}
}

View File

@@ -0,0 +1,167 @@
using System;
using System.Collections.Generic;
using System.IO;
namespace SharpCompress.Common.Dmg.HFS
{
internal abstract class HFSTreeNode : HFSStructBase
{
private static byte[]? _buffer = null;
public HFSTreeNodeDescriptor Descriptor { get; }
protected HFSTreeNode(HFSTreeNodeDescriptor descriptor)
=> Descriptor = descriptor;
public static bool TryRead(Stream stream, HFSTreeHeaderRecord headerRecord, bool isHFSX, out HFSTreeNode? node)
{
node = null;
if (!HFSTreeNodeDescriptor.TryRead(stream, out var descriptor)) return false;
int size = (int)headerRecord.NodeSize - HFSTreeNodeDescriptor.Size;
if ((_buffer is null) || (_buffer.Length < size))
_buffer = new byte[size * 2];
if (stream.Read(_buffer, 0, size) != size)
throw new EndOfStreamException();
ReadOnlySpan<byte> data = _buffer.AsSpan(0, size);
switch (descriptor!.Kind)
{
case HFSTreeNodeKind.Leaf:
node = HFSLeafTreeNode.Read(descriptor, data, headerRecord, isHFSX);
return true;
case HFSTreeNodeKind.Index:
node = HFSIndexTreeNode.Read(descriptor, data, headerRecord, isHFSX);
return true;
case HFSTreeNodeKind.Map:
node = HFSMapTreeNode.Read(descriptor, data);
return true;
}
return false;
}
}
internal sealed class HFSHeaderTreeNode : HFSTreeNode
{
private const int UserDataSize = 128;
public HFSTreeHeaderRecord HeaderRecord { get; }
public IReadOnlyList<byte> UserData { get; }
public IReadOnlyList<byte> Map { get; }
private HFSHeaderTreeNode(
HFSTreeNodeDescriptor descriptor,
HFSTreeHeaderRecord headerRecord,
IReadOnlyList<byte> userData,
IReadOnlyList<byte> map)
: base(descriptor)
{
HeaderRecord = headerRecord;
UserData = userData;
Map = map;
}
public static HFSHeaderTreeNode Read(HFSTreeNodeDescriptor descriptor, Stream stream)
{
if (descriptor.Kind != HFSTreeNodeKind.Header)
throw new ArgumentException("Descriptor does not define a header node");
var headerRecord = HFSTreeHeaderRecord.Read(stream);
var userData = new byte[UserDataSize];
if (stream.Read(userData, 0, UserDataSize) != UserDataSize)
throw new EndOfStreamException();
int mapSize = (int)(headerRecord.NodeSize - 256);
var map = new byte[mapSize];
if (stream.Read(map, 0, mapSize) != mapSize)
throw new EndOfStreamException();
// offset values (not required for header node)
_ = ReadUInt16(stream);
_ = ReadUInt16(stream);
_ = ReadUInt16(stream);
_ = ReadUInt16(stream);
return new HFSHeaderTreeNode(descriptor, headerRecord, userData, map);
}
}
internal sealed class HFSMapTreeNode : HFSTreeNode
{
public IReadOnlyList<byte> Map { get; }
private HFSMapTreeNode(HFSTreeNodeDescriptor descriptor, IReadOnlyList<byte> map)
: base(descriptor)
{
Map = map;
}
public static HFSMapTreeNode Read(HFSTreeNodeDescriptor descriptor, ReadOnlySpan<byte> data)
{
int mapSize = data.Length - 6;
var map = new byte[mapSize];
data.Slice(0, mapSize).CopyTo(map);
return new HFSMapTreeNode(descriptor, map);
}
}
internal sealed class HFSIndexTreeNode : HFSTreeNode
{
public IReadOnlyList<HFSPointerRecord> Records { get; }
private HFSIndexTreeNode(HFSTreeNodeDescriptor descriptor, IReadOnlyList<HFSPointerRecord> records)
: base(descriptor)
{
Records = records;
}
public static HFSIndexTreeNode Read(HFSTreeNodeDescriptor descriptor, ReadOnlySpan<byte> data, HFSTreeHeaderRecord headerRecord, bool isHFSX)
{
int recordCount = descriptor.NumRecords;
var records = new HFSPointerRecord[recordCount];
for (int i = 0; i < recordCount; i++)
records[i] = HFSPointerRecord.Read(ref data, headerRecord, isHFSX);
return new HFSIndexTreeNode(descriptor, records);
}
}
internal sealed class HFSLeafTreeNode : HFSTreeNode
{
public IReadOnlyList<HFSDataRecord> Records { get; }
private HFSLeafTreeNode(HFSTreeNodeDescriptor descriptor, IReadOnlyList<HFSDataRecord> records)
: base(descriptor)
{
Records = records;
}
public static HFSLeafTreeNode Read(HFSTreeNodeDescriptor descriptor, ReadOnlySpan<byte> data, HFSTreeHeaderRecord headerRecord, bool isHFSX)
{
int recordCount = descriptor.NumRecords;
var recordOffsets = new int[recordCount + 1];
for (int i = 0; i < recordOffsets.Length; i++)
{
var offsetData = data.Slice(data.Length - (2 * i) - 2);
ushort offset = ReadUInt16(ref offsetData);
recordOffsets[i] = offset;
}
var records = new HFSDataRecord[recordCount];
for (int i = 0; i < recordCount; i++)
{
int size = recordOffsets[i + 1] - recordOffsets[i];
records[i] = HFSDataRecord.Read(ref data, size, headerRecord, isHFSX);
}
return new HFSLeafTreeNode(descriptor, records);
}
}
}

View File

@@ -0,0 +1,55 @@
using System;
using System.IO;
namespace SharpCompress.Common.Dmg.HFS
{
internal enum HFSTreeNodeKind : sbyte
{
Leaf = -1,
Index = 0,
Header = 1,
Map = 2
}
internal sealed class HFSTreeNodeDescriptor : HFSStructBase
{
public const int Size = 14;
public uint FLink { get; }
public uint BLink { get; }
public HFSTreeNodeKind Kind { get; }
public byte Height { get; }
public ushort NumRecords { get; }
private HFSTreeNodeDescriptor(uint fLink, uint bLink, HFSTreeNodeKind kind, byte height, ushort numRecords)
{
FLink = fLink;
BLink = bLink;
Kind = kind;
Height = height;
NumRecords = numRecords;
}
public static bool TryRead(Stream stream, out HFSTreeNodeDescriptor? descriptor)
{
descriptor = null;
uint fLink = ReadUInt32(stream);
uint bLink = ReadUInt32(stream);
sbyte rawKind = (sbyte)ReadUInt8(stream);
if (!Enum.IsDefined(typeof(HFSTreeNodeKind), rawKind)) return false;
var kind = (HFSTreeNodeKind)rawKind;
byte height = ReadUInt8(stream);
if (((kind == HFSTreeNodeKind.Header) || (kind == HFSTreeNodeKind.Map)) && (height != 0)) return false;
if ((kind == HFSTreeNodeKind.Leaf) && (height != 1)) return false;
ushort numRecords = ReadUInt16(stream);
_ = ReadUInt16(stream); // reserved
descriptor = new HFSTreeNodeDescriptor(fLink, bLink, kind, height, numRecords);
return true;
}
}
}

View File

@@ -0,0 +1,206 @@
using SharpCompress.Archives.Dmg;
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
namespace SharpCompress.Common.Dmg.HFS
{
internal static class HFSUtil
{
private const string CorruptHFSMessage = "Corrupt HFS volume";
private static (HFSHeaderTreeNode, IReadOnlyList<HFSTreeNode>) ReadTree(Stream stream, bool isHFSX)
{
if (!HFSTreeNodeDescriptor.TryRead(stream, out var headerDesc))
throw new InvalidFormatException(CorruptHFSMessage);
var header = HFSHeaderTreeNode.Read(headerDesc!, stream);
var nodes = new HFSTreeNode[header.HeaderRecord.TotalNodes];
nodes[0] = header;
for (int i = 1; i < nodes.Length; i++)
{
if (!HFSTreeNode.TryRead(stream, header.HeaderRecord, isHFSX, out var node))
throw new InvalidFormatException(CorruptHFSMessage);
nodes[i] = node!;
}
return (header, nodes);
}
private static void EnumerateExtentsTree(
IReadOnlyList<HFSTreeNode> extentsTree,
IDictionary<HFSExtentKey, HFSExtentRecord> records,
int parentIndex)
{
var parent = extentsTree[parentIndex];
if (parent is HFSLeafTreeNode leafNode)
{
foreach (var record in leafNode.Records)
{
ReadOnlySpan<byte> data = record.Data.AsSpan();
var recordData = HFSExtentRecord.Read(ref data);
var key = record.GetExtentKey();
records.Add(key, recordData);
}
}
else if (parent is HFSIndexTreeNode indexNode)
{
foreach (var record in indexNode.Records)
EnumerateExtentsTree(extentsTree, records, (int)record.NodeNumber);
}
else
{
throw new InvalidFormatException(CorruptHFSMessage);
}
}
private static IReadOnlyDictionary<HFSExtentKey, HFSExtentRecord> LoadExtents(IReadOnlyList<HFSTreeNode> extentsTree, int rootIndex)
{
var records = new Dictionary<HFSExtentKey, HFSExtentRecord>();
if (rootIndex == 0) return records;
EnumerateExtentsTree(extentsTree, records, rootIndex);
return records;
}
private static void EnumerateCatalogTree(
HFSHeaderTreeNode catalogHeader,
IReadOnlyList<HFSTreeNode> catalogTree,
IDictionary<HFSCatalogKey, HFSCatalogRecord> records,
IDictionary<uint, HFSCatalogThread> threads,
int parentIndex,
bool isHFSX)
{
var parent = catalogTree[parentIndex];
if (parent is HFSLeafTreeNode leafNode)
{
foreach (var record in leafNode.Records)
{
ReadOnlySpan<byte> data = record.Data.AsSpan();
if (HFSCatalogRecord.TryRead(ref data, catalogHeader.HeaderRecord.KeyCompareType, isHFSX, out var recordData))
{
var key = record.GetCatalogKey();
if ((recordData!.Type == HFSCatalogRecordType.FileThread) || (recordData!.Type == HFSCatalogRecordType.FolderThread))
{
threads.Add(key.ParentId, (HFSCatalogThread)recordData);
}
else
{
records.Add(key, recordData);
}
}
else
{
throw new InvalidFormatException(CorruptHFSMessage);
}
}
}
else if (parent is HFSIndexTreeNode indexNode)
{
foreach (var record in indexNode.Records)
EnumerateCatalogTree(catalogHeader, catalogTree, records, threads, (int)record.NodeNumber, isHFSX);
}
else
{
throw new InvalidFormatException(CorruptHFSMessage);
}
}
private static (HFSCatalogKey, HFSCatalogRecord) GetRecord(uint id, IDictionary<HFSCatalogKey, HFSCatalogRecord> records, IDictionary<uint, HFSCatalogThread> threads)
{
if (threads.TryGetValue(id, out var thread))
{
if (records.TryGetValue(thread.CatalogKey, out var record))
return (thread.CatalogKey, record!);
}
throw new InvalidFormatException(CorruptHFSMessage);
}
private static string SanitizePath(string path)
{
var sb = new StringBuilder(path.Length);
foreach (char c in path)
{
if (!char.IsControl(c))
sb.Append(c);
}
return sb.ToString();
}
private static string GetPath(HFSCatalogKey key, IDictionary<HFSCatalogKey, HFSCatalogRecord> records, IDictionary<uint, HFSCatalogThread> threads)
{
if (key.ParentId == 1)
{
return key.Name;
}
else
{
var (parentKey, _) = GetRecord(key.ParentId, records, threads);
var path = Path.Combine(GetPath(parentKey, records, threads), key.Name);
return SanitizePath(path);
}
}
private static IEnumerable<DmgArchiveEntry> LoadEntriesFromCatalogTree(
Stream partitionStream,
DmgFilePart filePart,
HFSVolumeHeader volumeHeader,
HFSHeaderTreeNode catalogHeader,
IReadOnlyList<HFSTreeNode> catalogTree,
IReadOnlyDictionary<HFSExtentKey, HFSExtentRecord> extents,
DmgArchive archive,
int rootIndex)
{
if (rootIndex == 0) return Array.Empty<DmgArchiveEntry>();
var records = new Dictionary<HFSCatalogKey, HFSCatalogRecord>();
var threads = new Dictionary<uint, HFSCatalogThread>();
EnumerateCatalogTree(catalogHeader, catalogTree, records, threads, rootIndex, volumeHeader.IsHFSX);
var entries = new List<DmgArchiveEntry>();
foreach (var kvp in records)
{
var key = kvp.Key;
var record = kvp.Value;
string path = GetPath(key, records, threads);
var stream = (record is HFSCatalogFile file)
? new HFSForkStream(partitionStream, volumeHeader, file.DataFork, file.FileId, extents)
: null;
var entry = new DmgArchiveEntry(stream, archive, record, path, filePart);
entries.Add(entry);
}
return entries;
}
public static IEnumerable<DmgArchiveEntry> LoadEntriesFromPartition(Stream partitionStream, string fileName, DmgArchive archive)
{
if (!HFSVolumeHeader.TryRead(partitionStream, out var volumeHeader))
throw new InvalidFormatException(CorruptHFSMessage);
var filePart = new DmgFilePart(partitionStream, fileName);
var extentsFile = volumeHeader!.ExtentsFile;
var extentsStream = new HFSForkStream(partitionStream, volumeHeader, extentsFile);
var (extentsHeader, extentsTree) = ReadTree(extentsStream, volumeHeader.IsHFSX);
var extents = LoadExtents(extentsTree, (int)extentsHeader.HeaderRecord.RootNode);
var catalogFile = volumeHeader!.CatalogFile;
var catalogStream = new HFSForkStream(partitionStream, volumeHeader, catalogFile);
var (catalogHeader, catalogTree) = ReadTree(catalogStream, volumeHeader.IsHFSX);
return LoadEntriesFromCatalogTree(
partitionStream,
filePart,
volumeHeader,
catalogHeader,
catalogTree,
extents,
archive,
(int)catalogHeader.HeaderRecord.RootNode);
}
}
}
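
To make the recursion in GetPath concrete, here is a toy catalog; all ids and names are invented for illustration:

// CNID 1 is the root's parent, so the recursion bottoms out there.
//
//   records: { ParentId = 1, Name = "Macintosh HD" } -> root folder, id 2
//            { ParentId = 2, Name = "readme.txt"   } -> file
//   threads: 2 -> key { ParentId = 1, Name = "Macintosh HD" }
//
// GetPath({ 2, "readme.txt" })
//   = Path.Combine(GetPath({ 1, "Macintosh HD" }), "readme.txt")
//   = "Macintosh HD/readme.txt"   (separator per platform)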

View File

@@ -0,0 +1,179 @@
using System;
using System.Collections.Generic;
using System.IO;
namespace SharpCompress.Common.Dmg.HFS
{
internal sealed class HFSVolumeHeader : HFSStructBase
{
private const ushort SignaturePlus = 0x482B;
private const ushort SignatureX = 0x4858;
private const int FinderInfoCount = 8;
public bool IsHFSX { get; }
public ushort Version { get; }
public uint Attributes { get; }
public uint LastMountedVersion { get; }
public uint JournalInfoBlock { get; }
public DateTime CreateDate { get; }
public DateTime ModifyDate { get; }
public DateTime BackupDate { get; }
public DateTime CheckedDate { get; }
public uint FileCount { get; }
public uint FolderCount { get; }
public uint BlockSize { get; }
public uint TotalBlocks { get; }
public uint FreeBlocks { get; }
public uint NextAllocation { get; }
public uint RsrcClumpSize { get; }
public uint DataClumpSize { get; }
public uint NextCatalogID { get; }
public uint WriteCount { get; }
public ulong EncodingsBitmap { get; }
public IReadOnlyList<uint> FinderInfo { get; }
public HFSForkData AllocationFile { get; }
public HFSForkData ExtentsFile { get; }
public HFSForkData CatalogFile { get; }
public HFSForkData AttributesFile { get; }
public HFSForkData StartupFile { get; }
public HFSVolumeHeader(
bool isHFSX,
ushort version,
uint attributes,
uint lastMountedVersion,
uint journalInfoBlock,
DateTime createDate,
DateTime modifyDate,
DateTime backupDate,
DateTime checkedDate,
uint fileCount,
uint folderCount,
uint blockSize,
uint totalBlocks,
uint freeBlocks,
uint nextAllocation,
uint rsrcClumpSize,
uint dataClumpSize,
uint nextCatalogID,
uint writeCount,
ulong encodingsBitmap,
IReadOnlyList<uint> finderInfo,
HFSForkData allocationFile,
HFSForkData extentsFile,
HFSForkData catalogFile,
HFSForkData attributesFile,
HFSForkData startupFile)
{
IsHFSX = isHFSX;
Version = version;
Attributes = attributes;
LastMountedVersion = lastMountedVersion;
JournalInfoBlock = journalInfoBlock;
CreateDate = createDate;
ModifyDate = modifyDate;
BackupDate = backupDate;
CheckedDate = checkedDate;
FileCount = fileCount;
FolderCount = folderCount;
BlockSize = blockSize;
TotalBlocks = totalBlocks;
FreeBlocks = freeBlocks;
NextAllocation = nextAllocation;
RsrcClumpSize = rsrcClumpSize;
DataClumpSize = dataClumpSize;
NextCatalogID = nextCatalogID;
WriteCount = writeCount;
EncodingsBitmap = encodingsBitmap;
FinderInfo = finderInfo;
AllocationFile = allocationFile;
ExtentsFile = extentsFile;
CatalogFile = catalogFile;
AttributesFile = attributesFile;
StartupFile = startupFile;
}
private static IReadOnlyList<uint> ReadFinderInfo(Stream stream)
{
var finderInfo = new uint[FinderInfoCount];
for (int i = 0; i < FinderInfoCount; i++)
finderInfo[i] = ReadUInt32(stream);
return finderInfo;
}
public static bool TryRead(Stream stream, out HFSVolumeHeader? header)
{
header = null;
stream.Skip(1024); // reserved bytes
bool isHFSX;
ushort sig = ReadUInt16(stream);
if (sig == SignaturePlus) isHFSX = false;
else if (sig == SignatureX) isHFSX = true;
else return false;
ushort version = ReadUInt16(stream);
uint attributes = ReadUInt32(stream);
uint lastMountedVersion = ReadUInt32(stream);
uint journalInfoBlock = ReadUInt32(stream);
DateTime createDate = ReadDate(stream);
DateTime modifyDate = ReadDate(stream);
DateTime backupDate = ReadDate(stream);
DateTime checkedDate = ReadDate(stream);
uint fileCount = ReadUInt32(stream);
uint folderCount = ReadUInt32(stream);
uint blockSize = ReadUInt32(stream);
uint totalBlocks = ReadUInt32(stream);
uint freeBlocks = ReadUInt32(stream);
uint nextAllocation = ReadUInt32(stream);
uint rsrcClumpSize = ReadUInt32(stream);
uint dataClumpSize = ReadUInt32(stream);
uint nextCatalogID = ReadUInt32(stream);
uint writeCount = ReadUInt32(stream);
ulong encodingsBitmap = ReadUInt64(stream);
IReadOnlyList<uint> finderInfo = ReadFinderInfo(stream);
HFSForkData allocationFile = HFSForkData.Read(stream);
HFSForkData extentsFile = HFSForkData.Read(stream);
HFSForkData catalogFile = HFSForkData.Read(stream);
HFSForkData attributesFile = HFSForkData.Read(stream);
HFSForkData startupFile = HFSForkData.Read(stream);
header = new HFSVolumeHeader(
isHFSX,
version,
attributes,
lastMountedVersion,
journalInfoBlock,
createDate,
modifyDate,
backupDate,
checkedDate,
fileCount,
folderCount,
blockSize,
totalBlocks,
freeBlocks,
nextAllocation,
rsrcClumpSize,
dataClumpSize,
nextCatalogID,
writeCount,
encodingsBitmap,
finderInfo,
allocationFile,
extentsFile,
catalogFile,
attributesFile,
startupFile);
return true;
}
}
}
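
ReadDate here comes from HFSStructBase elsewhere in this diff. For reference, the conventional HFS+ decoding is unsigned seconds since the 1904 epoch; a minimal standalone sketch under that assumption:

using System;

internal static class HfsDates
{
    private static readonly DateTime Epoch =
        new DateTime(1904, 1, 1, 0, 0, 0, DateTimeKind.Utc);

    // HFS+ dates are unsigned seconds since 1904-01-01T00:00:00Z.
    public static DateTime FromHfsSeconds(uint seconds)
        => Epoch.AddSeconds(seconds);
}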

View File

@@ -0,0 +1,49 @@
using System;
namespace SharpCompress.Common.Dmg.Headers
{
internal enum BlkxChunkType : uint
{
Zero = 0x00000000u,
Uncompressed = 0x00000001u,
Ignore = 0x00000002u,
AdcCompressed = 0x80000004u,
ZlibCompressed = 0x80000005u,
Bz2Compressed = 0x80000006u,
Comment = 0x7FFFFFFEu,
Last = 0xFFFFFFFFu,
}
internal sealed class BlkxChunk : DmgStructBase
{
private const int SectorSize = 512;
public BlkxChunkType Type { get; } // Compression type used or chunk type
public uint Comment { get; } // "+beg" or "+end", if EntryType is comment (0x7FFFFFFE). Else reserved.
public ulong UncompressedOffset { get; } // Start of this chunk in the uncompressed image, in bytes (stored on disk as a sector number)
public ulong UncompressedLength { get; } // Length of this chunk in the uncompressed image, in bytes (stored on disk as a sector count)
public ulong CompressedOffset { get; } // Start of chunk in data fork
public ulong CompressedLength { get; } // Count of bytes of chunk, in data fork
private BlkxChunk(BlkxChunkType type, uint comment, ulong sectorNumber, ulong sectorCount, ulong compressedOffset, ulong compressedLength)
{
Type = type;
Comment = comment;
UncompressedOffset = sectorNumber * SectorSize;
UncompressedLength = sectorCount * SectorSize;
CompressedOffset = compressedOffset;
CompressedLength = compressedLength;
}
public static bool TryRead(ref ReadOnlySpan<byte> data, out BlkxChunk? chunk)
{
chunk = null;
var type = (BlkxChunkType)ReadUInt32(ref data);
if (!Enum.IsDefined(typeof(BlkxChunkType), type)) return false;
uint comment = ReadUInt32(ref data);
ulong sectorNumber = ReadUInt64(ref data);
ulong sectorCount = ReadUInt64(ref data);
ulong compressedOffset = ReadUInt64(ref data);
ulong compressedLength = ReadUInt64(ref data);
chunk = new BlkxChunk(type, comment, sectorNumber, sectorCount, compressedOffset, compressedLength);
return true;
}
}
}
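
A worked example of the sector-to-byte conversion done in the constructor:

// On-disk values: sectorNumber = 8, sectorCount = 4 (SectorSize = 512)
//   UncompressedOffset = 8 * 512 = 4096
//   UncompressedLength = 4 * 512 = 2048
//   -> the chunk decodes into bytes 4096..6143 of the partition image;
//      CompressedOffset/CompressedLength stay byte-valued as stored.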

View File

@@ -0,0 +1,75 @@
using System;
using System.Collections.Generic;
namespace SharpCompress.Common.Dmg.Headers
{
internal sealed class BlkxTable : DmgStructBase
{
private const uint Signature = 0x6d697368u;
public uint Version { get; } // Current version is 1
public ulong SectorNumber { get; } // Starting disk sector in this blkx descriptor
public ulong SectorCount { get; } // Number of disk sectors in this blkx descriptor
public ulong DataOffset { get; }
public uint BuffersNeeded { get; }
public uint BlockDescriptors { get; } // Number of descriptors
public UdifChecksum Checksum { get; }
public IReadOnlyList<BlkxChunk> Chunks { get; }
private BlkxTable(
uint version,
ulong sectorNumber,
ulong sectorCount,
ulong dataOffset,
uint buffersNeeded,
uint blockDescriptors,
UdifChecksum checksum,
IReadOnlyList<BlkxChunk> chunks)
{
Version = version;
SectorNumber = sectorNumber;
SectorCount = sectorCount;
DataOffset = dataOffset;
BuffersNeeded = buffersNeeded;
BlockDescriptors = blockDescriptors;
Checksum = checksum;
Chunks = chunks;
}
public static bool TryRead(in byte[] buffer, out BlkxTable? header)
{
header = null;
ReadOnlySpan<byte> data = buffer.AsSpan();
uint sig = ReadUInt32(ref data);
if (sig != Signature) return false;
uint version = ReadUInt32(ref data);
ulong sectorNumber = ReadUInt64(ref data);
ulong sectorCount = ReadUInt64(ref data);
ulong dataOffset = ReadUInt64(ref data);
uint buffersNeeded = ReadUInt32(ref data);
uint blockDescriptors = ReadUInt32(ref data);
data = data.Slice(6 * sizeof(uint)); // reserved
var checksum = UdifChecksum.Read(ref data);
uint chunkCount = ReadUInt32(ref data);
var chunks = new BlkxChunk[chunkCount];
for (int i = 0; i < chunkCount; i++)
{
if (!BlkxChunk.TryRead(ref data, out var chunk)) return false;
chunks[i] = chunk!;
}
header = new BlkxTable(version, sectorNumber, sectorCount, dataOffset, buffersNeeded, blockDescriptors, checksum, chunks);
return true;
}
}
}

View File

@@ -0,0 +1,138 @@
using System;
using System.Collections.Generic;
using System.IO;
namespace SharpCompress.Common.Dmg.Headers
{
internal sealed class DmgHeader : DmgStructBase
{
public const int HeaderSize = 512;
private const uint Signature = 0x6B6F6C79u;
private const int UuidSize = 16; // 128 bit
public uint Version { get; } // Current version is 4
public uint Flags { get; } // Flags
public ulong RunningDataForkOffset { get; } // Usually 0
public ulong DataForkOffset { get; } // Data fork offset (usually 0, beginning of file)
public ulong DataForkLength { get; } // Size of data fork (usually up to the XMLOffset, below)
public ulong RsrcForkOffset { get; } // Resource fork offset, if any
public ulong RsrcForkLength { get; } // Resource fork length, if any
public uint SegmentNumber { get; } // Usually 1, may be 0
public uint SegmentCount { get; } // Usually 1, may be 0
public IReadOnlyList<byte> SegmentID { get; } // 128-bit GUID identifier of segment (if SegmentNumber != 0)
public UdifChecksum DataChecksum { get; }
public ulong XMLOffset { get; } // Offset of property list in DMG, from beginning
public ulong XMLLength { get; } // Length of property list
public UdifChecksum Checksum { get; }
public uint ImageVariant { get; } // Commonly 1
public ulong SectorCount { get; } // Size of DMG when expanded, in sectors
private DmgHeader(
uint version,
uint flags,
ulong runningDataForkOffset,
ulong dataForkOffset,
ulong dataForkLength,
ulong rsrcForkOffset,
ulong rsrcForkLength,
uint segmentNumber,
uint segmentCount,
IReadOnlyList<byte> segmentID,
UdifChecksum dataChecksum,
ulong xMLOffset,
ulong xMLLength,
UdifChecksum checksum,
uint imageVariant,
ulong sectorCount)
{
Version = version;
Flags = flags;
RunningDataForkOffset = runningDataForkOffset;
DataForkOffset = dataForkOffset;
DataForkLength = dataForkLength;
RsrcForkOffset = rsrcForkOffset;
RsrcForkLength = rsrcForkLength;
SegmentNumber = segmentNumber;
SegmentCount = segmentCount;
SegmentID = segmentID;
DataChecksum = dataChecksum;
XMLOffset = xMLOffset;
XMLLength = xMLLength;
Checksum = checksum;
ImageVariant = imageVariant;
SectorCount = sectorCount;
}
private static void ReadUuid(ref ReadOnlySpan<byte> data, byte[] buffer)
{
data.Slice(0, UuidSize).CopyTo(buffer);
data = data.Slice(UuidSize);
}
internal static bool TryRead(Stream input, out DmgHeader? header)
{
header = null;
var buffer = new byte[HeaderSize];
int count = input.Read(buffer, 0, HeaderSize);
if (count != HeaderSize) return false;
ReadOnlySpan<byte> data = buffer.AsSpan();
uint sig = ReadUInt32(ref data);
if (sig != Signature) return false;
uint version = ReadUInt32(ref data);
uint size = ReadUInt32(ref data);
if (size != (uint)HeaderSize) return false;
uint flags = ReadUInt32(ref data);
ulong runningDataForkOffset = ReadUInt64(ref data);
ulong dataForkOffset = ReadUInt64(ref data);
ulong dataForkLength = ReadUInt64(ref data);
ulong rsrcForkOffset = ReadUInt64(ref data);
ulong rsrcForkLength = ReadUInt64(ref data);
uint segmentNumber = ReadUInt32(ref data);
uint segmentCount = ReadUInt32(ref data);
var segmentID = new byte[UuidSize];
ReadUuid(ref data, segmentID);
var dataChecksum = UdifChecksum.Read(ref data);
ulong xmlOffset = ReadUInt64(ref data);
ulong xmlLength = ReadUInt64(ref data);
data = data.Slice(120); // Reserved bytes
var checksum = UdifChecksum.Read(ref data);
uint imageVariant = ReadUInt32(ref data);
ulong sectorCount = ReadUInt64(ref data);
header = new DmgHeader(
version,
flags,
runningDataForkOffset,
dataForkOffset,
dataForkLength,
rsrcForkOffset,
rsrcForkLength,
segmentNumber,
segmentCount,
segmentID,
dataChecksum,
xmlOffset,
xmlLength,
checksum,
imageVariant,
sectorCount);
return true;
}
}
}
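
In practice the koly block is stored as a 512-byte trailer at the end of a UDIF image, which allows a cheap format probe. A minimal sketch assuming a seekable stream — the patch's real check is DmgArchive.IsDmgFile; this helper name is ours:

using System.IO;

internal static class KolyProbe
{
    // 'k','o','l','y' read big-endian == 0x6B6F6C79 (the Signature above)
    public static bool LooksLikeDmg(Stream stream)
    {
        if (!stream.CanSeek || stream.Length < 512) return false;
        stream.Seek(-512, SeekOrigin.End);
        var magic = new byte[4];
        if (stream.Read(magic, 0, 4) != 4) return false;
        return magic[0] == 0x6B && magic[1] == 0x6F
            && magic[2] == 0x6C && magic[3] == 0x79;
    }
}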

View File

@@ -0,0 +1,22 @@
using System;
using System.Buffers.Binary;
namespace SharpCompress.Common.Dmg.Headers
{
internal abstract class DmgStructBase
{
protected static uint ReadUInt32(ref ReadOnlySpan<byte> data)
{
uint val = BinaryPrimitives.ReadUInt32BigEndian(data);
data = data.Slice(sizeof(uint));
return val;
}
protected static ulong ReadUInt64(ref ReadOnlySpan<byte> data)
{
ulong val = BinaryPrimitives.ReadUInt64BigEndian(data);
data = data.Slice(sizeof(ulong));
return val;
}
}
}

View File

@@ -0,0 +1,90 @@
using System;
using System.Buffers.Binary;
using System.IO;
namespace SharpCompress.Common.Dmg.Headers
{
internal sealed class GptHeader : GptStructBase
{
private const int HeaderSize = 92;
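// The 8 signature bytes (69,70,73,32,80,65,82,84) spell "EFI PART" in ASCII, the standard GPT header magic.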
private static readonly ulong Signature = BinaryPrimitives.ReadUInt64LittleEndian(new byte[] { 69, 70, 73, 32, 80, 65, 82, 84 });
public uint Revision { get; }
public uint Crc32Header { get; }
public ulong CurrentLba { get; }
public ulong BackupLba { get; }
public ulong FirstUsableLba { get; }
public ulong LastUsableLba { get; }
public Guid DiskGuid { get; }
public ulong EntriesStart { get; }
public uint EntriesCount { get; }
public uint EntriesSize { get; }
public uint Crc32Array { get; }
private GptHeader(
uint revision,
uint crc32Header,
ulong currentLba,
ulong backupLba,
ulong firstUsableLba,
ulong lastUsableLba,
Guid diskGuid,
ulong entriesStart,
uint entriesCount,
uint entriesSize,
uint crc32Array)
{
Revision = revision;
Crc32Header = crc32Header;
CurrentLba = currentLba;
BackupLba = backupLba;
FirstUsableLba = firstUsableLba;
LastUsableLba = lastUsableLba;
DiskGuid = diskGuid;
EntriesStart = entriesStart;
EntriesCount = entriesCount;
EntriesSize = entriesSize;
Crc32Array = crc32Array;
}
public static bool TryRead(Stream stream, out GptHeader? header)
{
header = null;
ulong sig = ReadUInt64(stream);
if (sig != Signature) return false;
uint revision = ReadUInt32(stream);
uint headerSize = ReadUInt32(stream);
if (headerSize != HeaderSize) return false;
uint crc32Header = ReadUInt32(stream);
_ = ReadUInt32(stream); // reserved
ulong currentLba = ReadUInt64(stream);
ulong backupLba = ReadUInt64(stream);
ulong firstUsableLba = ReadUInt64(stream);
ulong lastUsableLba = ReadUInt64(stream);
Guid diskGuid = ReadGuid(stream);
ulong entriesStart = ReadUInt64(stream);
uint entriesCount = ReadUInt32(stream);
uint entriesSize = ReadUInt32(stream);
uint crc32Array = ReadUInt32(stream);
header = new GptHeader(
revision,
crc32Header,
currentLba,
backupLba,
firstUsableLba,
lastUsableLba,
diskGuid,
entriesStart,
entriesCount,
entriesSize,
crc32Array);
return true;
}
}
}

View File

@@ -0,0 +1,36 @@
using System;
using System.IO;
namespace SharpCompress.Common.Dmg.Headers
{
internal sealed class GptPartitionEntry : GptStructBase
{
public Guid TypeGuid { get; }
public Guid Guid { get; }
public ulong FirstLba { get; }
public ulong LastLba { get; }
public ulong Attributes { get; }
public string Name { get; }
private GptPartitionEntry(Guid typeGuid, Guid guid, ulong firstLba, ulong lastLba, ulong attributes, string name)
{
TypeGuid = typeGuid;
Guid = guid;
FirstLba = firstLba;
LastLba = lastLba;
Attributes = attributes;
Name = name;
}
public static GptPartitionEntry Read(Stream stream)
{
return new GptPartitionEntry(
ReadGuid(stream),
ReadGuid(stream),
ReadUInt64(stream),
ReadUInt64(stream),
ReadUInt64(stream),
ReadString(stream, 72));
}
}
}

View File

@@ -0,0 +1,56 @@
using System;
using System.Buffers.Binary;
using System.IO;
using System.Text;
namespace SharpCompress.Common.Dmg.Headers
{
internal abstract class GptStructBase
{
private static readonly byte[] _buffer = new byte[8];
protected static ushort ReadUInt16(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(ushort)) != sizeof(ushort))
throw new EndOfStreamException();
return BinaryPrimitives.ReadUInt16LittleEndian(_buffer);
}
protected static uint ReadUInt32(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(uint)) != sizeof(uint))
throw new EndOfStreamException();
return BinaryPrimitives.ReadUInt32LittleEndian(_buffer);
}
protected static ulong ReadUInt64(Stream stream)
{
if (stream.Read(_buffer, 0, sizeof(ulong)) != sizeof(ulong))
throw new EndOfStreamException();
return BinaryPrimitives.ReadUInt64LittleEndian(_buffer);
}
protected static Guid ReadGuid(Stream stream)
{
int a = (int)ReadUInt32(stream);
short b = (short)ReadUInt16(stream);
short c = (short)ReadUInt16(stream);
if (stream.Read(_buffer, 0, 8) != 8)
throw new EndOfStreamException();
return new Guid(a, b, c, _buffer);
}
protected static string ReadString(Stream stream, int byteSize)
{
var buffer = new byte[byteSize];
if (stream.Read(buffer, 0, byteSize) != byteSize)
throw new EndOfStreamException();
return Encoding.Unicode.GetString(buffer).NullTerminate();
}
}
}

View File

@@ -0,0 +1,33 @@
using System;
using System.Collections.Generic;
namespace SharpCompress.Common.Dmg.Headers
{
internal sealed class UdifChecksum : DmgStructBase
{
private const int MaxSize = 32; // length in uint words (multiply by 4 for byte size)
public uint Type { get; }
public uint Size { get; } // in bits
public IReadOnlyList<uint> Bits { get; }
private UdifChecksum(uint type, uint size, IReadOnlyList<uint> bits)
{
Type = type;
Size = size;
Bits = bits;
}
public static UdifChecksum Read(ref ReadOnlySpan<byte> data)
{
uint type = ReadUInt32(ref data);
uint size = ReadUInt32(ref data);
var bits = new uint[MaxSize];
for (int i = 0; i < MaxSize; i++)
bits[i] = ReadUInt32(ref data);
return new UdifChecksum(type, size, bits);
}
}
}

View File

@@ -0,0 +1,14 @@
using System;
namespace SharpCompress.Common.Dmg
{
internal static class PartitionFormat
{
public static readonly Guid AppleHFS = new Guid("48465300-0000-11AA-AA11-00306543ECAC");
public static readonly Guid AppleUFS = new Guid("55465300-0000-11AA-AA11-00306543ECAC");
public static readonly Guid AppleBoot = new Guid("426F6F74-0000-11AA-AA11-00306543ECAC");
public static readonly Guid AppleRaid = new Guid("52414944-0000-11AA-AA11-00306543ECAC");
public static readonly Guid AppleRaidOffline = new Guid("52414944-5F4F-11AA-AA11-00306543ECAC");
public static readonly Guid AppleLabel = new Guid("4C616265-6C00-11AA-AA11-00306543ECAC");
}
}
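
These type GUIDs are what partition selection keys on; the first GUID field is an ASCII tag (0x48465300 in AppleHFS spells 'H','F','S',NUL). A sketch of turning matching entries into byte ranges — for example to feed a SeekableSubStream, which appears later in this diff — where the 512-byte logical block size and the helper shape are our assumptions:

using System.Collections.Generic;
using SharpCompress.Common.Dmg;
using SharpCompress.Common.Dmg.Headers;

internal static class GptSelect
{
    // Yields (offset, length) byte ranges of Apple HFS partitions,
    // assuming the common 512-byte logical block size.
    public static IEnumerable<(long Offset, long Length)> HfsRanges(
        IEnumerable<GptPartitionEntry> entries, long lbaSize = 512)
    {
        foreach (var e in entries)
        {
            if (e.TypeGuid != PartitionFormat.AppleHFS) continue;
            long offset = (long)e.FirstLba * lbaSize;
            long length = (long)(e.LastLba - e.FirstLba + 1) * lbaSize;
            yield return (offset, length);
        }
    }
}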

View File

@@ -1,4 +1,6 @@
namespace SharpCompress.Common
using System;
namespace SharpCompress.Common
{
public class IncompleteArchiveException : ArchiveException
{
@@ -6,5 +8,10 @@
: base(message)
{
}
public IncompleteArchiveException(string message, Exception inner)
: base(message, inner)
{
}
}
}

View File

@@ -0,0 +1,172 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
namespace SharpCompress.IO
{
internal class CompositeStream : Stream
{
private readonly IReadOnlyList<Stream> _streams;
private long _pos;
private int _streamIndex;
private long _streamPos;
public override bool CanRead { get; }
public override bool CanWrite { get; }
public override bool CanSeek { get; }
public override long Length { get; }
public override long Position
{
get => _pos;
set
{
if (!CanSeek) throw new NotSupportedException();
if ((value < 0) || (value > Length)) throw new ArgumentOutOfRangeException(nameof(value));
_pos = value;
_streamIndex = -1;
long offset = _pos;
for (int i = 0; i < _streams.Count; i++)
{
var stream = _streams[i];
if (offset < stream.Length)
{
_streamIndex = i;
_streamPos = offset;
break;
}
else
{
offset -= stream.Length;
}
}
}
}
public CompositeStream(IReadOnlyList<Stream> streams)
{
CanRead = true;
CanWrite = false;
CanSeek = true;
Length = 0;
_pos = 0;
_streamIndex = 0;
_streamPos = 0;
_streams = streams;
foreach (var stream in _streams)
{
if (!stream.CanRead) throw new ArgumentException("All streams must be readable");
if (!stream.CanSeek) CanSeek = false;
Length += stream.Length;
}
}
public CompositeStream(IEnumerable<Stream> streams)
: this((IReadOnlyList<Stream>)streams.ToArray())
{ }
public CompositeStream(params Stream[] streams)
: this((IReadOnlyList<Stream>)streams)
{ }
public override int Read(byte[] buffer, int offset, int count)
{
Stream? GetCurrent()
{
if ((_streamIndex < 0) || (_streamIndex >= _streams.Count)) return null;
else return _streams[_streamIndex];
}
if (CanSeek)
{
var stream = GetCurrent();
if (stream is null) return 0;
stream.Position = _streamPos;
int readCount = stream.Read(buffer, offset, count);
_pos += readCount;
_streamPos += readCount;
while (readCount < count)
{
_streamIndex++;
stream = GetCurrent();
if (stream is null) return readCount;
stream.Position = _streamPos = 0;
int rc = stream.Read(buffer, offset + readCount, count - readCount);
readCount += rc;
_pos += rc;
_streamPos += rc;
}
return readCount;
}
else
{
var stream = GetCurrent();
if (stream is null) return 0;
int readCount = stream.Read(buffer, offset, count);
_pos += readCount;
while (readCount < count)
{
_streamIndex++;
stream = GetCurrent();
if (stream is null) return readCount;
int rc = stream.Read(buffer, offset + readCount, count - readCount);
readCount += rc;
_pos += rc;
}
return readCount;
}
}
public override long Seek(long offset, SeekOrigin origin)
{
if (CanSeek)
{
long ClampPos(long value) => Math.Min(Math.Max(value, 0), Length);
switch (origin)
{
case SeekOrigin.Begin:
Position = ClampPos(offset);
break;
case SeekOrigin.Current:
Position = ClampPos(Position + offset);
break;
case SeekOrigin.End:
Position = ClampPos(Length + offset);
break;
}
}
return Position;
}
public override void Flush()
{ }
public override void Write(byte[] buffer, int offset, int count)
=> throw new NotSupportedException();
public override void SetLength(long value)
=> throw new NotSupportedException();
protected override void Dispose(bool disposing)
{
if (disposing)
{
foreach (var stream in _streams)
stream.Dispose();
}
base.Dispose(disposing);
}
}
}

View File

@@ -0,0 +1,81 @@
using System;
using System.IO;
namespace SharpCompress.IO
{
internal class ConstantStream : Stream
{
private long _length;
private long _pos;
public byte Value { get; set; }
public override bool CanRead => true;
public override bool CanWrite => false;
public override bool CanSeek => true;
public override long Length => _length;
public override long Position
{
get => _pos;
set
{
if ((value < 0) || (value > Length)) throw new ArgumentOutOfRangeException(nameof(value));
_pos = value;
}
}
public ConstantStream(byte value, long length)
{
Value = value;
_length = length;
_pos = 0;
}
private long ClampPos(long value) => Math.Min(Math.Max(value, 0), Length);
public override void Flush()
{ }
public override int Read(byte[] buffer, int offset, int count)
{
count = (int)Math.Min(count, Length - Position);
for (int i = 0; i < count; i++)
buffer[i + offset] = Value;
Position += count;
return count;
}
public override long Seek(long offset, SeekOrigin origin)
{
switch (origin)
{
case SeekOrigin.Begin:
Position = ClampPos(offset);
break;
case SeekOrigin.Current:
Position = ClampPos(Position + offset);
break;
case SeekOrigin.End:
Position = ClampPos(Length + offset);
break;
}
return Position;
}
public override void SetLength(long value)
{
_length = value;
Position = ClampPos(Position);
}
public override void Write(byte[] buffer, int offset, int count)
=> throw new NotSupportedException();
protected override void Dispose(bool disposing)
{ }
}
}
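
ConstantStream pairs naturally with CompositeStream above: zero-filled regions of an image can be represented without materializing them. An illustrative stitch with invented file names — the Dmg reader's actual wiring may differ:

using System.IO;
using SharpCompress.IO;

internal static class CompositeDemo
{
    public static Stream BuildSparseImage()
    {
        // A data chunk, a 4 KiB zero-filled hole, then another data chunk.
        // Disposing the composite disposes each part.
        return new CompositeStream(
            File.OpenRead("chunk0.bin"),
            new ConstantStream(0, 4096), // zeros without allocating them
            File.OpenRead("chunk1.bin"));
    }
}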

View File

@@ -0,0 +1,79 @@
using System;
using System.IO;
namespace SharpCompress.IO
{
internal class SeekableSubStream : NonDisposingStream
{
private readonly long _origin;
private long _pos;
public override bool CanRead => true;
public override bool CanWrite => false;
public override bool CanSeek => true;
public override long Length { get; }
public override long Position
{
get => _pos;
set
{
if ((value < 0) || (value > Length)) throw new ArgumentOutOfRangeException(nameof(value));
_pos = value;
}
}
public SeekableSubStream(Stream stream, long origin, long length)
: base(stream, false)
{
if (!stream.CanRead) throw new ArgumentException("Requires a readable stream", nameof(stream));
if (!stream.CanSeek) throw new ArgumentException("Requires a seekable stream", nameof(stream));
_origin = origin;
Length = length;
_pos = 0;
}
public override void Flush()
{ }
public override int Read(byte[] buffer, int offset, int count)
{
count = (int)Math.Min(count, Length - Position);
Stream.Position = Position + _origin;
count = Stream.Read(buffer, offset, count);
Position += count;
return count;
}
public override long Seek(long offset, SeekOrigin origin)
{
long ClampPos(long value) => Math.Min(Math.Max(value, 0), Length);
switch (origin)
{
case SeekOrigin.Begin:
Position = ClampPos(offset);
break;
case SeekOrigin.Current:
Position = ClampPos(Position + offset);
break;
case SeekOrigin.End:
Position = ClampPos(Length + offset);
break;
}
return Position;
}
public override void SetLength(long value)
=> throw new NotSupportedException();
public override void Write(byte[] buffer, int offset, int count)
=> throw new NotSupportedException();
}
}

View File

@@ -315,5 +315,12 @@ namespace SharpCompress
{
return source.Replace('\0', ' ').Trim();
}
public static string NullTerminate(this string source)
{
int index = source.IndexOf('\0');
if (index < 0) return source;
return source.Substring(0, index);
}
}
}
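
Unlike the Replace/Trim helper just above it, NullTerminate cuts at the first NUL and keeps everything before it intact, which is what fixed-width UTF-16 fields need:

// "Macintosh HD\0\0\0".NullTerminate() == "Macintosh HD"
// (GptPartitionEntry.Read relies on this for its 72-byte name field)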

View File

@@ -0,0 +1,57 @@
using Xunit;
namespace SharpCompress.Test.Dmg
{
public class DmgArchiveTests : ArchiveTests
{
// ToDo: create the required test archives
[Fact]
public void DmgArchive_StreamRead()
{
ArchiveStreamRead("Dmg.dmg");
}
[Fact]
public void DmgArchive_PathRead()
{
ArchiveFileRead("Dmg.dmg");
}
//[Fact]
//public void DmgArchive_ADC_StreamRead()
//{
// ArchiveStreamRead("Dmg_ADC.dmg");
//}
//[Fact]
//public void DmgArchive_ADC_PathRead()
//{
// ArchiveFileRead("Dmg_ADC.dmg");
//}
//[Fact]
//public void DmgArchive_Zlib_StreamRead()
//{
// ArchiveStreamRead("Dmg_zLib.dmg");
//}
//[Fact]
//public void DmgArchive_Zlib_PathRead()
//{
// ArchiveFileRead("Dmg_zLib.dmg");
//}
//[Fact]
//public void DmgArchive_Bz2_StreamRead()
//{
// ArchiveStreamRead("Dmg_bz2.dmg");
//}
//[Fact]
//public void DmgArchive_Bz2_PathRead()
//{
// ArchiveFileRead("Dmg_bz2.dmg");
//}
}
}

Binary file not shown.