Mirror of https://github.com/adamhathcock/sharpcompress.git
Synced 2026-02-04 21:21:49 +00:00

Compare commits (116 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 83e8bf8462 | |
| | 65bcfadfde | |
| | 5acc195cf7 | |
| | 161f99bbad | |
| | c012db0776 | |
| | 8ee257d299 | |
| | f9522107c3 | |
| | e07046a37a | |
| | ad6d0d9ae8 | |
| | fdc33e91bd | |
| | a34f5a855c | |
| | 6474741af1 | |
| | c10bd840c5 | |
| | e6dded826b | |
| | 8a022c4b18 | |
| | cfef228afc | |
| | 237ff9f055 | |
| | 020f862814 | |
| | d5cbe71cae | |
| | 014ecd4fc1 | |
| | 9600709219 | |
| | fa6107200d | |
| | eb81f972c4 | |
| | 93c1ff396e | |
| | 403baf05a6 | |
| | a51b56339a | |
| | f48a6d47dc | |
| | 5b52463e4c | |
| | 6f08bb72d8 | |
| | 045093f453 | |
| | 566c49ce53 | |
| | d1d2758ee0 | |
| | 5b86c40d5b | |
| | 53393e744e | |
| | 2dd17e3882 | |
| | c4f7433584 | |
| | 9405a7cf4b | |
| | cd677440ce | |
| | c06f4bc5a8 | |
| | d5e6c31a9f | |
| | 4a7337b223 | |
| | 1d8afb817e | |
| | 0f06c3d934 | |
| | 9d5cb8d119 | |
| | a28d686eb9 | |
| | ac525a8ec2 | |
| | 52c44befa2 | |
| | c64251c341 | |
| | bdc57d3c33 | |
| | 7edc437df2 | |
| | 57e4395e7d | |
| | ee17dca9e5 | |
| | e9f3add5b9 | |
| | faf1a9f7e4 | |
| | 5357bd07c7 | |
| | 8c0e2cbd25 | |
| | 674f3b4f28 | |
| | 6e42e00974 | |
| | 8598885258 | |
| | 669e40d53c | |
| | 1adcce6c62 | |
| | 147be6e6e1 | |
| | 5879999094 | |
| | 477a30cf5b | |
| | 2fec03e1ac | |
| | 9a17449a02 | |
| | 087a6aad8c | |
| | e243a8e88f | |
| | b57df8026a | |
| | a1d45b44cd | |
| | e47e1d220a | |
| | 0129a933df | |
| | fa241bb0d7 | |
| | d8804ae108 | |
| | 8090d269e7 | |
| | b0101f20c5 | |
| | dd48e4299a | |
| | c61ee0c24f | |
| | 9576867c34 | |
| | 4426a24298 | |
| | 3b43c1e413 | |
| | aa6575c8f9 | |
| | 0268713960 | |
| | 5faa603d59 | |
| | f36167d425 | |
| | 33ffcb9308 | |
| | a649c25a91 | |
| | fa1e773960 | |
| | 62f7238796 | |
| | d4ccf73340 | |
| | 5ddb0f96bc | |
| | 75a6db8f4c | |
| | ae5635319b | |
| | 98ed3080d0 | |
| | c618eacad4 | |
| | 3b11e6ef97 | |
| | 40af9359db | |
| | d6bf9dae42 | |
| | 13917941ff | |
| | 28f04329ae | |
| | 404a6b231d | |
| | 184596da3c | |
| | f00f393687 | |
| | cbbfb89619 | |
| | 6a5cf11dd0 | |
| | fc1d0a0464 | |
| | 74af1759eb | |
| | ee3162ad71 | |
| | 4357165163 | |
| | 6973436b94 | |
| | 7750ed7106 | |
| | 773158e9d8 | |
| | 4db615597d | |
| | 6bdf2365fc | |
| | 3b2e273832 | |
| | 43c839eb89 | |
.config/dotnet-tools.json (new file, 12 lines)
@@ -0,0 +1,12 @@
{
  "version": 1,
  "isRoot": true,
  "tools": {
    "dotnet-format": {
      "version": "4.1.131201",
      "commands": [
        "dotnet-format"
      ]
    }
  }
}
.github/workflows/dotnetcore.yml (vendored, 2 lines changed)
@@ -12,7 +12,7 @@ jobs:
     - uses: actions/checkout@v1
     - uses: actions/setup-dotnet@v1
       with:
-        dotnet-version: 3.1.302
+        dotnet-version: 5.0.101
     - run: dotnet run -p build/build.csproj
     - uses: actions/upload-artifact@v2
       with:
@@ -19,7 +19,6 @@
 | Tar.XZ | LZMA2 | Decompress | TarArchive | TarReader | TarWriter (3) |
 | GZip (single file) | DEFLATE | Both | GZipArchive | GZipReader | GZipWriter |
 | 7Zip (4) | LZMA, LZMA2, BZip2, PPMd, BCJ, BCJ2, Deflate | Decompress | SevenZipArchive | N/A | N/A |
 | LZip (single file) (5) | LZip (LZMA) | Both | LZipArchive | LZipReader | LZipWriter |

 1. SOLID Rars are only supported in the RarReader API.
 2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported, but only with seekable streams, as the Zip spec doesn't support Zip64 data in post-data descriptors. Deflate64 is only supported for reading.
@@ -1,6 +1,6 @@
 # SharpCompress

-SharpCompress is a compression library in pure C# for .NET Standard 2.0, 2.1 and .NET 4.6 that can unrar, un7zip, unzip, untar, unbzip2, ungzip and unlzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip/lzip is implemented.
+SharpCompress is a compression library in pure C# for .NET Standard 2.0, 2.1, .NET Core 3.1 and .NET 5.0 that can unrar, un7zip, unzip, untar, unbzip2, ungzip and unlzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip/lzip is implemented.

 The major feature is support for non-seekable streams, so large files can be processed on the fly (i.e. from a download stream).
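To make the "non-seekable streams" point concrete, here is a minimal sketch of forward-only extraction with the reader API. It follows SharpCompress's documented ReaderFactory usage; the file name and output directory are placeholders, not part of this changeset:

```csharp
using System.IO;
using SharpCompress.Common;
using SharpCompress.Readers;

// Any readable stream works here, including one that cannot seek (e.g. a download).
using (Stream stream = File.OpenRead("archive.tar.gz"))
using (var reader = ReaderFactory.Open(stream))
{
    while (reader.MoveToNextEntry()) // forward-only: each entry is visited exactly once
    {
        if (!reader.Entry.IsDirectory)
        {
            reader.WriteEntryToDirectory("output", new ExtractionOptions
            {
                ExtractFullPath = true,
                Overwrite = true
            });
        }
    }
}
```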
@@ -9,10 +9,11 @@ using static SimpleExec.Command;
 class Program
 {
     private const string Clean = "clean";
+    private const string Format = "format";
     private const string Build = "build";
     private const string Test = "test";
     private const string Publish = "publish";

     static void Main(string[] args)
     {
         Target(Clean,
@@ -39,30 +40,36 @@ class Program
             }
         });

-        Target(Build, ForEach("net46", "netstandard2.0", "netstandard2.1"),
+        Target(Format, () =>
+        {
+            Run("dotnet", "tool restore");
+            Run("dotnet", "format --check");
+        });
+
+        Target(Build, DependsOn(Format),
             framework =>
             {
                 if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && framework == "net46")
                 {
                     return;
                 }
                 Run("dotnet", "build src/SharpCompress/SharpCompress.csproj -c Release");
             });

-        Target(Test, DependsOn(Build), ForEach("netcoreapp3.1"),
+        Target(Test, DependsOn(Build), ForEach("net5.0"),
             framework =>
             {
                 IEnumerable<string> GetFiles(string d)
                 {
                     return Glob.Files(".", d);
                 }

                 foreach (var file in GetFiles("**/*.Test.csproj"))
                 {
                     Run("dotnet", $"test {file} -c Release -f {framework}");
                 }
             });

         Target(Publish, DependsOn(Test),
             () =>
             {
@@ -70,7 +77,7 @@ class Program
             });

         Target("default", DependsOn(Publish), () => Console.WriteLine("Done!"));

         RunTargetsAndExit(args);
     }
 }
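Read together with the workflow change above, these hunks rewire the Bullseye target graph: the CI step `dotnet run -p build/build.csproj` runs the default target, which pulls in Publish -> Test -> Build -> Format, so a formatting violation reported by `dotnet format --check` now fails the build before anything is compiled or tested. (This reading of the dependency chain is inferred from the hunks shown, not stated in the changeset.)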
@@ -2,13 +2,13 @@

   <PropertyGroup>
     <OutputType>Exe</OutputType>
-    <TargetFramework>netcoreapp3.1</TargetFramework>
+    <TargetFramework>net5.0</TargetFramework>
   </PropertyGroup>

   <ItemGroup>
-    <PackageReference Include="Bullseye" Version="3.3.0" />
-    <PackageReference Include="Glob" Version="1.1.7" />
-    <PackageReference Include="SimpleExec" Version="6.2.0" />
+    <PackageReference Include="Bullseye" Version="3.6.0" />
+    <PackageReference Include="Glob" Version="1.1.8" />
+    <PackageReference Include="SimpleExec" Version="6.4.0" />
   </ItemGroup>

 </Project>
@@ -1,5 +1,5 @@
 {
   "sdk": {
-    "version": "3.1.302"
+    "version": "5.0.101"
   }
 }
src/SharpCompress/Algorithms/Alder32.cs (new file, 285 lines)
@@ -0,0 +1,285 @@
// Copyright (c) Six Labors and contributors.
// Licensed under the GNU Affero General Public License, Version 3.

using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
#if !NETSTANDARD2_0 && !NETSTANDARD2_1
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
#endif

namespace SharpCompress.Algorithms
{
    /// <summary>
    /// Calculates the 32 bit Adler checksum of a given buffer according to
    /// RFC 1950 (ZLIB Compressed Data Format Specification version 3.3).
    /// </summary>
    internal static class Adler32
    {
        /// <summary>
        /// The default initial seed value of an Adler32 checksum calculation.
        /// </summary>
        public const uint SeedValue = 1U;

#if !NETSTANDARD2_0 && !NETSTANDARD2_1
        private const int MinBufferSize = 64;
#endif

        // Largest prime smaller than 65536
        private const uint BASE = 65521;

        // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
        private const uint NMAX = 5552;

        /// <summary>
        /// Calculates the Adler32 checksum with the bytes taken from the span.
        /// </summary>
        /// <param name="buffer">The readonly span of bytes.</param>
        /// <returns>The <see cref="uint"/>.</returns>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static uint Calculate(ReadOnlySpan<byte> buffer)
        {
            return Calculate(SeedValue, buffer);
        }

        /// <summary>
        /// Calculates the Adler32 checksum with the bytes taken from the span and seed.
        /// </summary>
        /// <param name="adler">The input Adler32 value.</param>
        /// <param name="buffer">The readonly span of bytes.</param>
        /// <returns>The <see cref="uint"/>.</returns>
        public static uint Calculate(uint adler, ReadOnlySpan<byte> buffer)
        {
            if (buffer.IsEmpty)
            {
                return SeedValue;
            }

#if !NETSTANDARD2_0 && !NETSTANDARD2_1
            if (Sse3.IsSupported && buffer.Length >= MinBufferSize)
            {
                return CalculateSse(adler, buffer);
            }

            return CalculateScalar(adler, buffer);
#else
            return CalculateScalar(adler, buffer);
#endif
        }

        // Based on https://github.com/chromium/chromium/blob/master/third_party/zlib/adler32_simd.c
#if !NETSTANDARD2_0 && !NETSTANDARD2_1
        private static unsafe uint CalculateSse(uint adler, ReadOnlySpan<byte> buffer)
        {
            uint s1 = adler & 0xFFFF;
            uint s2 = (adler >> 16) & 0xFFFF;

            // Process the data in blocks.
            const int BLOCK_SIZE = 1 << 5;

            uint length = (uint)buffer.Length;
            uint blocks = length / BLOCK_SIZE;
            length -= blocks * BLOCK_SIZE;

            int index = 0;
            fixed (byte* bufferPtr = &buffer[0])
            {
                index += (int)blocks * BLOCK_SIZE;
                var localBufferPtr = bufferPtr;

                // _mm_setr_epi8 on x86
                var tap1 = Vector128.Create(32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17);
                var tap2 = Vector128.Create(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
                Vector128<byte> zero = Vector128<byte>.Zero;
                var ones = Vector128.Create((short)1);

                while (blocks > 0)
                {
                    uint n = NMAX / BLOCK_SIZE; /* The NMAX constraint. */
                    if (n > blocks)
                    {
                        n = blocks;
                    }

                    blocks -= n;

                    // Process n blocks of data. At most NMAX data bytes can be
                    // processed before s2 must be reduced modulo BASE.
                    Vector128<int> v_ps = Vector128.CreateScalar(s1 * n).AsInt32();
                    Vector128<int> v_s2 = Vector128.CreateScalar(s2).AsInt32();
                    Vector128<int> v_s1 = Vector128<int>.Zero;

                    do
                    {
                        // Load 32 input bytes.
                        Vector128<byte> bytes1 = Sse3.LoadDquVector128(localBufferPtr);
                        Vector128<byte> bytes2 = Sse3.LoadDquVector128(localBufferPtr + 16);

                        // Add previous block byte sum to v_ps.
                        v_ps = Sse2.Add(v_ps, v_s1);

                        // Horizontally add the bytes for s1, multiply-adds the
                        // bytes by [ 32, 31, 30, ... ] for s2.
                        v_s1 = Sse2.Add(v_s1, Sse2.SumAbsoluteDifferences(bytes1, zero).AsInt32());
                        Vector128<short> mad1 = Ssse3.MultiplyAddAdjacent(bytes1, tap1);
                        v_s2 = Sse2.Add(v_s2, Sse2.MultiplyAddAdjacent(mad1, ones));

                        v_s1 = Sse2.Add(v_s1, Sse2.SumAbsoluteDifferences(bytes2, zero).AsInt32());
                        Vector128<short> mad2 = Ssse3.MultiplyAddAdjacent(bytes2, tap2);
                        v_s2 = Sse2.Add(v_s2, Sse2.MultiplyAddAdjacent(mad2, ones));

                        localBufferPtr += BLOCK_SIZE;
                    }
                    while (--n > 0);

                    v_s2 = Sse2.Add(v_s2, Sse2.ShiftLeftLogical(v_ps, 5));

                    // Sum epi32 ints v_s1(s2) and accumulate in s1(s2).
                    const byte S2301 = 0b1011_0001; // A B C D -> B A D C
                    const byte S1032 = 0b0100_1110; // A B C D -> C D A B

                    v_s1 = Sse2.Add(v_s1, Sse2.Shuffle(v_s1, S2301));
                    v_s1 = Sse2.Add(v_s1, Sse2.Shuffle(v_s1, S1032));

                    s1 += (uint)v_s1.ToScalar();

                    v_s2 = Sse2.Add(v_s2, Sse2.Shuffle(v_s2, S2301));
                    v_s2 = Sse2.Add(v_s2, Sse2.Shuffle(v_s2, S1032));

                    s2 = (uint)v_s2.ToScalar();

                    // Reduce.
                    s1 %= BASE;
                    s2 %= BASE;
                }
            }

            ref byte bufferRef = ref MemoryMarshal.GetReference(buffer);

            if (length > 0)
            {
                if (length >= 16)
                {
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    length -= 16;
                }

                while (length-- > 0)
                {
                    s2 += s1 += Unsafe.Add(ref bufferRef, index++);
                }

                if (s1 >= BASE)
                {
                    s1 -= BASE;
                }

                s2 %= BASE;
            }

            return s1 | (s2 << 16);
        }
#endif

        private static uint CalculateScalar(uint adler, ReadOnlySpan<byte> buffer)
        {
            uint s1 = adler & 0xFFFF;
            uint s2 = (adler >> 16) & 0xFFFF;
            uint k;

            ref byte bufferRef = ref MemoryMarshal.GetReference<byte>(buffer);
            uint length = (uint)buffer.Length;
            int index = 0;

            while (length > 0)
            {
                k = length < NMAX ? length : NMAX;
                length -= k;

                while (k >= 16)
                {
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    s1 += Unsafe.Add(ref bufferRef, index++);
                    s2 += s1;
                    k -= 16;
                }

                if (k != 0)
                {
                    do
                    {
                        s1 += Unsafe.Add(ref bufferRef, index++);
                        s2 += s1;
                    }
                    while (--k != 0);
                }

                s1 %= BASE;
                s2 %= BASE;
            }

            return (s2 << 16) | s1;
        }
    }
}
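As a quick check on the scalar path: s1 is the running byte sum and s2 the sum of the successive s1 values, both reduced modulo 65521, with s2 packed into the high 16 bits of the result. A hedged usage sketch using the well-known Adler-32 test vector for the ASCII string "Wikipedia" (the class is internal, so outside the library this would need InternalsVisibleTo; that setup is assumed here):

```csharp
using System;
using System.Text;
using SharpCompress.Algorithms; // internal class: assumes InternalsVisibleTo or in-assembly code

class Adler32Demo
{
    static void Main()
    {
        // "Wikipedia" in ASCII: s1 = 1 + sum(bytes) = 920 = 0x0398,
        // s2 = sum of the running s1 values = 4582 = 0x11E6.
        byte[] data = Encoding.ASCII.GetBytes("Wikipedia");
        uint checksum = Adler32.Calculate(data);
        Console.WriteLine($"0x{checksum:X8}"); // prints 0x11E60398
    }
}
```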
@@ -81,29 +81,29 @@ namespace SharpCompress.Archives
         /// <summary>
         /// Returns a ReadOnlyCollection of all the RarArchiveEntries across the one or many parts of the RarArchive.
         /// </summary>
-        public virtual ICollection<TEntry> Entries { get { return lazyEntries; } }
+        public virtual ICollection<TEntry> Entries => lazyEntries;

         /// <summary>
         /// Returns a ReadOnlyCollection of all the RarArchiveVolumes across the one or many parts of the RarArchive.
         /// </summary>
-        public ICollection<TVolume> Volumes { get { return lazyVolumes; } }
+        public ICollection<TVolume> Volumes => lazyVolumes;

         /// <summary>
         /// The total size of the files compressed in the archive.
         /// </summary>
-        public virtual long TotalSize { get { return Entries.Aggregate(0L, (total, cf) => total + cf.CompressedSize); } }
+        public virtual long TotalSize => Entries.Aggregate(0L, (total, cf) => total + cf.CompressedSize);

         /// <summary>
         /// The total size of the files as uncompressed in the archive.
         /// </summary>
-        public virtual long TotalUncompressSize { get { return Entries.Aggregate(0L, (total, cf) => total + cf.Size); } }
+        public virtual long TotalUncompressSize => Entries.Aggregate(0L, (total, cf) => total + cf.Size);

         protected abstract IEnumerable<TVolume> LoadVolumes(IEnumerable<Stream> streams);
         protected abstract IEnumerable<TEntry> LoadEntries(IEnumerable<TVolume> volumes);

-        IEnumerable<IArchiveEntry> IArchive.Entries { get { return Entries.Cast<IArchiveEntry>(); } }
+        IEnumerable<IArchiveEntry> IArchive.Entries => Entries.Cast<IArchiveEntry>();

-        IEnumerable<IVolume> IArchive.Volumes { get { return lazyVolumes.Cast<IVolume>(); } }
+        IEnumerable<IVolume> IArchive.Volumes => lazyVolumes.Cast<IVolume>();

         public virtual void Dispose()
         {
@@ -132,9 +132,9 @@ namespace SharpCompress.Archives
         void IExtractionListener.FireFilePartExtractionBegin(string name, long size, long compressedSize)
         {
             FilePartExtractionBegin?.Invoke(this, new FilePartExtractionBeginEventArgs(
-                compressedSize : compressedSize,
-                size : size,
-                name : name
+                compressedSize: compressedSize,
+                size: size,
+                name: name
             ));
         }

@@ -160,7 +160,7 @@ namespace SharpCompress.Archives
         /// <summary>
         /// Archive is SOLID (this means the Archive saved bytes by reusing information, which helps for archives containing many small files).
         /// </summary>
-        public virtual bool IsSolid { get { return false; } }
+        public virtual bool IsSolid => false;

         /// <summary>
         /// The archive can find all the parts of the archive needed to fully extract the archive. This forces the parsing of the entire archive.
@@ -12,11 +12,28 @@ namespace SharpCompress.Archives
         where TEntry : IArchiveEntry
         where TVolume : IVolume
     {
+        private class RebuildPauseDisposable : IDisposable
+        {
+            private readonly AbstractWritableArchive<TEntry, TVolume> archive;
+
+            public RebuildPauseDisposable(AbstractWritableArchive<TEntry, TVolume> archive)
+            {
+                this.archive = archive;
+                archive.pauseRebuilding = true;
+            }
+
+            public void Dispose()
+            {
+                archive.pauseRebuilding = false;
+                archive.RebuildModifiedCollection();
+            }
+        }
         private readonly List<TEntry> newEntries = new List<TEntry>();
         private readonly List<TEntry> removedEntries = new List<TEntry>();

         private readonly List<TEntry> modifiedEntries = new List<TEntry>();
         private bool hasModifications;
+        private bool pauseRebuilding;

         internal AbstractWritableArchive(ArchiveType type)
             : base(type)
@@ -45,8 +62,17 @@ namespace SharpCompress.Archives
             }
         }

+        public IDisposable PauseEntryRebuilding()
+        {
+            return new RebuildPauseDisposable(this);
+        }
+
         private void RebuildModifiedCollection()
         {
+            if (pauseRebuilding)
+            {
+                return;
+            }
             hasModifications = true;
             newEntries.RemoveAll(v => removedEntries.Contains(v));
             modifiedEntries.Clear();
@@ -83,8 +109,7 @@ namespace SharpCompress.Archives
         public TEntry AddEntry(string key, Stream source, bool closeStream,
                                long size = 0, DateTime? modified = null)
         {
-            if (key.StartsWith("/")
-                || key.StartsWith("\\"))
+            if (key.Length > 0 && key[0] is '/' or '\\')
             {
                 key = key.Substring(1);
             }
@@ -103,7 +128,7 @@ namespace SharpCompress.Archives
             foreach (var path in Entries.Select(x => x.Key))
             {
                 var p = path.Replace('/', '\\');
-                if (p.StartsWith("\\"))
+                if (p.Length > 0 && p[0] == '\\')
                 {
                     p = p.Substring(1);
                 }
@@ -1,5 +1,6 @@
 using System;
 using System.IO;
+using SharpCompress.Archives.Dmg;
 using SharpCompress.Archives.GZip;
 using SharpCompress.Archives.Rar;
 using SharpCompress.Archives.SevenZip;
@@ -10,7 +11,7 @@ using SharpCompress.Readers;

 namespace SharpCompress.Archives
 {
-    public class ArchiveFactory
+    public static class ArchiveFactory
     {
         /// <summary>
         /// Opens an Archive for random access
@@ -25,7 +26,7 @@ namespace SharpCompress.Archives
             {
                 throw new ArgumentException("Stream should be readable and seekable");
             }
-            readerOptions = readerOptions ?? new ReaderOptions();
+            readerOptions ??= new ReaderOptions();
             if (ZipArchive.IsZipFile(stream, null))
             {
                 stream.Seek(0, SeekOrigin.Begin);
@@ -44,6 +45,12 @@ namespace SharpCompress.Archives
                 return GZipArchive.Open(stream, readerOptions);
             }
             stream.Seek(0, SeekOrigin.Begin);
+            if (DmgArchive.IsDmgFile(stream))
+            {
+                stream.Seek(0, SeekOrigin.Begin);
+                return DmgArchive.Open(stream, readerOptions);
+            }
+            stream.Seek(0, SeekOrigin.Begin);
             if (RarArchive.IsRarFile(stream, readerOptions))
             {
                 stream.Seek(0, SeekOrigin.Begin);
@@ -55,30 +62,18 @@ namespace SharpCompress.Archives
                 stream.Seek(0, SeekOrigin.Begin);
                 return TarArchive.Open(stream, readerOptions);
             }
-            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip");
+            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip, Dmg");
         }

         public static IWritableArchive Create(ArchiveType type)
         {
-            switch (type)
+            return type switch
             {
-                case ArchiveType.Zip:
-                {
-                    return ZipArchive.Create();
-                }
-                case ArchiveType.Tar:
-                {
-                    return TarArchive.Create();
-                }
-                case ArchiveType.GZip:
-                {
-                    return GZipArchive.Create();
-                }
-                default:
-                {
-                    throw new NotSupportedException("Cannot create Archives of type: " + type);
-                }
-            }
+                ArchiveType.Zip => ZipArchive.Create(),
+                ArchiveType.Tar => TarArchive.Create(),
+                ArchiveType.GZip => GZipArchive.Create(),
+                _ => throw new NotSupportedException("Cannot create Archives of type: " + type)
+            };
         }

         /// <summary>
@@ -100,35 +95,40 @@ namespace SharpCompress.Archives
         public static IArchive Open(FileInfo fileInfo, ReaderOptions? options = null)
         {
             fileInfo.CheckNotNull(nameof(fileInfo));
-            options = options ?? new ReaderOptions { LeaveStreamOpen = false };
-            using (var stream = fileInfo.OpenRead())
+            options ??= new ReaderOptions { LeaveStreamOpen = false };
+
+            using var stream = fileInfo.OpenRead();
+            if (ZipArchive.IsZipFile(stream, null))
             {
-                if (ZipArchive.IsZipFile(stream, null))
-                {
-                    return ZipArchive.Open(fileInfo, options);
-                }
-                stream.Seek(0, SeekOrigin.Begin);
-                if (SevenZipArchive.IsSevenZipFile(stream))
-                {
-                    return SevenZipArchive.Open(fileInfo, options);
-                }
-                stream.Seek(0, SeekOrigin.Begin);
-                if (GZipArchive.IsGZipFile(stream))
-                {
-                    return GZipArchive.Open(fileInfo, options);
-                }
-                stream.Seek(0, SeekOrigin.Begin);
-                if (RarArchive.IsRarFile(stream, options))
-                {
-                    return RarArchive.Open(fileInfo, options);
-                }
-                stream.Seek(0, SeekOrigin.Begin);
-                if (TarArchive.IsTarFile(stream))
-                {
-                    return TarArchive.Open(fileInfo, options);
-                }
-                throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip");
+                return ZipArchive.Open(fileInfo, options);
             }
+            stream.Seek(0, SeekOrigin.Begin);
+            if (SevenZipArchive.IsSevenZipFile(stream))
+            {
+                return SevenZipArchive.Open(fileInfo, options);
+            }
+            stream.Seek(0, SeekOrigin.Begin);
+            if (GZipArchive.IsGZipFile(stream))
+            {
+                return GZipArchive.Open(fileInfo, options);
+            }
+            stream.Seek(0, SeekOrigin.Begin);
+            if (DmgArchive.IsDmgFile(stream))
+            {
+                stream.Seek(0, SeekOrigin.Begin);
+                return DmgArchive.Open(fileInfo, options);
+            }
+            stream.Seek(0, SeekOrigin.Begin);
+            if (RarArchive.IsRarFile(stream, options))
+            {
+                return RarArchive.Open(fileInfo, options);
+            }
+            stream.Seek(0, SeekOrigin.Begin);
+            if (TarArchive.IsTarFile(stream))
+            {
+                return TarArchive.Open(fileInfo, options);
+            }
+            throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, Dmg");
         }

         /// <summary>
@@ -137,12 +137,10 @@ namespace SharpCompress.Archives
         public static void WriteToDirectory(string sourceArchive, string destinationDirectory,
                                             ExtractionOptions? options = null)
         {
-            using (IArchive archive = Open(sourceArchive))
+            using IArchive archive = Open(sourceArchive);
+            foreach (IArchiveEntry entry in archive.Entries)
             {
-                foreach (IArchiveEntry entry in archive.Entries)
-                {
-                    entry.WriteToDirectory(destinationDirectory, options);
-                }
+                entry.WriteToDirectory(destinationDirectory, options);
             }
         }
     }
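A short usage sketch for the factory above ("input.bin" is a placeholder): Open probes the formats in the order shown in the hunks (Zip, 7Zip, GZip, Dmg, Rar, Tar), rewinding the stream between probes, and throws if nothing matches:

```csharp
using System;
using SharpCompress.Archives;

// Format detection is automatic; an unrecognized payload throws
// InvalidOperationException, per the Open overloads above.
using var archive = ArchiveFactory.Open("input.bin");
foreach (var entry in archive.Entries)
{
    Console.WriteLine($"{entry.Key} ({entry.Size} bytes)");
}
```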
src/SharpCompress/Archives/Dmg/DmgArchive.cs (new file, 117 lines)
@@ -0,0 +1,117 @@
using SharpCompress.Common;
using SharpCompress.Common.Dmg;
using SharpCompress.Common.Dmg.Headers;
using SharpCompress.Common.Dmg.HFS;
using SharpCompress.Readers;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;

namespace SharpCompress.Archives.Dmg
{
    public class DmgArchive : AbstractArchive<DmgArchiveEntry, DmgVolume>
    {
        private readonly string _fileName;

        internal DmgArchive(FileInfo fileInfo, ReaderOptions readerOptions)
            : base(ArchiveType.Dmg, fileInfo, readerOptions)
        {
            _fileName = fileInfo.FullName;
        }

        internal DmgArchive(Stream stream, ReaderOptions readerOptions)
            : base(ArchiveType.Dmg, stream.AsEnumerable(), readerOptions)
        {
            _fileName = string.Empty;
        }

        protected override IReader CreateReaderForSolidExtraction()
            => new DmgReader(ReaderOptions, this, _fileName);

        protected override IEnumerable<DmgArchiveEntry> LoadEntries(IEnumerable<DmgVolume> volumes)
            => volumes.Single().LoadEntries();

        protected override IEnumerable<DmgVolume> LoadVolumes(FileInfo file)
            => new DmgVolume(this, file.OpenRead(), file.FullName, ReaderOptions).AsEnumerable();

        protected override IEnumerable<DmgVolume> LoadVolumes(IEnumerable<Stream> streams)
            => new DmgVolume(this, streams.Single(), string.Empty, ReaderOptions).AsEnumerable();

        public static bool IsDmgFile(FileInfo fileInfo)
        {
            if (!fileInfo.Exists) return false;

            using var stream = fileInfo.OpenRead();
            return IsDmgFile(stream);
        }

        public static bool IsDmgFile(Stream stream)
        {
            long headerPos = stream.Length - DmgHeader.HeaderSize;
            if (headerPos < 0) return false;
            stream.Position = headerPos;

            return DmgHeader.TryRead(stream, out _);
        }

        /// <summary>
        /// Constructor expects a filepath to an existing file.
        /// </summary>
        /// <param name="filePath"></param>
        /// <param name="readerOptions"></param>
        public static DmgArchive Open(string filePath, ReaderOptions? readerOptions = null)
        {
            filePath.CheckNotNullOrEmpty(nameof(filePath));
            return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions());
        }

        /// <summary>
        /// Constructor with a FileInfo object to an existing file.
        /// </summary>
        /// <param name="fileInfo"></param>
        /// <param name="readerOptions"></param>
        public static DmgArchive Open(FileInfo fileInfo, ReaderOptions? readerOptions = null)
        {
            fileInfo.CheckNotNull(nameof(fileInfo));
            return new DmgArchive(fileInfo, readerOptions ?? new ReaderOptions());
        }

        /// <summary>
        /// Takes a seekable Stream as a source
        /// </summary>
        /// <param name="stream"></param>
        /// <param name="readerOptions"></param>
        public static DmgArchive Open(Stream stream, ReaderOptions? readerOptions = null)
        {
            stream.CheckNotNull(nameof(stream));
            return new DmgArchive(stream, readerOptions ?? new ReaderOptions());
        }

        private sealed class DmgReader : AbstractReader<DmgEntry, DmgVolume>
        {
            private readonly DmgArchive _archive;
            private readonly string _fileName;
            private readonly Stream? _partitionStream;

            public override DmgVolume Volume { get; }

            internal DmgReader(ReaderOptions readerOptions, DmgArchive archive, string fileName)
                : base(readerOptions, ArchiveType.Dmg)
            {
                _archive = archive;
                _fileName = fileName;
                Volume = archive.Volumes.Single();

                using var compressedStream = DmgUtil.LoadHFSPartitionStream(Volume.Stream, Volume.Header);
                _partitionStream = compressedStream?.Decompress();
            }

            protected override IEnumerable<DmgEntry> GetEntries(Stream stream)
            {
                if (_partitionStream is null) return Array.Empty<DmgArchiveEntry>();
                else return HFSUtil.LoadEntriesFromPartition(_partitionStream, _fileName, _archive);
            }
        }
    }
}
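A hedged sketch of the new DMG API ("image.dmg" is a placeholder). Detection reads the trailing header at `Length - DmgHeader.HeaderSize`, so it works without scanning the whole file; after that, entries behave like any other archive entries:

```csharp
using System.IO;
using System.Linq;
using SharpCompress.Archives.Dmg;

if (DmgArchive.IsDmgFile(new FileInfo("image.dmg")))
{
    using var archive = DmgArchive.Open("image.dmg");
    foreach (var entry in archive.Entries.Where(e => !e.IsDirectory))
    {
        using var entryStream = entry.OpenEntryStream(); // directories would throw here
        // ... consume entryStream ...
    }
}
```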
src/SharpCompress/Archives/Dmg/DmgArchiveEntry.cs (new file, 32 lines)
@@ -0,0 +1,32 @@
using SharpCompress.Common.Dmg;
using SharpCompress.Common.Dmg.HFS;
using System;
using System.IO;

namespace SharpCompress.Archives.Dmg
{
    public sealed class DmgArchiveEntry : DmgEntry, IArchiveEntry
    {
        private readonly Stream? _stream;

        public bool IsComplete { get; } = true;

        public IArchive Archive { get; }

        internal DmgArchiveEntry(Stream? stream, DmgArchive archive, HFSCatalogRecord record, string path, DmgFilePart part)
            : base(record, path, stream?.Length ?? 0, part)
        {
            _stream = stream;
            Archive = archive;
        }

        public Stream OpenEntryStream()
        {
            if (IsDirectory)
                throw new NotSupportedException("Directories cannot be opened as stream");

            _stream!.Position = 0;
            return _stream;
        }
    }
}
@@ -77,10 +77,9 @@ namespace SharpCompress.Archives.GZip
         {
             return false;
         }
-        using (Stream stream = fileInfo.OpenRead())
-        {
-            return IsGZipFile(stream);
-        }
+
+        using Stream stream = fileInfo.OpenRead();
+        return IsGZipFile(stream);
     }

     public void SaveTo(string filePath)
@@ -99,7 +98,7 @@ namespace SharpCompress.Archives.GZip
     public static bool IsGZipFile(Stream stream)
     {
         // read the header on the first read
-        byte[] header = new byte[10];
+        Span<byte> header = stackalloc byte[10];

         // workitem 8501: handle edge case (decompress empty stream)
         if (!stream.ReadFully(header))
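For context on that 10-byte header: per RFC 1952, a gzip member starts with the magic bytes 0x1F 0x8B followed by the compression method byte (8 = deflate). A minimal sketch of such a signature check, for illustration only (the actual IsGZipFile body is not shown in this hunk):

```csharp
using System;

static class GZipSignature
{
    // Minimal gzip member check per RFC 1952: ID1=0x1F, ID2=0x8B, CM=8 (deflate).
    // Illustrative only; not the actual SharpCompress implementation.
    public static bool LooksLikeGZip(ReadOnlySpan<byte> header)
        => header.Length >= 3
           && header[0] == 0x1F
           && header[1] == 0x8B
           && header[2] == 8;
}
```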
@@ -31,7 +31,7 @@ namespace SharpCompress.Archives
             }
             streamListener.FireEntryExtractionEnd(archiveEntry);
         }

         /// <summary>
         /// Extract to specific directory, retaining filename
         /// </summary>
@@ -45,11 +45,11 @@ namespace SharpCompress.Archives
         /// <summary>
         /// Extract to specific file
         /// </summary>
         public static void WriteToFile(this IArchiveEntry entry,
                                        string destinationFileName,
                                        ExtractionOptions? options = null)
         {
             ExtractionMethods.WriteEntryToFile(entry, destinationFileName, options,
                 (x, fm) =>
                 {
@@ -11,5 +11,11 @@ namespace SharpCompress.Archives
         IArchiveEntry AddEntry(string key, Stream source, bool closeStream, long size = 0, DateTime? modified = null);

         void SaveTo(Stream stream, WriterOptions options);
+
+        /// <summary>
+        /// Use this to pause entry rebuilding when adding large collections of entries. Dispose when complete. A using statement is recommended.
+        /// </summary>
+        /// <returns>IDisposable to resume entry rebuilding</returns>
+        IDisposable PauseEntryRebuilding();
     }
 }
@@ -35,11 +35,14 @@ namespace SharpCompress.Archives
         this IWritableArchive writableArchive,
         string filePath, string searchPattern = "*.*", SearchOption searchOption = SearchOption.AllDirectories)
     {
-        foreach (var path in Directory.EnumerateFiles(filePath, searchPattern, searchOption))
+        using (writableArchive.PauseEntryRebuilding())
         {
-            var fileInfo = new FileInfo(path);
-            writableArchive.AddEntry(path.Substring(filePath.Length), fileInfo.OpenRead(), true, fileInfo.Length,
-                                     fileInfo.LastWriteTime);
+            foreach (var path in Directory.EnumerateFiles(filePath, searchPattern, searchOption))
+            {
+                var fileInfo = new FileInfo(path);
+                writableArchive.AddEntry(path.Substring(filePath.Length), fileInfo.OpenRead(), true, fileInfo.Length,
+                                         fileInfo.LastWriteTime);
+            }
         }
     }
     public static IArchiveEntry AddEntry(this IWritableArchive writableArchive, string key, FileInfo fileInfo)
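PauseEntryRebuilding, declared in the IWritableArchive hunk above and used by AddAllFromDirectory here, batches the per-entry collection rebuild into a single pass when the handle is disposed. A hedged sketch of calling it directly (paths are placeholders, and the SaveTo(string, WriterOptions) extension overload is assumed from the library's public surface):

```csharp
using System.IO;
using SharpCompress.Archives;
using SharpCompress.Archives.Zip;
using SharpCompress.Common;
using SharpCompress.Writers;

using var archive = ZipArchive.Create();
using (archive.PauseEntryRebuilding()) // one rebuild on Dispose instead of one per AddEntry
{
    foreach (var path in Directory.EnumerateFiles("input"))
    {
        var info = new FileInfo(path);
        archive.AddEntry(info.Name, info.OpenRead(), true, info.Length, info.LastWriteTime);
    }
}
archive.SaveTo("out.zip", new WriterOptions(CompressionType.Deflate));
```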
@@ -10,7 +10,8 @@ using SharpCompress.Readers.Rar;

 namespace SharpCompress.Archives.Rar
 {
-    public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
+    public class
+        RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
     {
         internal Lazy<IRarUnpack> UnpackV2017 { get; } = new Lazy<IRarUnpack>(() => new SharpCompress.Compressors.Rar.UnpackV2017.Unpack());
         internal Lazy<IRarUnpack> UnpackV1 { get; } = new Lazy<IRarUnpack>(() => new SharpCompress.Compressors.Rar.UnpackV1.Unpack());
@@ -42,7 +43,7 @@ namespace SharpCompress.Archives.Rar

         protected override IEnumerable<RarArchiveEntry> LoadEntries(IEnumerable<RarVolume> volumes)
         {
-            return RarArchiveEntryFactory.GetEntries(this, volumes);
+            return RarArchiveEntryFactory.GetEntries(this, volumes, ReaderOptions);
         }

         protected override IEnumerable<RarVolume> LoadVolumes(IEnumerable<Stream> streams)
@@ -120,7 +121,7 @@ namespace SharpCompress.Archives.Rar
                 return IsRarFile(stream);
             }
         }

         public static bool IsRarFile(Stream stream, ReaderOptions? options = null)
         {
             try
@@ -6,6 +6,7 @@ using SharpCompress.Common;
 using SharpCompress.Common.Rar;
 using SharpCompress.Common.Rar.Headers;
 using SharpCompress.Compressors.Rar;
+using SharpCompress.Readers;

 namespace SharpCompress.Archives.Rar
 {
@@ -13,11 +14,13 @@ namespace SharpCompress.Archives.Rar
     {
         private readonly ICollection<RarFilePart> parts;
         private readonly RarArchive archive;
+        private readonly ReaderOptions readerOptions;

-        internal RarArchiveEntry(RarArchive archive, IEnumerable<RarFilePart> parts)
+        internal RarArchiveEntry(RarArchive archive, IEnumerable<RarFilePart> parts, ReaderOptions readerOptions)
         {
             this.parts = parts.ToList();
             this.archive = archive;
+            this.readerOptions = readerOptions;
         }

         public override CompressionType CompressionType => CompressionType.Rar;
@@ -61,21 +64,22 @@ namespace SharpCompress.Archives.Rar
             {
                 return new RarStream(archive.UnpackV1.Value, FileHeader, new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>(), archive));
             }

             return new RarStream(archive.UnpackV2017.Value, FileHeader, new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>(), archive));
         }

         public bool IsComplete
         {
             get
             {
-                return parts.Select(fp => fp.FileHeader).Any(fh => !fh.IsSplitAfter);
+                var headers = parts.Select(x => x.FileHeader);
+                return !headers.First().IsSplitBefore && !headers.Last().IsSplitAfter;
             }
         }

         private void CheckIncomplete()
         {
-            if (!IsComplete)
+            if (!readerOptions.DisableCheckIncomplete && !IsComplete)
             {
                 throw new IncompleteArchiveException("ArchiveEntry is incomplete and cannot perform this operation.");
             }
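Two behavioral points fall out of this hunk: IsComplete now requires the entry's first part not to be a continuation (IsSplitBefore) and its last part not to continue into another volume (IsSplitAfter), where the old check accepted any part that merely wasn't split-after; and the new readerOptions.DisableCheckIncomplete flag lets callers skip the CheckIncomplete guard, at their own risk, when extracting from a knowingly partial volume set. (That second reading describes intent inferred from the code, not documented behavior.)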
@@ -1,5 +1,6 @@
 using System.Collections.Generic;
 using SharpCompress.Common.Rar;
+using SharpCompress.Readers;

 namespace SharpCompress.Archives.Rar
 {
@@ -36,11 +37,12 @@ namespace SharpCompress.Archives.Rar
         }

         internal static IEnumerable<RarArchiveEntry> GetEntries(RarArchive archive,
-                                                                IEnumerable<RarVolume> rarParts)
+                                                                IEnumerable<RarVolume> rarParts,
+                                                                ReaderOptions readerOptions)
         {
             foreach (var groupedParts in GetMatchedFileParts(rarParts))
             {
-                yield return new RarArchiveEntry(archive, groupedParts);
+                yield return new RarArchiveEntry(archive, groupedParts, readerOptions);
             }
         }
     }
@@ -23,7 +23,7 @@ namespace SharpCompress.Archives.Rar
                 yield return part;
             }
         }

         internal static IEnumerable<RarVolume> GetParts(FileInfo fileInfo, ReaderOptions options)
         {
             FileInfoRarArchiveVolume part = new FileInfoRarArchiveVolume(fileInfo, options);
@@ -68,7 +68,7 @@ namespace SharpCompress.Archives.Rar
             // .rar, .r00, .r01, ...
             string extension = currentFileInfo.Extension;

-            StringBuilder buffer = new StringBuilder(currentFileInfo.FullName.Length);
+            var buffer = new StringBuilder(currentFileInfo.FullName.Length);
             buffer.Append(currentFileInfo.FullName.Substring(0,
                 currentFileInfo.FullName.Length - extension.Length));
             if (string.Compare(extension, ".rar", StringComparison.OrdinalIgnoreCase) == 0)
@@ -77,8 +77,7 @@ namespace SharpCompress.Archives.Rar
             }
             else
             {
-                int num = 0;
-                if (int.TryParse(extension.Substring(2, 2), out num))
+                if (int.TryParse(extension.Substring(2, 2), out int num))
                 {
                     num++;
                     buffer.Append(".r");
@@ -111,12 +110,11 @@ namespace SharpCompress.Archives.Rar
             }
             StringBuilder buffer = new StringBuilder(currentFileInfo.FullName.Length);
             buffer.Append(currentFileInfo.FullName, 0, startIndex);
-            int num = 0;
             string numString = currentFileInfo.FullName.Substring(startIndex + 5,
                 currentFileInfo.FullName.IndexOf('.', startIndex + 5) -
                 startIndex - 5);
             buffer.Append(".part");
-            if (int.TryParse(numString, out num))
+            if (int.TryParse(numString, out int num))
             {
                 num++;
                 for (int i = 0; i < numString.Length - num.ToString().Length; i++)
@@ -131,7 +131,7 @@ namespace SharpCompress.Archives.SevenZip
             }
         }

-        private static ReadOnlySpan<byte> SIGNATURE => new byte[] {(byte)'7', (byte)'z', 0xBC, 0xAF, 0x27, 0x1C};
+        private static ReadOnlySpan<byte> SIGNATURE => new byte[] { (byte)'7', (byte)'z', 0xBC, 0xAF, 0x27, 0x1C };

         private static bool SignatureMatch(Stream stream)
         {
@@ -79,7 +79,7 @@ namespace SharpCompress.Archives.Tar
             }
             return false;
         }

         /// <summary>
         /// Constructor with a FileInfo object to an existing file.
         /// </summary>
@@ -24,7 +24,7 @@ namespace SharpCompress.Archives.Zip
         /// if the compression method is set to deflate
         /// </summary>
         public CompressionLevel DeflateCompressionLevel { get; set; }

         /// <summary>
         /// Constructor expects a filepath to an existing file.
         /// </summary>
@@ -57,7 +57,7 @@ namespace SharpCompress.Archives.Zip
             stream.CheckNotNull(nameof(stream));
             return new ZipArchive(stream, readerOptions ?? new ReaderOptions());
         }

         public static bool IsZipFile(string filePath, string? password = null)
         {
             return IsZipFile(new FileInfo(filePath), password);
@@ -80,8 +80,7 @@ namespace SharpCompress.Archives.Zip
             StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password, new ArchiveEncoding());
             try
             {
-                ZipHeader header =
-                    headerFactory.ReadStreamHeader(stream).FirstOrDefault(x => x.ZipHeaderType != ZipHeaderType.Split);
+                ZipHeader? header = headerFactory.ReadStreamHeader(stream).FirstOrDefault(x => x.ZipHeaderType != ZipHeaderType.Split);
                 if (header is null)
                 {
                     return false;
@@ -97,7 +96,7 @@ namespace SharpCompress.Archives.Zip
                 return false;
             }
         }

         /// <summary>
         /// Constructor with a FileInfo object to an existing file.
         /// </summary>
@@ -36,7 +36,7 @@ namespace SharpCompress.Common
             Password = password;
         }

-#if NETSTANDARD1_3 || NETSTANDARD2_0 || NETSTANDARD2_1
+#if !NET461
         static ArchiveEncoding()
         {
             Encoding.RegisterProvider(CodePagesEncodingProvider.Instance);
@@ -8,5 +8,10 @@ namespace SharpCompress.Common
             : base(message)
         {
         }
+
+        public ArchiveException(string message, Exception inner)
+            : base(message, inner)
+        {
+        }
     }
 }
@@ -6,6 +6,7 @@
         Zip,
         Tar,
         SevenZip,
-        GZip
+        GZip,
+        Dmg
     }
 }
src/SharpCompress/Common/Dmg/DmgBlockDataStream.cs (new file, 323 lines)
@@ -0,0 +1,323 @@
using SharpCompress.Common.Dmg.Headers;
using SharpCompress.Compressors;
using SharpCompress.Compressors.ADC;
using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
using SharpCompress.IO;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading.Tasks;

namespace SharpCompress.Common.Dmg
{
    internal sealed class DmgBlockDataStream : Stream
    {
        private readonly Stream _baseStream;
        private readonly DmgHeader _header;
        private readonly BlkxTable _table;
        private long _position;
        private bool _isEnded;
        private int _chunkIndex;
        private Stream? _chunkStream;
        private long _chunkPos;

        public override bool CanRead => true;
        public override bool CanWrite => false;
        public override bool CanSeek => true;
        public override long Length { get; }

        public override long Position
        {
            get => _position;
            set
            {
                if ((value < 0) || (value > Length)) throw new ArgumentOutOfRangeException(nameof(value));

                if (value == Length)
                {
                    // End of the stream

                    _position = Length;
                    _isEnded = true;
                    _chunkIndex = -1;
                    _chunkStream = null;
                }
                else if (value != _position)
                {
                    _position = value;

                    // We can only seek over entire chunks at a time because some chunks may be compressed.
                    // So we first find the chunk that we are now in, then we read to the exact position inside that chunk.

                    for (int i = 0; i < _table.Chunks.Count; i++)
                    {
                        var chunk = _table.Chunks[i];
                        if (IsChunkValid(chunk) && (chunk.UncompressedOffset <= (ulong)_position)
                            && ((chunk.UncompressedOffset + chunk.UncompressedLength) > (ulong)_position))
                        {
                            if (i == _chunkIndex)
                            {
                                // We are still in the same chunk, so if the new position is
                                // behind the previous one we can just read to the new position.

                                long offset = (long)chunk.UncompressedOffset + _chunkPos;
                                if (offset <= _position)
                                {
                                    long skip = _position - offset;
                                    _chunkStream!.Skip(skip);
                                    _chunkPos += skip;
                                    break;
                                }
                            }

                            _chunkIndex = i;
                            _chunkStream = GetChunkStream();
                            _chunkPos = 0;

                            // If the chunk happens to not be compressed this read will still result in a fast seek
                            if ((ulong)_position != chunk.UncompressedOffset)
                            {
                                long skip = _position - (long)chunk.UncompressedOffset;
                                _chunkStream.Skip(skip);
                                _chunkPos = skip;
                            }

                            break;
                        }
                    }
                }
            }
        }

        public DmgBlockDataStream(Stream baseStream, DmgHeader header, BlkxTable table)
        {
            if (!baseStream.CanRead) throw new ArgumentException("Requires a readable stream", nameof(baseStream));
            if (!baseStream.CanSeek) throw new ArgumentException("Requires a seekable stream", nameof(baseStream));

            _baseStream = baseStream;
            _header = header;
            _table = table;

            Length = 0;
            foreach (var chunk in table.Chunks)
            {
                if (IsChunkValid(chunk))
                    Length += (long)chunk.UncompressedLength;
            }

            _position = 0;
            _chunkIndex = -1;
            _chunkIndex = GetNextChunk();
            _isEnded = _chunkIndex < 0;
            if (!_isEnded) _chunkStream = GetChunkStream();
            _chunkPos = 0;
        }

        private static bool IsChunkValid(BlkxChunk chunk)
        {
            return chunk.Type switch
            {
                BlkxChunkType.Zero => true,
                BlkxChunkType.Uncompressed => true,
                BlkxChunkType.Ignore => true,
                BlkxChunkType.AdcCompressed => true,
                BlkxChunkType.ZlibCompressed => true,
                BlkxChunkType.Bz2Compressed => true,
                _ => false
            };
        }

        private int GetNextChunk()
        {
            int index = _chunkIndex;
            bool isValid = false;
            while (!isValid)
            {
                index++;
                if (index >= _table.Chunks.Count) return -1;

                var chunk = _table.Chunks[index];
                if (chunk.Type == BlkxChunkType.Last) return -1;

                isValid = IsChunkValid(chunk);
            }
            return index;
        }

        private Stream GetChunkStream()
        {
            if (_chunkIndex < 0)
                throw new InvalidOperationException("Invalid chunk index");

            var chunk = _table.Chunks[_chunkIndex];

            // For our purposes, ignore behaves the same as zero
            if ((chunk.Type == BlkxChunkType.Zero) || (chunk.Type == BlkxChunkType.Ignore))
                return new ConstantStream(0, (long)chunk.UncompressedLength);

            // We first create a sub-stream on the region of the base stream where the
            // (possibly compressed) data is physically located at.
            var subStream = new SeekableSubStream(_baseStream,
                (long)(_header.DataForkOffset + _table.DataOffset + chunk.CompressedOffset),
                (long)chunk.CompressedLength);

            // Then we nest that sub-stream into the appropriate compressed stream.
            return chunk.Type switch
            {
                BlkxChunkType.Uncompressed => subStream,
                BlkxChunkType.AdcCompressed => new ADCStream(subStream, CompressionMode.Decompress),
                BlkxChunkType.ZlibCompressed => new ZlibStream(subStream, CompressionMode.Decompress),
                BlkxChunkType.Bz2Compressed => new BZip2Stream(subStream, CompressionMode.Decompress, false),
                _ => throw new InvalidOperationException("Invalid chunk type")
            };
        }

        // Decompresses the entire stream in memory for faster extraction.
        // This is about two orders of magnitude faster than decompressing
        // on-the-fly while extracting, but also eats RAM for breakfast.
        public Stream Decompress()
        {
            // We have to load all the chunks into separate memory streams first
            // because otherwise the decompression threads would block each other
            // and actually be slower than just a single decompression thread.

            var rawStreams = new Stream?[_table.Chunks.Count];
            for (int i = 0; i < rawStreams.Length; i++)
            {
                var chunk = _table.Chunks[i];
                if (IsChunkValid(chunk))
                {
                    if ((chunk.Type == BlkxChunkType.Zero) || (chunk.Type == BlkxChunkType.Ignore))
                    {
                        rawStreams[i] = new ConstantStream(0, (long)chunk.UncompressedLength);
                    }
                    else
                    {
                        var subStream = new SeekableSubStream(_baseStream,
                            (long)(_header.DataForkOffset + _table.DataOffset + chunk.CompressedOffset),
                            (long)chunk.CompressedLength);

                        var memStream = new MemoryStream();
                        subStream.CopyTo(memStream);
                        memStream.Position = 0;
                        rawStreams[i] = memStream;
                    }
                }
                else
                {
                    rawStreams[i] = null;
                }
            }

            // Now we can decompress the chunks multithreaded

            var streams = new Stream?[_table.Chunks.Count];
            Parallel.For(0, streams.Length, i =>
            {
                var rawStream = rawStreams[i];
                if (rawStream is not null)
                {
                    var chunk = _table.Chunks[i];
                    if ((chunk.Type == BlkxChunkType.Zero)
                        || (chunk.Type == BlkxChunkType.Ignore)
                        || (chunk.Type == BlkxChunkType.Uncompressed))
                    {
                        streams[i] = rawStream;
                    }
                    else
                    {
                        Stream compStream = chunk.Type switch
                        {
                            BlkxChunkType.AdcCompressed => new ADCStream(rawStream, CompressionMode.Decompress),
                            BlkxChunkType.ZlibCompressed => new ZlibStream(rawStream, CompressionMode.Decompress),
                            BlkxChunkType.Bz2Compressed => new BZip2Stream(rawStream, CompressionMode.Decompress, false),
                            _ => throw new InvalidOperationException("Invalid chunk type")
                        };

                        var memStream = new MemoryStream();
                        compStream.CopyTo(memStream);
                        compStream.Dispose();

                        memStream.Position = 0;
                        streams[i] = memStream;
                    }

                    rawStream.Dispose();
                    rawStreams[i] = null;
                }
                else
                {
                    streams[i] = null;
                }
            });

            return new CompositeStream((IEnumerable<Stream>)streams.Where(s => s is not null));
        }

        public override int Read(byte[] buffer, int offset, int count)
        {
            if (_isEnded) return 0;

            int readCount = _chunkStream!.Read(buffer, offset, count);
            _chunkPos += readCount;

            while (readCount < count)
            {
                // Current chunk has ended, so we have to continue reading from the next chunk.

                _chunkIndex = GetNextChunk();
                if (_chunkIndex < 0)
                {
                    // We have reached the last chunk

                    _isEnded = true;
                    _chunkPos = 0;
                    _position += readCount;
                    return readCount;
                }

                _chunkStream = GetChunkStream();
                int rc = _chunkStream.Read(buffer, offset + readCount, count - readCount);
                _chunkPos = rc;
                readCount += rc;
            }

            _position += readCount;
            return readCount;
        }

        public override void Flush()
        { }

        public override long Seek(long offset, SeekOrigin origin)
        {
            switch (origin)
            {
                case SeekOrigin.Begin:
                    Position = offset;
                    break;

                case SeekOrigin.Current:
                    Position += offset;
                    break;

                case SeekOrigin.End:
                    Position = Length - offset;
                    break;
            }

            return Position;
        }

        public override void SetLength(long value)
            => throw new NotSupportedException();

        public override void Write(byte[] buffer, int offset, int count)
            => throw new NotSupportedException();

        protected override void Dispose(bool disposing)
        { }
    }
}
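A worked example of the seek path above: with uniform 1 MiB uncompressed chunks, setting Position to 2,621,440 (2.5 MiB) lands in chunk 2; GetChunkStream opens that chunk's (possibly compressed) sub-stream and Skip reads forward 524,288 bytes to the exact offset, since a compressed chunk cannot be seeked into directly. (Uniform chunk sizes are illustrative here; real blkx tables need not use them.)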
src/SharpCompress/Common/Dmg/DmgEntry.cs (new file, 52 lines)
@@ -0,0 +1,52 @@
using SharpCompress.Common.Dmg.HFS;
using System;
using System.Collections.Generic;

namespace SharpCompress.Common.Dmg
{
    public abstract class DmgEntry : Entry
    {
        public override string Key { get; }
        public override bool IsDirectory { get; }
        public override long Size { get; }
        public override long CompressedSize { get; }
        public override CompressionType CompressionType { get; }
        public override DateTime? LastModifiedTime { get; }
        public override DateTime? CreatedTime { get; }
        public override DateTime? LastAccessedTime { get; }
        public override DateTime? ArchivedTime { get; }

        public override long Crc { get; } = 0; // Not stored
        public override string? LinkTarget { get; } = null;
        public override bool IsEncrypted { get; } = false;
        public override bool IsSplitAfter { get; } = false;

        internal override IEnumerable<FilePart> Parts { get; }

        internal DmgEntry(HFSCatalogRecord record, string path, long size, DmgFilePart part)
        {
            Key = path;
            IsDirectory = record.Type == HFSCatalogRecordType.Folder;
            Size = CompressedSize = size;              // There is no way to get the actual compressed size or the compression type of
            CompressionType = CompressionType.Unknown; // a file in a DMG archive since the files are nested inside the HFS partition.
            Parts = part.AsEnumerable();

            if (IsDirectory)
            {
                var folder = (HFSCatalogFolder)record;
                LastModifiedTime = (folder.AttributeModDate > folder.ContentModDate) ? folder.AttributeModDate : folder.ContentModDate;
                CreatedTime = folder.CreateDate;
                LastAccessedTime = folder.AccessDate;
                ArchivedTime = folder.BackupDate;
            }
            else
            {
                var file = (HFSCatalogFile)record;
                LastModifiedTime = (file.AttributeModDate > file.ContentModDate) ? file.AttributeModDate : file.ContentModDate;
                CreatedTime = file.CreateDate;
                LastAccessedTime = file.AccessDate;
                ArchivedTime = file.BackupDate;
            }
        }
    }
}
21
src/SharpCompress/Common/Dmg/DmgFilePart.cs
Normal file
21
src/SharpCompress/Common/Dmg/DmgFilePart.cs
Normal file
@@ -0,0 +1,21 @@
|
||||
using System.IO;
|
||||
|
||||
namespace SharpCompress.Common.Dmg
|
||||
{
|
||||
internal sealed class DmgFilePart : FilePart
|
||||
{
|
||||
private readonly Stream _stream;
|
||||
|
||||
internal override string FilePartName { get; }
|
||||
|
||||
public DmgFilePart(Stream stream, string fileName)
|
||||
: base(new ArchiveEncoding())
|
||||
{
|
||||
_stream = stream;
|
||||
FilePartName = fileName;
|
||||
}
|
||||
|
||||
internal override Stream GetCompressedStream() => _stream;
|
||||
internal override Stream? GetRawStream() => null;
|
||||
}
|
||||
}
|
183  src/SharpCompress/Common/Dmg/DmgUtil.cs  Normal file
@@ -0,0 +1,183 @@
using SharpCompress.Common.Dmg.Headers;
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Text;
using System.Xml.Linq;

namespace SharpCompress.Common.Dmg
{
    internal static class DmgUtil
    {
        private const string MalformedXmlMessage = "Malformed XML block";

        private static T[] ParseArray<T>(in XElement parent, in Func<XElement, T> parseElement)
        {
            var list = new List<T>();

            foreach (var node in parent.Elements())
                list.Add(parseElement(node));

            return list.ToArray();
        }

        private static Dictionary<string, T> ParseDict<T>(in XElement parent, in Func<XElement, T> parseValue)
        {
            var dict = new Dictionary<string, T>();

            string? key = null;
            foreach (var node in parent.Elements())
            {
                if (string.Equals(node.Name.LocalName, "key", StringComparison.Ordinal))
                {
                    key = node.Value;
                }
                else if (key is not null)
                {
                    var value = parseValue(node);
                    dict.Add(key, value);
                    key = null;
                }
            }

            return dict;
        }

        private static Dictionary<string, Dictionary<string, Dictionary<string, string>[]>> ParsePList(in XDocument doc)
        {
            var dictNode = doc.Root?.Element("dict");
            if (dictNode is null) throw new InvalidFormatException(MalformedXmlMessage);

            static Dictionary<string, string> ParseObject(XElement parent)
                => ParseDict(parent, node => node.Value);

            static Dictionary<string, string>[] ParseObjectArray(XElement parent)
                => ParseArray(parent, ParseObject);

            static Dictionary<string, Dictionary<string, string>[]> ParseSubDict(XElement parent)
                => ParseDict(parent, ParseObjectArray);

            return ParseDict(dictNode, ParseSubDict);
        }

        private static BlkxData CreateDataFromDict(in Dictionary<string, string> dict)
        {
            static bool TryParseHex(string? s, out uint value)
            {
                value = 0;
                if (string.IsNullOrEmpty(s)) return false;

                if (s!.StartsWith("0x", StringComparison.OrdinalIgnoreCase))
                    s = s.Substring(2);

                return uint.TryParse(s, NumberStyles.HexNumber, CultureInfo.InvariantCulture, out value);
            }

            if (!dict.TryGetValue("ID", out string? idStr) || !int.TryParse(idStr, out int id))
                throw new InvalidFormatException(MalformedXmlMessage);
            if (!dict.TryGetValue("Name", out string? name))
                throw new InvalidFormatException(MalformedXmlMessage);
            if (!dict.TryGetValue("Attributes", out string? attribStr) || !TryParseHex(attribStr, out uint attribs))
                throw new InvalidFormatException(MalformedXmlMessage);
            if (!dict.TryGetValue("Data", out string? base64Data) || string.IsNullOrEmpty(base64Data))
                throw new InvalidFormatException(MalformedXmlMessage);

            try
            {
                var data = Convert.FromBase64String(base64Data);
                if (!BlkxTable.TryRead(data, out var table))
                    throw new InvalidFormatException("Invalid BLKX table");

                return new BlkxData(id, name, attribs, table!);
            }
            catch (FormatException ex)
            {
                throw new InvalidFormatException(MalformedXmlMessage, ex);
            }
        }

        public static DmgBlockDataStream? LoadHFSPartitionStream(Stream baseStream, DmgHeader header)
        {
            if ((header.XMLOffset + header.XMLLength) >= (ulong)baseStream.Length)
                throw new IncompleteArchiveException("XML block incomplete");
            if ((header.DataForkOffset + header.DataForkLength) >= (ulong)baseStream.Length)
                throw new IncompleteArchiveException("Data block incomplete");

            baseStream.Position = (long)header.XMLOffset;
            var xmlBuffer = new byte[header.XMLLength];
            baseStream.Read(xmlBuffer, 0, (int)header.XMLLength);
            var xml = Encoding.ASCII.GetString(xmlBuffer);

            var doc = XDocument.Parse(xml);
            var pList = ParsePList(doc);
            if (!pList.TryGetValue("resource-fork", out var resDict) || !resDict.TryGetValue("blkx", out var blkxDicts))
                throw new InvalidFormatException(MalformedXmlMessage);

            var objs = new BlkxData[blkxDicts.Length];
            for (int i = 0; i < objs.Length; i++)
                objs[i] = CreateDataFromDict(blkxDicts[i]);

            // Index 0 is the protective MBR partition
            // Index 1 is the GPT header
            // Index 2 is the GPT partition table

            try
            {
                var headerData = objs[1];
                using var headerStream = new DmgBlockDataStream(baseStream, header, headerData.Table);
                if (!GptHeader.TryRead(headerStream, out var gptHeader))
                    throw new InvalidFormatException("Invalid GPT header");

                var tableData = objs[2];
                using var tableStream = new DmgBlockDataStream(baseStream, header, tableData.Table);
                var gptTable = new GptPartitionEntry[gptHeader!.EntriesCount];
                for (int i = 0; i < gptHeader.EntriesCount; i++)
                    gptTable[i] = GptPartitionEntry.Read(tableStream);

                foreach (var entry in gptTable)
                {
                    if (entry.TypeGuid == PartitionFormat.AppleHFS)
                    {
                        BlkxData? partitionData = null;
                        for (int i = 3; i < objs.Length; i++)
                        {
                            if (objs[i].Name.StartsWith(entry.Name, StringComparison.Ordinal))
                            {
                                partitionData = objs[i];
                                break;
                            }
                        }

                        if (partitionData is null)
                            throw new InvalidFormatException($"Missing partition {entry.Name}");

                        return new DmgBlockDataStream(baseStream, header, partitionData.Table);
                    }
                }

                return null;
            }
            catch (EndOfStreamException ex)
            {
                throw new IncompleteArchiveException("Partition incomplete", ex);
            }
        }

        private sealed class BlkxData
        {
            public int Id { get; }
            public string Name { get; }
            public uint Attributes { get; }
            public BlkxTable Table { get; }

            public BlkxData(int id, string name, uint attributes, BlkxTable table)
            {
                Id = id;
                Name = name;
                Attributes = attributes;
                Table = table;
            }
        }
    }
}
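For orientation, a hedged sketch of the property-list shape ParsePList walks: a top-level dict of named sub-dicts ("resource-fork"), each holding arrays of string-keyed objects ("blkx"). The XML below is illustrative only (field values made up, base64 payload is not real BLKX data):

    using System;
    using System.Xml.Linq;

    class PListShapeDemo
    {
        const string Xml = @"<plist version=""1.0""><dict>
      <key>resource-fork</key>
      <dict>
        <key>blkx</key>
        <array>
          <dict>
            <key>Attributes</key><string>0x0050</string>
            <key>Data</key><data>bWFkZS11cA==</data>
            <key>ID</key><string>0</string>
            <key>Name</key><string>Protective Master Boot Record (MBR : 0)</string>
          </dict>
        </array>
      </dict>
    </dict></plist>";

        static void Main()
        {
            var doc = XDocument.Parse(Xml);
            // Same traversal ParseDict relies on: alternating <key>/<value> children.
            foreach (var node in doc.Root!.Element("dict")!.Elements())
                Console.WriteLine(node.Name.LocalName); // key, dict
        }
    }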
38  src/SharpCompress/Common/Dmg/DmgVolume.cs  Normal file
@@ -0,0 +1,38 @@
using SharpCompress.Archives.Dmg;
using SharpCompress.Common.Dmg.Headers;
using SharpCompress.Common.Dmg.HFS;
using System;
using System.Collections.Generic;
using System.IO;

namespace SharpCompress.Common.Dmg
{
    public class DmgVolume : Volume
    {
        private readonly DmgArchive _archive;
        private readonly string _fileName;

        internal DmgHeader Header { get; }

        public DmgVolume(DmgArchive archive, Stream stream, string fileName, Readers.ReaderOptions readerOptions)
            : base(stream, readerOptions)
        {
            _archive = archive;
            _fileName = fileName;

            long pos = stream.Length - DmgHeader.HeaderSize;
            if (pos < 0) throw new InvalidFormatException("Invalid DMG volume");
            stream.Position = pos;

            if (DmgHeader.TryRead(stream, out var header)) Header = header!;
            else throw new InvalidFormatException("Invalid DMG volume");
        }

        internal IEnumerable<DmgArchiveEntry> LoadEntries()
        {
            var partitionStream = DmgUtil.LoadHFSPartitionStream(Stream, Header);
            if (partitionStream is null) return Array.Empty<DmgArchiveEntry>();
            else return HFSUtil.LoadEntriesFromPartition(partitionStream, _fileName, _archive);
        }
    }
}
336  src/SharpCompress/Common/Dmg/HFS/HFSCatalogRecord.cs  Normal file
@@ -0,0 +1,336 @@
using System;

namespace SharpCompress.Common.Dmg.HFS
{
    internal sealed class HFSCatalogKey : HFSStructBase, IEquatable<HFSCatalogKey>, IComparable<HFSCatalogKey>, IComparable
    {
        private readonly StringComparer _comparer;

        public uint ParentId { get; }

        public string Name { get; }

        private static StringComparer GetComparer(HFSKeyCompareType compareType, bool isHFSX)
        {
            if (isHFSX)
            {
                return compareType switch
                {
                    HFSKeyCompareType.CaseFolding => StringComparer.InvariantCultureIgnoreCase,
                    HFSKeyCompareType.BinaryCompare => StringComparer.Ordinal,
                    _ => StringComparer.InvariantCultureIgnoreCase
                };
            }
            else
            {
                return StringComparer.InvariantCultureIgnoreCase;
            }
        }

        public HFSCatalogKey(uint parentId, string name, HFSKeyCompareType compareType, bool isHFSX)
        {
            ParentId = parentId;
            Name = name;
            _comparer = GetComparer(compareType, isHFSX);
        }

        public HFSCatalogKey(byte[] key, HFSKeyCompareType compareType, bool isHFSX)
        {
            ReadOnlySpan<byte> data = key.AsSpan();
            ParentId = ReadUInt32(ref data);
            Name = ReadString(ref data, true);
            _comparer = GetComparer(compareType, isHFSX);
        }

        public bool Equals(HFSCatalogKey? other)
        {
            if (other is null) return false;
            else return (ParentId == other.ParentId) && _comparer.Equals(Name, other.Name);
        }

        public override bool Equals(object? obj)
        {
            if (obj is HFSCatalogKey other) return Equals(other);
            else return false;
        }

        public int CompareTo(HFSCatalogKey? other)
        {
            if (other is null) return 1;

            int result = ParentId.CompareTo(other.ParentId);
            if (result == 0) result = _comparer.Compare(Name, other.Name);
            return result;
        }

        public int CompareTo(object? obj)
        {
            if (obj is null) return 1;
            else if (obj is HFSCatalogKey other) return CompareTo(other);
            else throw new ArgumentException("Object is not of type CatalogKey", nameof(obj));
        }

        public override int GetHashCode()
            => ParentId.GetHashCode() ^ _comparer.GetHashCode(Name);

        public static bool operator ==(HFSCatalogKey? left, HFSCatalogKey? right)
        {
            if (left is null) return right is null;
            else return left.Equals(right);
        }

        public static bool operator !=(HFSCatalogKey? left, HFSCatalogKey? right)
        {
            if (left is null) return right is not null;
            else return !left.Equals(right);
        }

        public static bool operator <(HFSCatalogKey? left, HFSCatalogKey? right)
        {
            if (left is null) return right is not null;
            else return left.CompareTo(right) < 0;
        }

        public static bool operator >(HFSCatalogKey? left, HFSCatalogKey? right)
        {
            if (left is null) return false;
            else return left.CompareTo(right) > 0;
        }

        public static bool operator <=(HFSCatalogKey? left, HFSCatalogKey? right)
        {
            if (left is null) return true;
            else return left.CompareTo(right) <= 0;
        }

        public static bool operator >=(HFSCatalogKey? left, HFSCatalogKey? right)
        {
            if (left is null) return right is null;
            else return left.CompareTo(right) >= 0;
        }
    }

    internal enum HFSCatalogRecordType : ushort
    {
        Folder = 0x0001,
        File = 0x0002,
        FolderThread = 0x0003,
        FileThread = 0x0004
    }

    internal abstract class HFSCatalogRecord : HFSStructBase
    {
        public HFSCatalogRecordType Type { get; }

        protected HFSCatalogRecord(HFSCatalogRecordType type)
            => Type = type;

        public static bool TryRead(ref ReadOnlySpan<byte> data, HFSKeyCompareType compareType, bool isHFSX, out HFSCatalogRecord? record)
        {
            record = null;

            ushort rawType = ReadUInt16(ref data);
            if (!Enum.IsDefined(typeof(HFSCatalogRecordType), rawType)) return false;

            var type = (HFSCatalogRecordType)rawType;
            switch (type)
            {
                case HFSCatalogRecordType.Folder:
                    record = HFSCatalogFolder.Read(ref data);
                    return true;

                case HFSCatalogRecordType.File:
                    record = HFSCatalogFile.Read(ref data);
                    return true;

                case HFSCatalogRecordType.FolderThread:
                    record = HFSCatalogThread.Read(ref data, false, compareType, isHFSX);
                    return true;

                case HFSCatalogRecordType.FileThread:
                    record = HFSCatalogThread.Read(ref data, true, compareType, isHFSX);
                    return true;
            }

            return false;
        }
    }

    internal sealed class HFSCatalogFolder : HFSCatalogRecord
    {
        public uint Valence { get; }
        public uint FolderId { get; }
        public DateTime CreateDate { get; }
        public DateTime ContentModDate { get; }
        public DateTime AttributeModDate { get; }
        public DateTime AccessDate { get; }
        public DateTime BackupDate { get; }
        public HFSPermissions Permissions { get; }
        public HFSFolderInfo Info { get; }
        public uint TextEncoding { get; }

        private HFSCatalogFolder(
            uint valence,
            uint folderId,
            DateTime createDate,
            DateTime contentModDate,
            DateTime attributeModDate,
            DateTime accessDate,
            DateTime backupDate,
            HFSPermissions permissions,
            HFSFolderInfo info,
            uint textEncoding)
            : base(HFSCatalogRecordType.Folder)
        {
            Valence = valence;
            FolderId = folderId;
            CreateDate = createDate;
            ContentModDate = contentModDate;
            AttributeModDate = attributeModDate;
            AccessDate = accessDate;
            BackupDate = backupDate;
            Permissions = permissions;
            Info = info;
            TextEncoding = textEncoding;
        }

        public static HFSCatalogFolder Read(ref ReadOnlySpan<byte> data)
        {
            _ = ReadUInt16(ref data); // reserved
            uint valence = ReadUInt32(ref data);
            uint folderId = ReadUInt32(ref data);
            var createDate = ReadDate(ref data);
            var contentModDate = ReadDate(ref data);
            var attributeModDate = ReadDate(ref data);
            var accessDate = ReadDate(ref data);
            var backupDate = ReadDate(ref data);
            var permissions = HFSPermissions.Read(ref data);
            var info = HFSFolderInfo.Read(ref data);
            uint textEncoding = ReadUInt32(ref data);
            _ = ReadUInt32(ref data); // reserved

            return new HFSCatalogFolder(
                valence,
                folderId,
                createDate,
                contentModDate,
                attributeModDate,
                accessDate,
                backupDate,
                permissions,
                info,
                textEncoding);
        }
    }

    internal enum HFSFileFlags : ushort
    {
        LockedBit = 0x0000,
        LockedMask = 0x0001,
        ThreadExistsBit = 0x0001,
        ThreadExistsMask = 0x0002
    }

    internal sealed class HFSCatalogFile : HFSCatalogRecord
    {
        public HFSFileFlags Flags { get; }
        public uint FileId { get; }
        public DateTime CreateDate { get; }
        public DateTime ContentModDate { get; }
        public DateTime AttributeModDate { get; }
        public DateTime AccessDate { get; }
        public DateTime BackupDate { get; }
        public HFSPermissions Permissions { get; }
        public HFSFileInfo Info { get; }
        public uint TextEncoding { get; }

        public HFSForkData DataFork { get; }
        public HFSForkData ResourceFork { get; }

        private HFSCatalogFile(
            HFSFileFlags flags,
            uint fileId,
            DateTime createDate,
            DateTime contentModDate,
            DateTime attributeModDate,
            DateTime accessDate,
            DateTime backupDate,
            HFSPermissions permissions,
            HFSFileInfo info,
            uint textEncoding,
            HFSForkData dataFork,
            HFSForkData resourceFork)
            : base(HFSCatalogRecordType.File)
        {
            Flags = flags;
            FileId = fileId;
            CreateDate = createDate;
            ContentModDate = contentModDate;
            AttributeModDate = attributeModDate;
            AccessDate = accessDate;
            BackupDate = backupDate;
            Permissions = permissions;
            Info = info;
            TextEncoding = textEncoding;
            DataFork = dataFork;
            ResourceFork = resourceFork;
        }

        public static HFSCatalogFile Read(ref ReadOnlySpan<byte> data)
        {
            var flags = (HFSFileFlags)ReadUInt16(ref data);
            _ = ReadUInt32(ref data); // reserved
            uint fileId = ReadUInt32(ref data);
            var createDate = ReadDate(ref data);
            var contentModDate = ReadDate(ref data);
            var attributeModDate = ReadDate(ref data);
            var accessDate = ReadDate(ref data);
            var backupDate = ReadDate(ref data);
            var permissions = HFSPermissions.Read(ref data);
            var info = HFSFileInfo.Read(ref data);
            uint textEncoding = ReadUInt32(ref data);
            _ = ReadUInt32(ref data); // reserved

            var dataFork = HFSForkData.Read(ref data);
            var resourceFork = HFSForkData.Read(ref data);

            return new HFSCatalogFile(
                flags,
                fileId,
                createDate,
                contentModDate,
                attributeModDate,
                accessDate,
                backupDate,
                permissions,
                info,
                textEncoding,
                dataFork,
                resourceFork);
        }
    }

    internal sealed class HFSCatalogThread : HFSCatalogRecord
    {
        public uint ParentId { get; }
        public string NodeName { get; }
        public HFSCatalogKey CatalogKey { get; }

        private HFSCatalogThread(uint parentId, string nodeName, bool isFile, HFSKeyCompareType compareType, bool isHFSX)
            : base(isFile ? HFSCatalogRecordType.FileThread : HFSCatalogRecordType.FolderThread)
        {
            ParentId = parentId;
            NodeName = nodeName;
            CatalogKey = new HFSCatalogKey(ParentId, NodeName, compareType, isHFSX);
        }

        public static HFSCatalogThread Read(ref ReadOnlySpan<byte> data, bool isFile, HFSKeyCompareType compareType, bool isHFSX)
        {
            _ = ReadInt16(ref data); // reserved
            uint parentId = ReadUInt32(ref data);
            string nodeName = ReadString(ref data, true);

            return new HFSCatalogThread(parentId, nodeName, isFile, compareType, isHFSX);
        }
    }
}
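A hypothetical within-assembly sketch of how the comparer choice above plays out; HFSCatalogKey is internal, so this only compiles inside SharpCompress:

    // Hypothetical: demonstrates HFSX binary keys versus case-folded keys.
    static void CompareDemo()
    {
        // HFSX with binary (case-sensitive) keys: "README" and "readme" are distinct entries.
        var a = new HFSCatalogKey(2, "README", HFSKeyCompareType.BinaryCompare, isHFSX: true);
        var b = new HFSCatalogKey(2, "readme", HFSKeyCompareType.BinaryCompare, isHFSX: true);
        bool distinct = a != b; // true

        // Plain HFS+ always folds case: the same two names collide.
        var c = new HFSCatalogKey(2, "README", HFSKeyCompareType.CaseFolding, isHFSX: false);
        var d = new HFSCatalogKey(2, "readme", HFSKeyCompareType.CaseFolding, isHFSX: false);
        bool same = c == d; // true
    }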
31  src/SharpCompress/Common/Dmg/HFS/HFSExtentDescriptor.cs  Normal file
@@ -0,0 +1,31 @@
using System;
using System.IO;

namespace SharpCompress.Common.Dmg.HFS
{
    internal sealed class HFSExtentDescriptor : HFSStructBase
    {
        public uint StartBlock { get; }
        public uint BlockCount { get; }

        private HFSExtentDescriptor(uint startBlock, uint blockCount)
        {
            StartBlock = startBlock;
            BlockCount = blockCount;
        }

        public static HFSExtentDescriptor Read(Stream stream)
        {
            return new HFSExtentDescriptor(
                ReadUInt32(stream),
                ReadUInt32(stream));
        }

        public static HFSExtentDescriptor Read(ref ReadOnlySpan<byte> data)
        {
            return new HFSExtentDescriptor(
                ReadUInt32(ref data),
                ReadUInt32(ref data));
        }
    }
}
115  src/SharpCompress/Common/Dmg/HFS/HFSExtentRecord.cs  Normal file
@@ -0,0 +1,115 @@
using System;
using System.Collections.Generic;

namespace SharpCompress.Common.Dmg.HFS
{
    internal sealed class HFSExtentKey : HFSStructBase, IEquatable<HFSExtentKey>, IComparable<HFSExtentKey>, IComparable
    {
        public byte ForkType { get; }
        public uint FileId { get; }
        public uint StartBlock { get; }

        public HFSExtentKey(byte forkType, uint fileId, uint startBlock)
        {
            ForkType = forkType;
            FileId = fileId;
            StartBlock = startBlock;
        }

        public HFSExtentKey(byte[] key)
        {
            ReadOnlySpan<byte> data = key.AsSpan();
            ForkType = ReadUInt8(ref data);
            _ = ReadUInt8(ref data); // padding
            FileId = ReadUInt32(ref data);
            StartBlock = ReadUInt32(ref data);
        }

        public bool Equals(HFSExtentKey? other)
        {
            if (other is null) return false;
            else return (ForkType == other.ForkType) && (FileId == other.FileId) && (StartBlock == other.StartBlock);
        }

        public override bool Equals(object? obj)
        {
            if (obj is HFSExtentKey other) return Equals(other);
            else return false;
        }

        public int CompareTo(HFSExtentKey? other)
        {
            if (other is null) return 1;

            int result = FileId.CompareTo(other.FileId);
            if (result == 0) result = ForkType.CompareTo(other.ForkType);
            if (result == 0) result = StartBlock.CompareTo(other.StartBlock);
            return result;
        }

        public int CompareTo(object? obj)
        {
            if (obj is null) return 1;
            else if (obj is HFSExtentKey other) return CompareTo(other);
            else throw new ArgumentException("Object is not of type ExtentKey", nameof(obj));
        }

        public override int GetHashCode()
            => ForkType.GetHashCode() ^ FileId.GetHashCode() ^ StartBlock.GetHashCode();

        public static bool operator ==(HFSExtentKey? left, HFSExtentKey? right)
        {
            if (left is null) return right is null;
            else return left.Equals(right);
        }

        public static bool operator !=(HFSExtentKey? left, HFSExtentKey? right)
        {
            if (left is null) return right is not null;
            else return !left.Equals(right);
        }

        public static bool operator <(HFSExtentKey? left, HFSExtentKey? right)
        {
            if (left is null) return right is not null;
            else return left.CompareTo(right) < 0;
        }

        public static bool operator >(HFSExtentKey? left, HFSExtentKey? right)
        {
            if (left is null) return false;
            else return left.CompareTo(right) > 0;
        }

        public static bool operator <=(HFSExtentKey? left, HFSExtentKey? right)
        {
            if (left is null) return true;
            else return left.CompareTo(right) <= 0;
        }

        public static bool operator >=(HFSExtentKey? left, HFSExtentKey? right)
        {
            if (left is null) return right is null;
            else return left.CompareTo(right) >= 0;
        }
    }

    internal sealed class HFSExtentRecord : HFSStructBase
    {
        private const int ExtentCount = 8;

        public IReadOnlyList<HFSExtentDescriptor> Extents { get; }

        private HFSExtentRecord(IReadOnlyList<HFSExtentDescriptor> extents)
            => Extents = extents;

        public static HFSExtentRecord Read(ref ReadOnlySpan<byte> data)
        {
            var extents = new HFSExtentDescriptor[ExtentCount];
            for (int i = 0; i < ExtentCount; i++)
                extents[i] = HFSExtentDescriptor.Read(ref data);

            return new HFSExtentRecord(extents);
        }
    }
}
145  src/SharpCompress/Common/Dmg/HFS/HFSFinderInfo.cs  Normal file
@@ -0,0 +1,145 @@
using System;

namespace SharpCompress.Common.Dmg.HFS
{
    internal struct HFSPoint
    {
        public short V;
        public short H;
    }

    internal struct HFSRect
    {
        public short Top;
        public short Left;
        public short Bottom;
        public short Right;
    }

    [Flags]
    internal enum HFSFinderFlags : ushort
    {
        None = 0x0000,

        IsOnDesk = 0x0001,      /* Files and folders (System 6) */
        Color = 0x000E,         /* Files and folders */
        IsShared = 0x0040,      /* Files only (Applications only) If */
                                /* clear, the application needs */
                                /* to write to its resource fork, */
                                /* and therefore cannot be shared */
                                /* on a server */
        HasNoINITs = 0x0080,    /* Files only (Extensions/Control */
                                /* Panels only) */
                                /* This file contains no INIT resource */
        HasBeenInited = 0x0100, /* Files only. Clear if the file */
                                /* contains desktop database resources */
                                /* ('BNDL', 'FREF', 'open', 'kind'...) */
                                /* that have not been added yet. Set */
                                /* only by the Finder. */
                                /* Reserved for folders */
        HasCustomIcon = 0x0400, /* Files and folders */
        IsStationery = 0x0800,  /* Files only */
        NameLocked = 0x1000,    /* Files and folders */
        HasBundle = 0x2000,     /* Files only */
        IsInvisible = 0x4000,   /* Files and folders */
        IsAlias = 0x8000        /* Files only */
    }

    [Flags]
    internal enum HFSExtendedFinderFlags : ushort
    {
        None = 0x0000,

        ExtendedFlagsAreInvalid = 0x8000, /* The other extended flags */
                                          /* should be ignored */
        HasCustomBadge = 0x0100,          /* The file or folder has a */
                                          /* badge resource */
        HasRoutingInfo = 0x0004           /* The file contains routing */
                                          /* info resource */
    }

    internal sealed class HFSFileInfo : HFSStructBase
    {
        public string FileType { get; }    /* The type of the file */
        public string FileCreator { get; } /* The file's creator */
        public HFSFinderFlags FinderFlags { get; }
        public HFSPoint Location { get; }  /* File's location in the folder. */
        public HFSExtendedFinderFlags ExtendedFinderFlags { get; }
        public int PutAwayFolderId { get; }

        private HFSFileInfo(
            string fileType,
            string fileCreator,
            HFSFinderFlags finderFlags,
            HFSPoint location,
            HFSExtendedFinderFlags extendedFinderFlags,
            int putAwayFolderId)
        {
            FileType = fileType;
            FileCreator = fileCreator;
            FinderFlags = finderFlags;
            Location = location;
            ExtendedFinderFlags = extendedFinderFlags;
            PutAwayFolderId = putAwayFolderId;
        }

        public static HFSFileInfo Read(ref ReadOnlySpan<byte> data)
        {
            string fileType = ReadOSType(ref data);
            string fileCreator = ReadOSType(ref data);
            var finderFlags = (HFSFinderFlags)ReadUInt16(ref data);
            var location = ReadPoint(ref data);
            _ = ReadUInt16(ref data); // reserved
            data = data.Slice(4 * sizeof(short)); // reserved
            var extendedFinderFlags = (HFSExtendedFinderFlags)ReadUInt16(ref data);
            _ = ReadInt16(ref data); // reserved
            int putAwayFolderId = ReadInt32(ref data);

            return new HFSFileInfo(fileType, fileCreator, finderFlags, location, extendedFinderFlags, putAwayFolderId);
        }
    }

    internal sealed class HFSFolderInfo : HFSStructBase
    {
        public HFSRect WindowBounds { get; }    /* The position and dimension of the */
                                                /* folder's window */
        public HFSFinderFlags FinderFlags { get; }
        public HFSPoint Location { get; }       /* Folder's location in the parent */
                                                /* folder. If set to {0, 0}, the Finder */
                                                /* will place the item automatically */
        public HFSPoint ScrollPosition { get; } /* Scroll position (for icon views) */
        public HFSExtendedFinderFlags ExtendedFinderFlags { get; }
        public int PutAwayFolderId { get; }

        private HFSFolderInfo(
            HFSRect windowBounds,
            HFSFinderFlags finderFlags,
            HFSPoint location,
            HFSPoint scrollPosition,
            HFSExtendedFinderFlags extendedFinderFlags,
            int putAwayFolderId)
        {
            WindowBounds = windowBounds;
            FinderFlags = finderFlags;
            Location = location;
            ScrollPosition = scrollPosition;
            ExtendedFinderFlags = extendedFinderFlags;
            PutAwayFolderId = putAwayFolderId;
        }

        public static HFSFolderInfo Read(ref ReadOnlySpan<byte> data)
        {
            var windowBounds = ReadRect(ref data);
            var finderFlags = (HFSFinderFlags)ReadUInt16(ref data);
            var location = ReadPoint(ref data);
            _ = ReadUInt16(ref data); // reserved
            var scrollPosition = ReadPoint(ref data);
            _ = ReadInt32(ref data); // reserved
            var extendedFinderFlags = (HFSExtendedFinderFlags)ReadUInt16(ref data);
            _ = ReadInt16(ref data); // reserved
            int putAwayFolderId = ReadInt32(ref data);

            return new HFSFolderInfo(windowBounds, finderFlags, location, scrollPosition, extendedFinderFlags, putAwayFolderId);
        }
    }
}
50  src/SharpCompress/Common/Dmg/HFS/HFSForkData.cs  Normal file
@@ -0,0 +1,50 @@
using System;
using System.Collections.Generic;
using System.IO;

namespace SharpCompress.Common.Dmg.HFS
{
    internal sealed class HFSForkData : HFSStructBase
    {
        private const int ExtentCount = 8;

        public ulong LogicalSize { get; }
        public uint ClumpSize { get; }
        public uint TotalBlocks { get; }
        public IReadOnlyList<HFSExtentDescriptor> Extents { get; }

        private HFSForkData(ulong logicalSize, uint clumpSize, uint totalBlocks, IReadOnlyList<HFSExtentDescriptor> extents)
        {
            LogicalSize = logicalSize;
            ClumpSize = clumpSize;
            TotalBlocks = totalBlocks;
            Extents = extents;
        }

        public static HFSForkData Read(Stream stream)
        {
            ulong logicalSize = ReadUInt64(stream);
            uint clumpSize = ReadUInt32(stream);
            uint totalBlocks = ReadUInt32(stream);

            var extents = new HFSExtentDescriptor[ExtentCount];
            for (int i = 0; i < ExtentCount; i++)
                extents[i] = HFSExtentDescriptor.Read(stream);

            return new HFSForkData(logicalSize, clumpSize, totalBlocks, extents);
        }

        public static HFSForkData Read(ref ReadOnlySpan<byte> data)
        {
            ulong logicalSize = ReadUInt64(ref data);
            uint clumpSize = ReadUInt32(ref data);
            uint totalBlocks = ReadUInt32(ref data);

            var extents = new HFSExtentDescriptor[ExtentCount];
            for (int i = 0; i < ExtentCount; i++)
                extents[i] = HFSExtentDescriptor.Read(ref data);

            return new HFSForkData(logicalSize, clumpSize, totalBlocks, extents);
        }
    }
}
196  src/SharpCompress/Common/Dmg/HFS/HFSForkStream.cs  Normal file
@@ -0,0 +1,196 @@
using SharpCompress.IO;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;

namespace SharpCompress.Common.Dmg.HFS
{
    internal sealed class HFSForkStream : Stream
    {
        private readonly Stream _baseStream;
        private readonly HFSVolumeHeader _volumeHeader;
        private readonly IReadOnlyList<HFSExtentDescriptor> _extents;
        private long _position;
        private bool _isEnded;
        private int _extentIndex;
        private Stream? _extentStream;

        public override bool CanRead => true;
        public override bool CanWrite => false;
        public override bool CanSeek => true;
        public override long Length { get; }

        public override long Position
        {
            get => _position;
            set
            {
                if ((value < 0) || (value > Length)) throw new ArgumentOutOfRangeException(nameof(value));

                if (value == Length)
                {
                    // End of the stream

                    _position = Length;
                    _isEnded = true;
                    _extentIndex = -1;
                    _extentStream = null;
                }
                else if (value != _position)
                {
                    _position = value;
                    _isEnded = false; // a seek back into the data leaves the end-of-stream state

                    // We first have to determine in which extent we are now, then we seek to the exact position in that extent.

                    long offsetInExtent = _position;
                    for (int i = 0; i < _extents.Count; i++)
                    {
                        var extent = _extents[i];
                        long extentSize = extent.BlockCount * _volumeHeader.BlockSize;
                        if (offsetInExtent < extentSize) // the target offset falls inside this extent
                        {
                            if (i == _extentIndex)
                            {
                                // We are in the same extent so just seek to the correct position
                                _extentStream!.Position = offsetInExtent;
                            }
                            else
                            {
                                _extentIndex = i;
                                _extentStream = GetExtentStream();
                                _extentStream.Position = offsetInExtent;
                            }

                            break;
                        }
                        else
                        {
                            offsetInExtent -= extentSize;
                        }
                    }
                }
            }
        }

        public HFSForkStream(Stream baseStream, HFSVolumeHeader volumeHeader, HFSForkData forkData)
        {
            _baseStream = baseStream;
            _volumeHeader = volumeHeader;
            _extents = forkData.Extents;
            Length = (long)forkData.LogicalSize;

            _position = 0;
            _extentIndex = -1;
            _extentIndex = GetNextExtent();
            _isEnded = _extentIndex < 0;
            if (!_isEnded) _extentStream = GetExtentStream();
        }

        public HFSForkStream(
            Stream baseStream, HFSVolumeHeader volumeHeader, HFSForkData forkData, uint fileId,
            IReadOnlyDictionary<HFSExtentKey, HFSExtentRecord> extents)
        {
            _baseStream = baseStream;
            _volumeHeader = volumeHeader;
            Length = (long)forkData.LogicalSize;

            uint blocks = (uint)forkData.Extents.Sum(e => e.BlockCount);
            var totalExtents = new List<HFSExtentDescriptor>(forkData.Extents);
            _extents = totalExtents;

            // Follow the extents overflow file: each record continues the fork at the block count reached so far.
            var nextKey = new HFSExtentKey(0, fileId, blocks);
            while (extents.TryGetValue(nextKey, out var record))
            {
                blocks += (uint)record.Extents.Sum(e => e.BlockCount);
                totalExtents.AddRange(record.Extents);

                nextKey = new HFSExtentKey(0, fileId, blocks);
            }

            _position = 0;
            _extentIndex = -1;
            _extentIndex = GetNextExtent();
            _isEnded = _extentIndex < 0;
            if (!_isEnded) _extentStream = GetExtentStream();
        }

        private int GetNextExtent()
        {
            int index = _extentIndex + 1;
            if (index >= _extents.Count) return -1;

            var extent = _extents[index];
            if ((extent.StartBlock == 0) && (extent.BlockCount == 0)) return -1;
            return index;
        }

        private Stream GetExtentStream()
        {
            if (_extentIndex < 0)
                throw new InvalidOperationException("Invalid extent index");

            var extent = _extents[_extentIndex];
            return new HFSExtentStream(_baseStream, _volumeHeader, extent);
        }

        public override void Flush()
        { }

        public override int Read(byte[] buffer, int offset, int count)
        {
            if (_isEnded) return 0;

            count = (int)Math.Min(count, Length - Position);
            int readCount = _extentStream!.Read(buffer, offset, count);
            while (readCount < count)
            {
                _extentIndex = GetNextExtent();
                if (_extentIndex < 0)
                {
                    _isEnded = true;
                    _position += readCount; // keep Position consistent even on a short read
                    return readCount;
                }

                _extentStream = GetExtentStream();
                readCount += _extentStream.Read(buffer, offset + readCount, count - readCount);
            }

            _position += readCount;
            return readCount;
        }

        public override long Seek(long offset, SeekOrigin origin)
        {
            switch (origin)
            {
                case SeekOrigin.Begin:
                    Position = offset;
                    break;

                case SeekOrigin.Current:
                    Position += offset;
                    break;

                case SeekOrigin.End:
                    Position = Length + offset; // standard Stream semantics: the offset is relative to the end (usually negative)
                    break;
            }

            return Position;
        }

        public override void SetLength(long value)
            => throw new NotSupportedException();

        public override void Write(byte[] buffer, int offset, int count)
            => throw new NotSupportedException();

        private sealed class HFSExtentStream : SeekableSubStream
        {
            public HFSExtentStream(Stream stream, HFSVolumeHeader volumeHeader, HFSExtentDescriptor extent)
                : base(stream, (long)extent.StartBlock * volumeHeader.BlockSize, (long)extent.BlockCount * volumeHeader.BlockSize)
            { }
        }
    }
}
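A hypothetical within-assembly helper showing the intended use of HFSForkStream: draining one file's data fork while the stream stitches the eight inline extents (plus any overflow extents, via the second constructor) into a single contiguous view. The helper name is made up for illustration:

    // Hypothetical helper, only meaningful inside the SharpCompress assembly:
    static byte[] ReadDataFork(Stream partition, HFSVolumeHeader volumeHeader, HFSCatalogFile file)
    {
        using var fork = new HFSForkStream(partition, volumeHeader, file.DataFork);
        var content = new byte[fork.Length];
        int total = 0;
        while (total < content.Length)
        {
            int n = fork.Read(content, total, content.Length - total);
            if (n == 0) break; // defensive: stop on an unexpectedly short stream
            total += n;
        }
        return content;
    }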
91  src/SharpCompress/Common/Dmg/HFS/HFSKeyedRecord.cs  Normal file
@@ -0,0 +1,91 @@
using System;

namespace SharpCompress.Common.Dmg.HFS
{
    internal abstract class HFSKeyedRecord : HFSStructBase
    {
        private readonly HFSKeyCompareType _compareType;
        private readonly bool _isHFSX;
        private HFSCatalogKey? _catalogKey;
        private HFSExtentKey? _extentKey;

        public byte[] Key { get; }

        public HFSCatalogKey GetCatalogKey() => _catalogKey ??= new HFSCatalogKey(Key, _compareType, _isHFSX);

        public HFSExtentKey GetExtentKey() => _extentKey ??= new HFSExtentKey(Key);

        protected HFSKeyedRecord(byte[] key, HFSKeyCompareType compareType, bool isHFSX)
        {
            Key = key;
            _compareType = compareType;
            _isHFSX = isHFSX;
        }
    }

    internal sealed class HFSPointerRecord : HFSKeyedRecord
    {
        public uint NodeNumber { get; }

        private HFSPointerRecord(byte[] key, uint nodeNumber, HFSKeyCompareType compareType, bool isHFSX)
            : base(key, compareType, isHFSX)
        {
            NodeNumber = nodeNumber;
        }

        public static HFSPointerRecord Read(ref ReadOnlySpan<byte> data, HFSTreeHeaderRecord headerRecord, bool isHFSX)
        {
            bool isBigKey = headerRecord.Attributes.HasFlag(HFSTreeAttributes.BigKeys);
            ushort keyLength = isBigKey ? ReadUInt16(ref data) : ReadUInt8(ref data);
            if (!headerRecord.Attributes.HasFlag(HFSTreeAttributes.VariableIndexKeys)) keyLength = headerRecord.MaxKeyLength;
            int keySize = (isBigKey ? 2 : 1) + keyLength;

            var key = new byte[keyLength];
            data.Slice(0, keyLength).CopyTo(key);
            data = data.Slice(keyLength);

            // data is always aligned to 2 bytes
            if (keySize % 2 == 1) data = data.Slice(1);

            uint nodeNumber = ReadUInt32(ref data);

            return new HFSPointerRecord(key, nodeNumber, headerRecord.KeyCompareType, isHFSX);
        }
    }

    internal sealed class HFSDataRecord : HFSKeyedRecord
    {
        public byte[] Data { get; }

        private HFSDataRecord(byte[] key, byte[] data, HFSKeyCompareType compareType, bool isHFSX)
            : base(key, compareType, isHFSX)
        {
            Data = data;
        }

        public static HFSDataRecord Read(ref ReadOnlySpan<byte> data, int size, HFSTreeHeaderRecord headerRecord, bool isHFSX)
        {
            bool isBigKey = headerRecord.Attributes.HasFlag(HFSTreeAttributes.BigKeys);
            ushort keyLength = isBigKey ? ReadUInt16(ref data) : ReadUInt8(ref data);
            int keySize = (isBigKey ? 2 : 1) + keyLength;
            size -= keySize;

            var key = new byte[keyLength];
            data.Slice(0, keyLength).CopyTo(key);
            data = data.Slice(keyLength);

            // data is always aligned to 2 bytes
            if (keySize % 2 == 1)
            {
                data = data.Slice(1);
                size--;
            }

            var structData = new byte[size];
            data.Slice(0, size).CopyTo(structData);
            data = data.Slice(size);

            return new HFSDataRecord(key, structData, headerRecord.KeyCompareType, isHFSX);
        }
    }
}
35  src/SharpCompress/Common/Dmg/HFS/HFSPermissions.cs  Normal file
@@ -0,0 +1,35 @@
using System;

namespace SharpCompress.Common.Dmg.HFS
{
    internal sealed class HFSPermissions : HFSStructBase
    {
        public uint OwnerID { get; }
        public uint GroupID { get; }
        public byte AdminFlags { get; }
        public byte OwnerFlags { get; }
        public ushort FileMode { get; }
        public uint Special { get; }

        private HFSPermissions(uint ownerID, uint groupID, byte adminFlags, byte ownerFlags, ushort fileMode, uint special)
        {
            OwnerID = ownerID;
            GroupID = groupID;
            AdminFlags = adminFlags;
            OwnerFlags = ownerFlags;
            FileMode = fileMode;
            Special = special;
        }

        public static HFSPermissions Read(ref ReadOnlySpan<byte> data)
        {
            return new HFSPermissions(
                ReadUInt32(ref data),
                ReadUInt32(ref data),
                ReadUInt8(ref data),
                ReadUInt8(ref data),
                ReadUInt16(ref data),
                ReadUInt32(ref data));
        }
    }
}
187  src/SharpCompress/Common/Dmg/HFS/HFSStructBase.cs  Normal file
@@ -0,0 +1,187 @@
using System;
using System.Buffers.Binary;
using System.IO;
using System.Text;

namespace SharpCompress.Common.Dmg.HFS
{
    internal abstract class HFSStructBase
    {
        private const int StringSize = 510;
        private const int OSTypeSize = 4;
        private static readonly DateTime Epoch = new DateTime(1904, 1, 1, 0, 0, 0, DateTimeKind.Utc);
        private static readonly byte[] _buffer = new byte[StringSize]; // shared scratch buffer; parsing is assumed to be single-threaded

        protected static byte ReadUInt8(Stream stream)
        {
            if (stream.Read(_buffer, 0, sizeof(byte)) != sizeof(byte))
                throw new EndOfStreamException();

            return _buffer[0];
        }

        protected static ushort ReadUInt16(Stream stream)
        {
            if (stream.Read(_buffer, 0, sizeof(ushort)) != sizeof(ushort))
                throw new EndOfStreamException();

            return BinaryPrimitives.ReadUInt16BigEndian(_buffer);
        }

        protected static short ReadInt16(Stream stream)
        {
            if (stream.Read(_buffer, 0, sizeof(short)) != sizeof(short))
                throw new EndOfStreamException();

            return BinaryPrimitives.ReadInt16BigEndian(_buffer);
        }

        protected static uint ReadUInt32(Stream stream)
        {
            if (stream.Read(_buffer, 0, sizeof(uint)) != sizeof(uint))
                throw new EndOfStreamException();

            return BinaryPrimitives.ReadUInt32BigEndian(_buffer);
        }

        protected static int ReadInt32(Stream stream)
        {
            if (stream.Read(_buffer, 0, sizeof(int)) != sizeof(int))
                throw new EndOfStreamException();

            return BinaryPrimitives.ReadInt32BigEndian(_buffer);
        }

        protected static ulong ReadUInt64(Stream stream)
        {
            if (stream.Read(_buffer, 0, sizeof(ulong)) != sizeof(ulong))
                throw new EndOfStreamException();

            return BinaryPrimitives.ReadUInt64BigEndian(_buffer);
        }

        protected static long ReadInt64(Stream stream)
        {
            if (stream.Read(_buffer, 0, sizeof(long)) != sizeof(long))
                throw new EndOfStreamException();

            return BinaryPrimitives.ReadInt64BigEndian(_buffer);
        }

        protected static string ReadString(Stream stream)
        {
            ushort length = ReadUInt16(stream);
            if (stream.Read(_buffer, 0, StringSize) != StringSize)
                throw new EndOfStreamException();
            // HFS+ Unicode strings are UTF-16 big-endian, matching the span-based ReadString below
            return Encoding.BigEndianUnicode.GetString(_buffer, 0, Math.Min(length * 2, StringSize));
        }

        protected static DateTime ReadDate(Stream stream)
        {
            uint seconds = ReadUInt32(stream);
            var span = TimeSpan.FromSeconds(seconds);
            return Epoch + span;
        }

        protected static byte ReadUInt8(ref ReadOnlySpan<byte> data)
        {
            byte val = data[0];
            data = data.Slice(sizeof(byte));
            return val;
        }

        protected static ushort ReadUInt16(ref ReadOnlySpan<byte> data)
        {
            ushort val = BinaryPrimitives.ReadUInt16BigEndian(data);
            data = data.Slice(sizeof(ushort));
            return val;
        }

        protected static short ReadInt16(ref ReadOnlySpan<byte> data)
        {
            short val = BinaryPrimitives.ReadInt16BigEndian(data);
            data = data.Slice(sizeof(short));
            return val;
        }

        protected static uint ReadUInt32(ref ReadOnlySpan<byte> data)
        {
            uint val = BinaryPrimitives.ReadUInt32BigEndian(data);
            data = data.Slice(sizeof(uint));
            return val;
        }

        protected static int ReadInt32(ref ReadOnlySpan<byte> data)
        {
            int val = BinaryPrimitives.ReadInt32BigEndian(data);
            data = data.Slice(sizeof(int));
            return val;
        }

        protected static ulong ReadUInt64(ref ReadOnlySpan<byte> data)
        {
            ulong val = BinaryPrimitives.ReadUInt64BigEndian(data);
            data = data.Slice(sizeof(ulong));
            return val;
        }

        protected static long ReadInt64(ref ReadOnlySpan<byte> data)
        {
            long val = BinaryPrimitives.ReadInt64BigEndian(data);
            data = data.Slice(sizeof(long));
            return val;
        }

        protected static string ReadString(ref ReadOnlySpan<byte> data, bool truncate)
        {
            int length = ReadUInt16(ref data);
            if (truncate)
            {
                length = Math.Min(length * 2, StringSize);
                data.Slice(0, length).CopyTo(_buffer);
                data = data.Slice(length);
                return Encoding.BigEndianUnicode.GetString(_buffer, 0, length);
            }
            else
            {
                data.Slice(0, StringSize).CopyTo(_buffer);
                data = data.Slice(StringSize);
                return Encoding.BigEndianUnicode.GetString(_buffer, 0, Math.Min(length * 2, StringSize));
            }
        }

        protected static DateTime ReadDate(ref ReadOnlySpan<byte> data)
        {
            uint seconds = ReadUInt32(ref data);
            var span = TimeSpan.FromSeconds(seconds);
            return Epoch + span;
        }

        protected static string ReadOSType(ref ReadOnlySpan<byte> data)
        {
            data.Slice(0, OSTypeSize).CopyTo(_buffer);
            data = data.Slice(OSTypeSize);
            return Encoding.ASCII.GetString(_buffer, 0, OSTypeSize).NullTerminate();
        }

        protected static HFSPoint ReadPoint(ref ReadOnlySpan<byte> data)
        {
            return new HFSPoint()
            {
                V = ReadInt16(ref data),
                H = ReadInt16(ref data)
            };
        }

        protected static HFSRect ReadRect(ref ReadOnlySpan<byte> data)
        {
            return new HFSRect()
            {
                Top = ReadInt16(ref data),
                Left = ReadInt16(ref data),
                Bottom = ReadInt16(ref data),
                Right = ReadInt16(ref data)
            };
        }
    }
}
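ReadDate above implements the HFS timestamp convention: an unsigned count of seconds since 1904-01-01 00:00:00 UTC. A BCL-only worked example (the sample value is arbitrary):

    using System;

    class HfsDateDemo
    {
        // Same epoch HFSStructBase uses: HFS timestamps count seconds from 1904-01-01 UTC.
        static readonly DateTime Epoch = new DateTime(1904, 1, 1, 0, 0, 0, DateTimeKind.Utc);

        static void Main()
        {
            uint raw = 0xD8000000;                            // arbitrary on-disk value
            DateTime utc = Epoch + TimeSpan.FromSeconds(raw); // unsigned: no dates before 1904
            Console.WriteLine(utc);                           // a date in late 2018
        }
    }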
108  src/SharpCompress/Common/Dmg/HFS/HFSTreeHeaderRecord.cs  Normal file
@@ -0,0 +1,108 @@
using System;
using System.IO;

namespace SharpCompress.Common.Dmg.HFS
{
    internal enum HFSTreeType : byte
    {
        HFS = 0,    // control file
        User = 128, // user btree type starts from 128
        Reserved = 255
    }

    internal enum HFSKeyCompareType : byte
    {
        CaseFolding = 0xCF,  // case-insensitive
        BinaryCompare = 0xBC // case-sensitive
    }

    [Flags]
    internal enum HFSTreeAttributes : uint
    {
        None = 0x00000000,
        BadClose = 0x00000001,
        BigKeys = 0x00000002,
        VariableIndexKeys = 0x00000004
    }

    internal sealed class HFSTreeHeaderRecord : HFSStructBase
    {
        public ushort TreeDepth;
        public uint RootNode;
        public uint LeafRecords;
        public uint FirstLeafNode;
        public uint LastLeafNode;
        public ushort NodeSize;
        public ushort MaxKeyLength;
        public uint TotalNodes;
        public uint FreeNodes;
        public uint ClumpSize;
        public HFSTreeType TreeType;
        public HFSKeyCompareType KeyCompareType;
        public HFSTreeAttributes Attributes;

        private HFSTreeHeaderRecord(
            ushort treeDepth,
            uint rootNode,
            uint leafRecords,
            uint firstLeafNode,
            uint lastLeafNode,
            ushort nodeSize,
            ushort maxKeyLength,
            uint totalNodes,
            uint freeNodes,
            uint clumpSize,
            HFSTreeType treeType,
            HFSKeyCompareType keyCompareType,
            HFSTreeAttributes attributes)
        {
            TreeDepth = treeDepth;
            RootNode = rootNode;
            LeafRecords = leafRecords;
            FirstLeafNode = firstLeafNode;
            LastLeafNode = lastLeafNode;
            NodeSize = nodeSize;
            MaxKeyLength = maxKeyLength;
            TotalNodes = totalNodes;
            FreeNodes = freeNodes;
            ClumpSize = clumpSize;
            TreeType = treeType;
            KeyCompareType = keyCompareType;
            Attributes = attributes;
        }

        public static HFSTreeHeaderRecord Read(Stream stream)
        {
            ushort treeDepth = ReadUInt16(stream);
            uint rootNode = ReadUInt32(stream);
            uint leafRecords = ReadUInt32(stream);
            uint firstLeafNode = ReadUInt32(stream);
            uint lastLeafNode = ReadUInt32(stream);
            ushort nodeSize = ReadUInt16(stream);
            ushort maxKeyLength = ReadUInt16(stream);
            uint totalNodes = ReadUInt32(stream);
            uint freeNodes = ReadUInt32(stream);
            _ = ReadUInt16(stream); // reserved
            uint clumpSize = ReadUInt32(stream);
            var treeType = (HFSTreeType)ReadUInt8(stream);
            var keyCompareType = (HFSKeyCompareType)ReadUInt8(stream);
            var attributes = (HFSTreeAttributes)ReadUInt32(stream);
            for (int i = 0; i < 16; i++) _ = ReadUInt32(stream); // reserved

            return new HFSTreeHeaderRecord(
                treeDepth,
                rootNode,
                leafRecords,
                firstLeafNode,
                lastLeafNode,
                nodeSize,
                maxKeyLength,
                totalNodes,
                freeNodes,
                clumpSize,
                treeType,
                keyCompareType,
                attributes);
        }
    }
}
167  src/SharpCompress/Common/Dmg/HFS/HFSTreeNode.cs  Normal file
@@ -0,0 +1,167 @@
using System;
using System.Collections.Generic;
using System.IO;

namespace SharpCompress.Common.Dmg.HFS
{
    internal abstract class HFSTreeNode : HFSStructBase
    {
        private static byte[]? _buffer = null;

        public HFSTreeNodeDescriptor Descriptor { get; }

        protected HFSTreeNode(HFSTreeNodeDescriptor descriptor)
            => Descriptor = descriptor;

        public static bool TryRead(Stream stream, HFSTreeHeaderRecord headerRecord, bool isHFSX, out HFSTreeNode? node)
        {
            node = null;

            if (!HFSTreeNodeDescriptor.TryRead(stream, out var descriptor)) return false;

            int size = (int)headerRecord.NodeSize - HFSTreeNodeDescriptor.Size;
            if ((_buffer is null) || (_buffer.Length < size))
                _buffer = new byte[size * 2]; // grow with headroom so repeated reads reuse the buffer

            if (stream.Read(_buffer, 0, size) != size)
                throw new EndOfStreamException();
            ReadOnlySpan<byte> data = _buffer.AsSpan(0, size);

            switch (descriptor!.Kind)
            {
                case HFSTreeNodeKind.Leaf:
                    node = HFSLeafTreeNode.Read(descriptor, data, headerRecord, isHFSX);
                    return true;

                case HFSTreeNodeKind.Index:
                    node = HFSIndexTreeNode.Read(descriptor, data, headerRecord, isHFSX);
                    return true;

                case HFSTreeNodeKind.Map:
                    node = HFSMapTreeNode.Read(descriptor, data);
                    return true;
            }

            return false;
        }
    }

    internal sealed class HFSHeaderTreeNode : HFSTreeNode
    {
        private const int UserDataSize = 128;

        public HFSTreeHeaderRecord HeaderRecord { get; }

        public IReadOnlyList<byte> UserData { get; }

        public IReadOnlyList<byte> Map { get; }

        private HFSHeaderTreeNode(
            HFSTreeNodeDescriptor descriptor,
            HFSTreeHeaderRecord headerRecord,
            IReadOnlyList<byte> userData,
            IReadOnlyList<byte> map)
            : base(descriptor)
        {
            HeaderRecord = headerRecord;
            UserData = userData;
            Map = map;
        }

        public static HFSHeaderTreeNode Read(HFSTreeNodeDescriptor descriptor, Stream stream)
        {
            if (descriptor.Kind != HFSTreeNodeKind.Header)
                throw new ArgumentException("Descriptor does not define a header node");

            var headerRecord = HFSTreeHeaderRecord.Read(stream);
            var userData = new byte[UserDataSize];
            if (stream.Read(userData, 0, UserDataSize) != UserDataSize)
                throw new EndOfStreamException();

            // 256 = node descriptor (14) + header record (106) + user data (128) + four 2-byte offset slots (8)
            int mapSize = (int)(headerRecord.NodeSize - 256);
            var map = new byte[mapSize];
            if (stream.Read(map, 0, mapSize) != mapSize)
                throw new EndOfStreamException();

            // offset values (not required for header node)
            _ = ReadUInt16(stream);
            _ = ReadUInt16(stream);
            _ = ReadUInt16(stream);
            _ = ReadUInt16(stream);

            return new HFSHeaderTreeNode(descriptor, headerRecord, userData, map);
        }
    }

    internal sealed class HFSMapTreeNode : HFSTreeNode
    {
        public IReadOnlyList<byte> Map { get; }

        private HFSMapTreeNode(HFSTreeNodeDescriptor descriptor, IReadOnlyList<byte> map)
            : base(descriptor)
        {
            Map = map;
        }

        public static HFSMapTreeNode Read(HFSTreeNodeDescriptor descriptor, ReadOnlySpan<byte> data)
        {
            int mapSize = data.Length - 6;
            var map = new byte[mapSize];
            data.Slice(0, mapSize).CopyTo(map);

            return new HFSMapTreeNode(descriptor, map);
        }
    }

    internal sealed class HFSIndexTreeNode : HFSTreeNode
    {
        public IReadOnlyList<HFSPointerRecord> Records { get; }

        private HFSIndexTreeNode(HFSTreeNodeDescriptor descriptor, IReadOnlyList<HFSPointerRecord> records)
            : base(descriptor)
        {
            Records = records;
        }

        public static HFSIndexTreeNode Read(HFSTreeNodeDescriptor descriptor, ReadOnlySpan<byte> data, HFSTreeHeaderRecord headerRecord, bool isHFSX)
        {
            int recordCount = descriptor.NumRecords;
            var records = new HFSPointerRecord[recordCount];
            for (int i = 0; i < recordCount; i++)
                records[i] = HFSPointerRecord.Read(ref data, headerRecord, isHFSX);
            return new HFSIndexTreeNode(descriptor, records);
        }
    }

    internal sealed class HFSLeafTreeNode : HFSTreeNode
    {
        public IReadOnlyList<HFSDataRecord> Records { get; }

        private HFSLeafTreeNode(HFSTreeNodeDescriptor descriptor, IReadOnlyList<HFSDataRecord> records)
            : base(descriptor)
        {
            Records = records;
        }

        public static HFSLeafTreeNode Read(HFSTreeNodeDescriptor descriptor, ReadOnlySpan<byte> data, HFSTreeHeaderRecord headerRecord, bool isHFSX)
        {
            int recordCount = descriptor.NumRecords;
            var recordOffsets = new int[recordCount + 1];
            for (int i = 0; i < recordOffsets.Length; i++)
            {
                // Offsets are stored back-to-front at the node's tail; the extra slot is the free-space offset,
                // so the difference between consecutive offsets gives each record's size.
                var offsetData = data.Slice(data.Length - (2 * i) - 2);
                ushort offset = ReadUInt16(ref offsetData);
                recordOffsets[i] = offset;
            }

            var records = new HFSDataRecord[recordCount];
            for (int i = 0; i < recordCount; i++)
            {
                int size = recordOffsets[i + 1] - recordOffsets[i];
                records[i] = HFSDataRecord.Read(ref data, size, headerRecord, isHFSX);
            }

            return new HFSLeafTreeNode(descriptor, records);
        }
    }
}
||||
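Note: HFSLeafTreeNode.Read depends on the HFS B-tree node layout, where record offsets are stored as big-endian UInt16 values packed backwards from the end of the node; offsets[i + 1] - offsets[i] yields the byte size of record i. A minimal standalone sketch of that offset-table walk (the helper name is illustrative, not part of the change):

    using System;
    using System.Buffers.Binary;

    static int[] ReadOffsetTable(ReadOnlySpan<byte> node, int recordCount)
    {
        var offsets = new int[recordCount + 1];
        for (int i = 0; i < offsets.Length; i++)
        {
            // entry i sits 2 * (i + 1) bytes before the end of the node
            offsets[i] = BinaryPrimitives.ReadUInt16BigEndian(node.Slice(node.Length - (2 * i) - 2));
        }
        return offsets;
    }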
src/SharpCompress/Common/Dmg/HFS/HFSTreeNodeDescriptor.cs (new file, 55 lines)
@@ -0,0 +1,55 @@
using System;
using System.IO;

namespace SharpCompress.Common.Dmg.HFS
{
    internal enum HFSTreeNodeKind : sbyte
    {
        Leaf = -1,
        Index = 0,
        Header = 1,
        Map = 2
    }

    internal sealed class HFSTreeNodeDescriptor : HFSStructBase
    {
        public const int Size = 14;

        public uint FLink { get; }
        public uint BLink { get; }
        public HFSTreeNodeKind Kind { get; }
        public byte Height { get; }
        public ushort NumRecords { get; }

        private HFSTreeNodeDescriptor(uint fLink, uint bLink, HFSTreeNodeKind kind, byte height, ushort numRecords)
        {
            FLink = fLink;
            BLink = bLink;
            Kind = kind;
            Height = height;
            NumRecords = numRecords;
        }

        public static bool TryRead(Stream stream, out HFSTreeNodeDescriptor? descriptor)
        {
            descriptor = null;

            uint fLink = ReadUInt32(stream);
            uint bLink = ReadUInt32(stream);

            sbyte rawKind = (sbyte)ReadUInt8(stream);
            if (!Enum.IsDefined(typeof(HFSTreeNodeKind), rawKind)) return false;
            var kind = (HFSTreeNodeKind)rawKind;

            byte height = ReadUInt8(stream);
            if (((kind == HFSTreeNodeKind.Header) || (kind == HFSTreeNodeKind.Map)) && (height != 0)) return false;
            if ((kind == HFSTreeNodeKind.Leaf) && (height != 1)) return false;

            ushort numRecords = ReadUInt16(stream);
            _ = ReadUInt16(stream); // reserved

            descriptor = new HFSTreeNodeDescriptor(fLink, bLink, kind, height, numRecords);
            return true;
        }
    }
}
src/SharpCompress/Common/Dmg/HFS/HFSUtil.cs (new file, 206 lines)
@@ -0,0 +1,206 @@
using SharpCompress.Archives.Dmg;
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;

namespace SharpCompress.Common.Dmg.HFS
{
    internal static class HFSUtil
    {
        private const string CorruptHFSMessage = "Corrupt HFS volume";

        private static (HFSHeaderTreeNode, IReadOnlyList<HFSTreeNode>) ReadTree(Stream stream, bool isHFSX)
        {
            if (!HFSTreeNodeDescriptor.TryRead(stream, out var headerDesc))
                throw new InvalidFormatException(CorruptHFSMessage);
            var header = HFSHeaderTreeNode.Read(headerDesc!, stream);

            var nodes = new HFSTreeNode[header.HeaderRecord.TotalNodes];
            nodes[0] = header;

            for (int i = 1; i < nodes.Length; i++)
            {
                if (!HFSTreeNode.TryRead(stream, header.HeaderRecord, isHFSX, out var node))
                    throw new InvalidFormatException(CorruptHFSMessage);

                nodes[i] = node!;
            }

            return (header, nodes);
        }

        private static void EnumerateExtentsTree(
            IReadOnlyList<HFSTreeNode> extentsTree,
            IDictionary<HFSExtentKey, HFSExtentRecord> records,
            int parentIndex)
        {
            var parent = extentsTree[parentIndex];
            if (parent is HFSLeafTreeNode leafNode)
            {
                foreach (var record in leafNode.Records)
                {
                    ReadOnlySpan<byte> data = record.Data.AsSpan();
                    var recordData = HFSExtentRecord.Read(ref data);
                    var key = record.GetExtentKey();
                    records.Add(key, recordData);
                }
            }
            else if (parent is HFSIndexTreeNode indexNode)
            {
                foreach (var record in indexNode.Records)
                    EnumerateExtentsTree(extentsTree, records, (int)record.NodeNumber);
            }
            else
            {
                throw new InvalidFormatException(CorruptHFSMessage);
            }
        }

        private static IReadOnlyDictionary<HFSExtentKey, HFSExtentRecord> LoadExtents(IReadOnlyList<HFSTreeNode> extentsTree, int rootIndex)
        {
            var records = new Dictionary<HFSExtentKey, HFSExtentRecord>();
            if (rootIndex == 0) return records;

            EnumerateExtentsTree(extentsTree, records, rootIndex);
            return records;
        }

        private static void EnumerateCatalogTree(
            HFSHeaderTreeNode catalogHeader,
            IReadOnlyList<HFSTreeNode> catalogTree,
            IDictionary<HFSCatalogKey, HFSCatalogRecord> records,
            IDictionary<uint, HFSCatalogThread> threads,
            int parentIndex,
            bool isHFSX)
        {
            var parent = catalogTree[parentIndex];
            if (parent is HFSLeafTreeNode leafNode)
            {
                foreach (var record in leafNode.Records)
                {
                    ReadOnlySpan<byte> data = record.Data.AsSpan();
                    if (HFSCatalogRecord.TryRead(ref data, catalogHeader.HeaderRecord.KeyCompareType, isHFSX, out var recordData))
                    {
                        var key = record.GetCatalogKey();
                        if ((recordData!.Type == HFSCatalogRecordType.FileThread) || (recordData!.Type == HFSCatalogRecordType.FolderThread))
                        {
                            threads.Add(key.ParentId, (HFSCatalogThread)recordData);
                        }
                        else
                        {
                            records.Add(key, recordData);
                        }
                    }
                    else
                    {
                        throw new InvalidFormatException(CorruptHFSMessage);
                    }
                }
            }
            else if (parent is HFSIndexTreeNode indexNode)
            {
                foreach (var record in indexNode.Records)
                    EnumerateCatalogTree(catalogHeader, catalogTree, records, threads, (int)record.NodeNumber, isHFSX);
            }
            else
            {
                throw new InvalidFormatException(CorruptHFSMessage);
            }
        }

        private static (HFSCatalogKey, HFSCatalogRecord) GetRecord(uint id, IDictionary<HFSCatalogKey, HFSCatalogRecord> records, IDictionary<uint, HFSCatalogThread> threads)
        {
            if (threads.TryGetValue(id, out var thread))
            {
                if (records.TryGetValue(thread.CatalogKey, out var record))
                    return (thread.CatalogKey, record!);
            }

            throw new InvalidFormatException(CorruptHFSMessage);
        }

        private static string SanitizePath(string path)
        {
            var sb = new StringBuilder(path.Length);
            foreach (char c in path)
            {
                if (!char.IsControl(c))
                    sb.Append(c);
            }
            return sb.ToString();
        }

        private static string GetPath(HFSCatalogKey key, IDictionary<HFSCatalogKey, HFSCatalogRecord> records, IDictionary<uint, HFSCatalogThread> threads)
        {
            if (key.ParentId == 1)
            {
                return key.Name;
            }
            else
            {
                var (parentKey, _) = GetRecord(key.ParentId, records, threads);
                var path = Path.Combine(GetPath(parentKey, records, threads), key.Name);
                return SanitizePath(path);
            }
        }

        private static IEnumerable<DmgArchiveEntry> LoadEntriesFromCatalogTree(
            Stream partitionStream,
            DmgFilePart filePart,
            HFSVolumeHeader volumeHeader,
            HFSHeaderTreeNode catalogHeader,
            IReadOnlyList<HFSTreeNode> catalogTree,
            IReadOnlyDictionary<HFSExtentKey, HFSExtentRecord> extents,
            DmgArchive archive,
            int rootIndex)
        {
            if (rootIndex == 0) return Array.Empty<DmgArchiveEntry>();

            var records = new Dictionary<HFSCatalogKey, HFSCatalogRecord>();
            var threads = new Dictionary<uint, HFSCatalogThread>();
            EnumerateCatalogTree(catalogHeader, catalogTree, records, threads, rootIndex, volumeHeader.IsHFSX);

            var entries = new List<DmgArchiveEntry>();
            foreach (var kvp in records)
            {
                var key = kvp.Key;
                var record = kvp.Value;

                string path = GetPath(key, records, threads);
                var stream = (record is HFSCatalogFile file) ? new HFSForkStream(partitionStream, volumeHeader, file.DataFork, file.FileId, extents) : null;
                var entry = new DmgArchiveEntry(stream, archive, record, path, filePart);
                entries.Add(entry);
            }

            return entries;
        }

        public static IEnumerable<DmgArchiveEntry> LoadEntriesFromPartition(Stream partitionStream, string fileName, DmgArchive archive)
        {
            if (!HFSVolumeHeader.TryRead(partitionStream, out var volumeHeader))
                throw new InvalidFormatException(CorruptHFSMessage);
            var filePart = new DmgFilePart(partitionStream, fileName);

            var extentsFile = volumeHeader!.ExtentsFile;
            var extentsStream = new HFSForkStream(partitionStream, volumeHeader, extentsFile);
            var (extentsHeader, extentsTree) = ReadTree(extentsStream, volumeHeader.IsHFSX);

            var extents = LoadExtents(extentsTree, (int)extentsHeader.HeaderRecord.RootNode);

            var catalogFile = volumeHeader!.CatalogFile;
            var catalogStream = new HFSForkStream(partitionStream, volumeHeader, catalogFile);
            var (catalogHeader, catalogTree) = ReadTree(catalogStream, volumeHeader.IsHFSX);

            return LoadEntriesFromCatalogTree(
                partitionStream,
                filePart,
                volumeHeader,
                catalogHeader,
                catalogTree,
                extents,
                archive,
                (int)catalogHeader.HeaderRecord.RootNode);
        }
    }
}
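Note: the catalog B-tree stores one thread record per CNID, so GetPath rebuilds a file's path by chasing ParentId until it reaches the virtual root (CNID 1). A self-contained sketch of the same recursion over a plain dictionary (the map shape here is hypothetical, for illustration only):

    using System.Collections.Generic;
    using System.IO;

    static string BuildPath(uint id, IReadOnlyDictionary<uint, (uint Parent, string Name)> nodes)
    {
        // CNID 1 is the parent of the volume's root folder, so the recursion stops there
        var (parent, name) = nodes[id];
        return parent == 1 ? name : Path.Combine(BuildPath(parent, nodes), name);
    }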
src/SharpCompress/Common/Dmg/HFS/HFSVolumeHeader.cs (new file, 179 lines)
@@ -0,0 +1,179 @@
using System;
using System.Collections.Generic;
using System.IO;

namespace SharpCompress.Common.Dmg.HFS
{
    internal sealed class HFSVolumeHeader : HFSStructBase
    {
        private const ushort SignaturePlus = 0x482B;
        private const ushort SignatureX = 0x4858;
        private const int FinderInfoCount = 8;

        public bool IsHFSX { get; }
        public ushort Version { get; }
        public uint Attributes { get; }
        public uint LastMountedVersion { get; }
        public uint JournalInfoBlock { get; }

        public DateTime CreateDate { get; }
        public DateTime ModifyDate { get; }
        public DateTime BackupDate { get; }
        public DateTime CheckedDate { get; }

        public uint FileCount { get; }
        public uint FolderCount { get; }

        public uint BlockSize { get; }
        public uint TotalBlocks { get; }
        public uint FreeBlocks { get; }

        public uint NextAllocation { get; }
        public uint RsrcClumpSize { get; }
        public uint DataClumpSize { get; }
        public uint NextCatalogID { get; }

        public uint WriteCount { get; }
        public ulong EncodingsBitmap { get; }

        public IReadOnlyList<uint> FinderInfo { get; }

        public HFSForkData AllocationFile { get; }
        public HFSForkData ExtentsFile { get; }
        public HFSForkData CatalogFile { get; }
        public HFSForkData AttributesFile { get; }
        public HFSForkData StartupFile { get; }

        public HFSVolumeHeader(
            bool isHFSX,
            ushort version,
            uint attributes,
            uint lastMountedVersion,
            uint journalInfoBlock,
            DateTime createDate,
            DateTime modifyDate,
            DateTime backupDate,
            DateTime checkedDate,
            uint fileCount,
            uint folderCount,
            uint blockSize,
            uint totalBlocks,
            uint freeBlocks,
            uint nextAllocation,
            uint rsrcClumpSize,
            uint dataClumpSize,
            uint nextCatalogID,
            uint writeCount,
            ulong encodingsBitmap,
            IReadOnlyList<uint> finderInfo,
            HFSForkData allocationFile,
            HFSForkData extentsFile,
            HFSForkData catalogFile,
            HFSForkData attributesFile,
            HFSForkData startupFile)
        {
            IsHFSX = isHFSX;
            Version = version;
            Attributes = attributes;
            LastMountedVersion = lastMountedVersion;
            JournalInfoBlock = journalInfoBlock;
            CreateDate = createDate;
            ModifyDate = modifyDate;
            BackupDate = backupDate;
            CheckedDate = checkedDate;
            FileCount = fileCount;
            FolderCount = folderCount;
            BlockSize = blockSize;
            TotalBlocks = totalBlocks;
            FreeBlocks = freeBlocks;
            NextAllocation = nextAllocation;
            RsrcClumpSize = rsrcClumpSize;
            DataClumpSize = dataClumpSize;
            NextCatalogID = nextCatalogID;
            WriteCount = writeCount;
            EncodingsBitmap = encodingsBitmap;
            FinderInfo = finderInfo;
            AllocationFile = allocationFile;
            ExtentsFile = extentsFile;
            CatalogFile = catalogFile;
            AttributesFile = attributesFile;
            StartupFile = startupFile;
        }

        private static IReadOnlyList<uint> ReadFinderInfo(Stream stream)
        {
            var finderInfo = new uint[FinderInfoCount];
            for (int i = 0; i < FinderInfoCount; i++)
                finderInfo[i] = ReadUInt32(stream);
            return finderInfo;
        }

        public static bool TryRead(Stream stream, out HFSVolumeHeader? header)
        {
            header = null;
            stream.Skip(1024); // reserved bytes

            bool isHFSX;
            ushort sig = ReadUInt16(stream);
            if (sig == SignaturePlus) isHFSX = false;
            else if (sig == SignatureX) isHFSX = true;
            else return false;

            ushort version = ReadUInt16(stream);
            uint attributes = ReadUInt32(stream);
            uint lastMountedVersion = ReadUInt32(stream);
            uint journalInfoBlock = ReadUInt32(stream);
            DateTime createDate = ReadDate(stream);
            DateTime modifyDate = ReadDate(stream);
            DateTime backupDate = ReadDate(stream);
            DateTime checkedDate = ReadDate(stream);
            uint fileCount = ReadUInt32(stream);
            uint folderCount = ReadUInt32(stream);
            uint blockSize = ReadUInt32(stream);
            uint totalBlocks = ReadUInt32(stream);
            uint freeBlocks = ReadUInt32(stream);
            uint nextAllocation = ReadUInt32(stream);
            uint rsrcClumpSize = ReadUInt32(stream);
            uint dataClumpSize = ReadUInt32(stream);
            uint nextCatalogID = ReadUInt32(stream);
            uint writeCount = ReadUInt32(stream);
            ulong encodingsBitmap = ReadUInt64(stream);
            IReadOnlyList<uint> finderInfo = ReadFinderInfo(stream);
            HFSForkData allocationFile = HFSForkData.Read(stream);
            HFSForkData extentsFile = HFSForkData.Read(stream);
            HFSForkData catalogFile = HFSForkData.Read(stream);
            HFSForkData attributesFile = HFSForkData.Read(stream);
            HFSForkData startupFile = HFSForkData.Read(stream);

            header = new HFSVolumeHeader(
                isHFSX,
                version,
                attributes,
                lastMountedVersion,
                journalInfoBlock,
                createDate,
                modifyDate,
                backupDate,
                checkedDate,
                fileCount,
                folderCount,
                blockSize,
                totalBlocks,
                freeBlocks,
                nextAllocation,
                rsrcClumpSize,
                dataClumpSize,
                nextCatalogID,
                writeCount,
                encodingsBitmap,
                finderInfo,
                allocationFile,
                extentsFile,
                catalogFile,
                attributesFile,
                startupFile);

            return true;
        }
    }
}
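Note: the two accepted volume signatures are big-endian ASCII: 0x482B is "H+" (HFS+) and 0x4858 is "HX" (HFSX), located immediately after the 1024 reserved bytes at the start of the partition. A minimal check, assuming the whole partition is available as a span:

    using System;
    using System.Buffers.Binary;

    static bool? IsHfsx(ReadOnlySpan<byte> volume)
    {
        ushort sig = BinaryPrimitives.ReadUInt16BigEndian(volume.Slice(1024));
        if (sig == 0x482B) return false; // "H+": HFS+
        if (sig == 0x4858) return true;  // "HX": HFSX
        return null;                     // not an HFS+/HFSX volume header
    }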
src/SharpCompress/Common/Dmg/Headers/BlkxChunk.cs (new file, 49 lines)
@@ -0,0 +1,49 @@
using System;

namespace SharpCompress.Common.Dmg.Headers
{
    internal enum BlkxChunkType : uint
    {
        Zero = 0x00000000u,
        Uncompressed = 0x00000001u,
        Ignore = 0x00000002u,
        AdcCompressed = 0x80000004u,
        ZlibCompressed = 0x80000005u,
        Bz2Compressed = 0x80000006u,
        Comment = 0x7FFFFFFEu,
        Last = 0xFFFFFFFFu,
    }

    internal sealed class BlkxChunk : DmgStructBase
    {
        private const int SectorSize = 512;

        public BlkxChunkType Type { get; }       // Compression type used or chunk type
        public uint Comment { get; }             // "+beg" or "+end", if EntryType is comment (0x7FFFFFFE). Else reserved.
        public ulong UncompressedOffset { get; } // Start sector of this chunk
        public ulong UncompressedLength { get; } // Number of sectors in this chunk
        public ulong CompressedOffset { get; }   // Start of chunk in data fork
        public ulong CompressedLength { get; }   // Count of bytes of chunk, in data fork

        private BlkxChunk(BlkxChunkType type, uint comment, ulong sectorNumber, ulong sectorCount, ulong compressedOffset, ulong compressedLength)
        {
            Type = type;
            Comment = comment;
            UncompressedOffset = sectorNumber * SectorSize;
            UncompressedLength = sectorCount * SectorSize;
            CompressedOffset = compressedOffset;
            CompressedLength = compressedLength;
        }

        public static bool TryRead(ref ReadOnlySpan<byte> data, out BlkxChunk? chunk)
        {
            chunk = null;

            var type = (BlkxChunkType)ReadUInt32(ref data);
            if (!Enum.IsDefined(typeof(BlkxChunkType), type)) return false;

            chunk = new BlkxChunk(type, ReadUInt32(ref data), ReadUInt64(ref data), ReadUInt64(ref data), ReadUInt64(ref data), ReadUInt64(ref data));
            return true;
        }
    }
}
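Note: each BlkxChunkType implies how a chunk's bytes in the data fork decode back to disk sectors. A hypothetical sketch of the dispatch a reader might perform (SharpCompress's actual wiring lives in its DMG stream implementation, not shown in this hunk):

    using System;
    using System.IO;

    static Stream OpenChunk(BlkxChunkType type, Stream compressed, int uncompressedLength)
    {
        switch (type)
        {
            case BlkxChunkType.Zero:
            case BlkxChunkType.Ignore:
                // commonly treated as runs of zero bytes; nothing is stored in the data fork
                return new MemoryStream(new byte[uncompressedLength]);
            case BlkxChunkType.Uncompressed:
                return compressed; // stored verbatim in the data fork
            default:
                // AdcCompressed / ZlibCompressed / Bz2Compressed need the matching decompressor
                throw new NotSupportedException(type.ToString());
        }
    }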
src/SharpCompress/Common/Dmg/Headers/BlkxTable.cs (new file, 75 lines)
@@ -0,0 +1,75 @@
using System;
using System.Collections.Generic;

namespace SharpCompress.Common.Dmg.Headers
{
    internal sealed class BlkxTable : DmgStructBase
    {
        private const uint Signature = 0x6d697368u;

        public uint Version { get; }       // Current version is 1
        public ulong SectorNumber { get; } // Starting disk sector in this blkx descriptor
        public ulong SectorCount { get; }  // Number of disk sectors in this blkx descriptor

        public ulong DataOffset { get; }
        public uint BuffersNeeded { get; }
        public uint BlockDescriptors { get; } // Number of descriptors

        public UdifChecksum Checksum { get; }

        public IReadOnlyList<BlkxChunk> Chunks { get; }

        private BlkxTable(
            uint version,
            ulong sectorNumber,
            ulong sectorCount,
            ulong dataOffset,
            uint buffersNeeded,
            uint blockDescriptors,
            UdifChecksum checksum,
            IReadOnlyList<BlkxChunk> chunks)
        {
            Version = version;
            SectorNumber = sectorNumber;
            SectorCount = sectorCount;
            DataOffset = dataOffset;
            BuffersNeeded = buffersNeeded;
            BlockDescriptors = blockDescriptors;
            Checksum = checksum;
            Chunks = chunks;
        }

        public static bool TryRead(in byte[] buffer, out BlkxTable? header)
        {
            header = null;

            ReadOnlySpan<byte> data = buffer.AsSpan();

            uint sig = ReadUInt32(ref data);
            if (sig != Signature) return false;

            uint version = ReadUInt32(ref data);
            ulong sectorNumber = ReadUInt64(ref data);
            ulong sectorCount = ReadUInt64(ref data);

            ulong dataOffset = ReadUInt64(ref data);
            uint buffersNeeded = ReadUInt32(ref data);
            uint blockDescriptors = ReadUInt32(ref data);

            data = data.Slice(6 * sizeof(uint)); // reserved

            var checksum = UdifChecksum.Read(ref data);

            uint chunkCount = ReadUInt32(ref data);
            var chunks = new BlkxChunk[chunkCount];
            for (int i = 0; i < chunkCount; i++)
            {
                if (!BlkxChunk.TryRead(ref data, out var chunk)) return false;
                chunks[i] = chunk!;
            }

            header = new BlkxTable(version, sectorNumber, sectorCount, dataOffset, buffersNeeded, blockDescriptors, checksum, chunks);
            return true;
        }
    }
}
src/SharpCompress/Common/Dmg/Headers/DmgHeader.cs (new file, 138 lines)
@@ -0,0 +1,138 @@
using System;
using System.Collections.Generic;
using System.IO;

namespace SharpCompress.Common.Dmg.Headers
{
    internal sealed class DmgHeader : DmgStructBase
    {
        public const int HeaderSize = 512;
        private const uint Signature = 0x6B6F6C79u;
        private const int UuidSize = 16; // 128 bit

        public uint Version { get; }               // Current version is 4
        public uint Flags { get; }                  // Flags
        public ulong RunningDataForkOffset { get; }
        public ulong DataForkOffset { get; }        // Data fork offset (usually 0, beginning of file)
        public ulong DataForkLength { get; }        // Size of data fork (usually up to the XMLOffset, below)
        public ulong RsrcForkOffset { get; }        // Resource fork offset, if any
        public ulong RsrcForkLength { get; }        // Resource fork length, if any
        public uint SegmentNumber { get; }          // Usually 1, may be 0
        public uint SegmentCount { get; }           // Usually 1, may be 0
        public IReadOnlyList<byte> SegmentID { get; } // 128-bit GUID identifier of segment (if SegmentNumber != 0)

        public UdifChecksum DataChecksum { get; }

        public ulong XMLOffset { get; } // Offset of property list in DMG, from beginning
        public ulong XMLLength { get; } // Length of property list

        public UdifChecksum Checksum { get; }

        public uint ImageVariant { get; } // Commonly 1
        public ulong SectorCount { get; } // Size of DMG when expanded, in sectors

        private DmgHeader(
            uint version,
            uint flags,
            ulong runningDataForkOffset,
            ulong dataForkOffset,
            ulong dataForkLength,
            ulong rsrcForkOffset,
            ulong rsrcForkLength,
            uint segmentNumber,
            uint segmentCount,
            IReadOnlyList<byte> segmentID,
            UdifChecksum dataChecksum,
            ulong xMLOffset,
            ulong xMLLength,
            UdifChecksum checksum,
            uint imageVariant,
            ulong sectorCount)
        {
            Version = version;
            Flags = flags;
            RunningDataForkOffset = runningDataForkOffset;
            DataForkOffset = dataForkOffset;
            DataForkLength = dataForkLength;
            RsrcForkOffset = rsrcForkOffset;
            RsrcForkLength = rsrcForkLength;
            SegmentNumber = segmentNumber;
            SegmentCount = segmentCount;
            SegmentID = segmentID;
            DataChecksum = dataChecksum;
            XMLOffset = xMLOffset;
            XMLLength = xMLLength;
            Checksum = checksum;
            ImageVariant = imageVariant;
            SectorCount = sectorCount;
        }

        private static void ReadUuid(ref ReadOnlySpan<byte> data, byte[] buffer)
        {
            data.Slice(0, UuidSize).CopyTo(buffer);
            data = data.Slice(UuidSize);
        }

        internal static bool TryRead(Stream input, out DmgHeader? header)
        {
            header = null;

            var buffer = new byte[HeaderSize];
            int count = input.Read(buffer, 0, HeaderSize);
            if (count != HeaderSize) return false;
            ReadOnlySpan<byte> data = buffer.AsSpan();

            uint sig = ReadUInt32(ref data);
            if (sig != Signature) return false;

            uint version = ReadUInt32(ref data);

            uint size = ReadUInt32(ref data);
            if (size != (uint)HeaderSize) return false;

            uint flags = ReadUInt32(ref data);
            ulong runningDataForkOffset = ReadUInt64(ref data);
            ulong dataForkOffset = ReadUInt64(ref data);
            ulong dataForkLength = ReadUInt64(ref data);
            ulong rsrcForkOffset = ReadUInt64(ref data);
            ulong rsrcForkLength = ReadUInt64(ref data);
            uint segmentNumber = ReadUInt32(ref data);
            uint segmentCount = ReadUInt32(ref data);

            var segmentID = new byte[UuidSize];
            ReadUuid(ref data, segmentID);

            var dataChecksum = UdifChecksum.Read(ref data);

            ulong xmlOffset = ReadUInt64(ref data);
            ulong xmlLength = ReadUInt64(ref data);

            data = data.Slice(120); // Reserved bytes

            var checksum = UdifChecksum.Read(ref data);

            uint imageVariant = ReadUInt32(ref data);
            ulong sectorCount = ReadUInt64(ref data);

            header = new DmgHeader(
                version,
                flags,
                runningDataForkOffset,
                dataForkOffset,
                dataForkLength,
                rsrcForkOffset,
                rsrcForkLength,
                segmentNumber,
                segmentCount,
                segmentID,
                dataChecksum,
                xmlOffset,
                xmlLength,
                checksum,
                imageVariant,
                sectorCount);

            return true;
        }
    }
}
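Note: 0x6B6F6C79 is big-endian ASCII "koly". In UDIF images this 512-byte block is conventionally stored as a trailer at the very end of the file, so a caller would typically seek before parsing. A minimal sketch, assuming a seekable image stream:

    using System.IO;

    static bool TryReadKoly(Stream image, out DmgHeader? header)
    {
        image.Position = image.Length - DmgHeader.HeaderSize; // the 'koly' block trails the image
        return DmgHeader.TryRead(image, out header);
    }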
src/SharpCompress/Common/Dmg/Headers/DmgStructBase.cs (new file, 22 lines)
@@ -0,0 +1,22 @@
using System;
using System.Buffers.Binary;

namespace SharpCompress.Common.Dmg.Headers
{
    internal abstract class DmgStructBase
    {
        protected static uint ReadUInt32(ref ReadOnlySpan<byte> data)
        {
            uint val = BinaryPrimitives.ReadUInt32BigEndian(data);
            data = data.Slice(sizeof(uint));
            return val;
        }

        protected static ulong ReadUInt64(ref ReadOnlySpan<byte> data)
        {
            ulong val = BinaryPrimitives.ReadUInt64BigEndian(data);
            data = data.Slice(sizeof(ulong));
            return val;
        }
    }
}
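Note: these helpers take the span by ref and slice past each field, so sequential calls walk a big-endian structure field by field. A tiny usage sketch of the same pattern:

    using System;
    using System.Buffers.Binary;

    static uint NextUInt32(ref ReadOnlySpan<byte> data)
    {
        uint val = BinaryPrimitives.ReadUInt32BigEndian(data);
        data = data.Slice(sizeof(uint)); // advance past the consumed field
        return val;
    }

    ReadOnlySpan<byte> data = new byte[] { 0, 0, 0, 1, 0, 0, 0, 2 };
    uint first = NextUInt32(ref data);  // 1; four bytes remain
    uint second = NextUInt32(ref data); // 2; span is now empty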
src/SharpCompress/Common/Dmg/Headers/GptHeader.cs (new file, 90 lines)
@@ -0,0 +1,90 @@
using System;
using System.Buffers.Binary;
using System.IO;

namespace SharpCompress.Common.Dmg.Headers
{
    internal sealed class GptHeader : GptStructBase
    {
        private const int HeaderSize = 92;
        private static readonly ulong Signature = BinaryPrimitives.ReadUInt64LittleEndian(new byte[] { 69, 70, 73, 32, 80, 65, 82, 84 });

        public uint Revision { get; }
        public uint Crc32Header { get; }
        public ulong CurrentLba { get; }
        public ulong BackupLba { get; }
        public ulong FirstUsableLba { get; }
        public ulong LastUsableLba { get; }
        public Guid DiskGuid { get; }
        public ulong EntriesStart { get; }
        public uint EntriesCount { get; }
        public uint EntriesSize { get; }
        public uint Crc32Array { get; }

        private GptHeader(
            uint revision,
            uint crc32Header,
            ulong currentLba,
            ulong backupLba,
            ulong firstUsableLba,
            ulong lastUsableLba,
            Guid diskGuid,
            ulong entriesStart,
            uint entriesCount,
            uint entriesSize,
            uint crc32Array)
        {
            Revision = revision;
            Crc32Header = crc32Header;
            CurrentLba = currentLba;
            BackupLba = backupLba;
            FirstUsableLba = firstUsableLba;
            LastUsableLba = lastUsableLba;
            DiskGuid = diskGuid;
            EntriesStart = entriesStart;
            EntriesCount = entriesCount;
            EntriesSize = entriesSize;
            Crc32Array = crc32Array;
        }

        public static bool TryRead(Stream stream, out GptHeader? header)
        {
            header = null;

            ulong sig = ReadUInt64(stream);
            if (sig != Signature) return false;

            uint revision = ReadUInt32(stream);

            uint headerSize = ReadUInt32(stream);
            if (headerSize != HeaderSize) return false;

            uint crc32Header = ReadUInt32(stream);
            _ = ReadUInt32(stream); // reserved
            ulong currentLba = ReadUInt64(stream);
            ulong backupLba = ReadUInt64(stream);
            ulong firstUsableLba = ReadUInt64(stream);
            ulong lastUsableLba = ReadUInt64(stream);
            Guid diskGuid = ReadGuid(stream);
            ulong entriesStart = ReadUInt64(stream);
            uint entriesCount = ReadUInt32(stream);
            uint entriesSize = ReadUInt32(stream);
            uint crc32Array = ReadUInt32(stream);

            header = new GptHeader(
                revision,
                crc32Header,
                currentLba,
                backupLba,
                firstUsableLba,
                lastUsableLba,
                diskGuid,
                entriesStart,
                entriesCount,
                entriesSize,
                crc32Array);

            return true;
        }
    }
}
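Note: the signature bytes { 69, 70, 73, 32, 80, 65, 82, 84 } are ASCII "EFI PART"; reading them little-endian once lets the whole 8-byte signature be compared as a single UInt64:

    using System.Buffers.Binary;
    using System.Text;

    ulong sig = BinaryPrimitives.ReadUInt64LittleEndian(Encoding.ASCII.GetBytes("EFI PART"));
    // sig == 0x5452415020494645, the value ReadUInt64(stream) is checked against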
src/SharpCompress/Common/Dmg/Headers/GptPartitionEntry.cs (new file, 36 lines)
@@ -0,0 +1,36 @@
using System;
using System.IO;

namespace SharpCompress.Common.Dmg.Headers
{
    internal sealed class GptPartitionEntry : GptStructBase
    {
        public Guid TypeGuid { get; }
        public Guid Guid { get; }
        public ulong FirstLba { get; }
        public ulong LastLba { get; }
        public ulong Attributes { get; }
        public string Name { get; }

        private GptPartitionEntry(Guid typeGuid, Guid guid, ulong firstLba, ulong lastLba, ulong attributes, string name)
        {
            TypeGuid = typeGuid;
            Guid = guid;
            FirstLba = firstLba;
            LastLba = lastLba;
            Attributes = attributes;
            Name = name;
        }

        public static GptPartitionEntry Read(Stream stream)
        {
            return new GptPartitionEntry(
                ReadGuid(stream),
                ReadGuid(stream),
                ReadUInt64(stream),
                ReadUInt64(stream),
                ReadUInt64(stream),
                ReadString(stream, 72));
        }
    }
}
src/SharpCompress/Common/Dmg/Headers/GptStructBase.cs (new file, 56 lines)
@@ -0,0 +1,56 @@
using System;
using System.Buffers.Binary;
using System.IO;
using System.Text;

namespace SharpCompress.Common.Dmg.Headers
{
    internal abstract class GptStructBase
    {
        private static readonly byte[] _buffer = new byte[8];

        protected static ushort ReadUInt16(Stream stream)
        {
            if (stream.Read(_buffer, 0, sizeof(ushort)) != sizeof(ushort))
                throw new EndOfStreamException();

            return BinaryPrimitives.ReadUInt16LittleEndian(_buffer);
        }

        protected static uint ReadUInt32(Stream stream)
        {
            if (stream.Read(_buffer, 0, sizeof(uint)) != sizeof(uint))
                throw new EndOfStreamException();

            return BinaryPrimitives.ReadUInt32LittleEndian(_buffer);
        }

        protected static ulong ReadUInt64(Stream stream)
        {
            if (stream.Read(_buffer, 0, sizeof(ulong)) != sizeof(ulong))
                throw new EndOfStreamException();

            return BinaryPrimitives.ReadUInt64LittleEndian(_buffer);
        }

        protected static Guid ReadGuid(Stream stream)
        {
            int a = (int)ReadUInt32(stream);
            short b = (short)ReadUInt16(stream);
            short c = (short)ReadUInt16(stream);

            if (stream.Read(_buffer, 0, 8) != 8)
                throw new EndOfStreamException();

            return new Guid(a, b, c, _buffer);
        }

        protected static string ReadString(Stream stream, int byteSize)
        {
            var buffer = new byte[byteSize];
            if (stream.Read(buffer, 0, byteSize) != byteSize)
                throw new EndOfStreamException();
            return Encoding.Unicode.GetString(buffer).NullTerminate();
        }
    }
}
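Note: ReadGuid reassembles GPT's mixed-endian GUID layout: the first three fields are little-endian on disk while the final eight bytes are stored verbatim, which is exactly what the Guid(int, short, short, byte[]) constructor expects. Guid.ToByteArray uses the same layout, so the on-disk form of a known type GUID can be checked directly:

    using System;

    var onDisk = new Guid("48465300-0000-11AA-AA11-00306543ECAC").ToByteArray();
    // onDisk[0..4] == { 0x00, 0x53, 0x46, 0x48 } (little-endian 0x48465300);
    // the last 8 bytes stay in the order they appear in the GUID string

One caveat worth noting: the shared static _buffer keeps these readers allocation-free but not thread-safe, which appears fine for the single-threaded parse here.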
src/SharpCompress/Common/Dmg/Headers/UdifChecksum.cs (new file, 33 lines)
@@ -0,0 +1,33 @@
using System;
using System.Collections.Generic;

namespace SharpCompress.Common.Dmg.Headers
{
    internal sealed class UdifChecksum : DmgStructBase
    {
        private const int MaxSize = 32; // * 4 to get byte size

        public uint Type { get; }
        public uint Size { get; } // in bits
        public IReadOnlyList<uint> Bits { get; }

        private UdifChecksum(uint type, uint size, IReadOnlyList<uint> bits)
        {
            Type = type;
            Size = size;
            Bits = bits;
        }

        public static UdifChecksum Read(ref ReadOnlySpan<byte> data)
        {
            uint type = ReadUInt32(ref data);
            uint size = ReadUInt32(ref data);

            var bits = new uint[MaxSize];
            for (int i = 0; i < MaxSize; i++)
                bits[i] = ReadUInt32(ref data);

            return new UdifChecksum(type, size, bits);
        }
    }
}
src/SharpCompress/Common/Dmg/PartitionFormat.cs (new file, 14 lines)
@@ -0,0 +1,14 @@
using System;

namespace SharpCompress.Common.Dmg
{
    internal static class PartitionFormat
    {
        public static readonly Guid AppleHFS = new Guid("48465300-0000-11AA-AA11-00306543ECAC");
        public static readonly Guid AppleUFS = new Guid("55465300-0000-11AA-AA11-00306543ECAC");
        public static readonly Guid AppleBoot = new Guid("426F6F74-0000-11AA-AA11-00306543ECAC");
        public static readonly Guid AppleRaid = new Guid("52414944-0000-11AA-AA11-00306543ECAC");
        public static readonly Guid AppleRaidOffline = new Guid("52414944-5F4F-11AA-AA11-00306543ECAC");
        public static readonly Guid AppleLabel = new Guid("4C616265-6C00-11AA-AA11-00306543ECAC");
    }
}
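Note: these are the well-known Apple GPT partition type GUIDs; a loader matches each GptPartitionEntry.TypeGuid against them to decide which partitions carry an HFS volume worth parsing. A one-line sketch of that check:

    using System;

    static bool IsHfsPartition(Guid typeGuid) => typeGuid == PartitionFormat.AppleHFS;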
@@ -47,7 +47,8 @@ namespace SharpCompress.Common
 
         public override bool CanWrite => false;
 
-        public override void Flush() {
+        public override void Flush()
+        {
         }
 
         public override long Length => _stream.Length;
@@ -8,9 +8,9 @@ namespace SharpCompress.Common
         /// <summary>
         /// Extract to specific directory, retaining filename
         /// </summary>
-        public static void WriteEntryToDirectory(IEntry entry,
+        public static void WriteEntryToDirectory(IEntry entry,
                                                  string destinationDirectory,
-                                                 ExtractionOptions? options,
+                                                 ExtractionOptions? options,
                                                  Action<string, ExtractionOptions?> write)
         {
             string destinationFileName;
@@ -18,20 +18,18 @@ namespace SharpCompress.Common
             string fullDestinationDirectoryPath = Path.GetFullPath(destinationDirectory);
 
             options ??= new ExtractionOptions()
-            {
-                Overwrite = true
-            };
+            {
+                Overwrite = true
+            };
 
             if (options.ExtractFullPath)
             {
-                string folder = Path.GetDirectoryName(entry.Key);
-                string destdir = Path.GetFullPath(
-                    Path.Combine(fullDestinationDirectoryPath, folder)
-                );
+                string folder = Path.GetDirectoryName(entry.Key)!;
+                string destdir = Path.GetFullPath(Path.Combine(fullDestinationDirectoryPath, folder));
 
                 if (!Directory.Exists(destdir))
                 {
-                    if (!destdir.StartsWith(fullDestinationDirectoryPath))
+                    if (!destdir.StartsWith(fullDestinationDirectoryPath, StringComparison.Ordinal))
                     {
                         throw new ExtractionException("Entry is trying to create a directory outside of the destination directory.");
                     }
@@ -41,7 +39,7 @@ namespace SharpCompress.Common
                 destinationFileName = Path.Combine(destdir, file);
             }
             else
-            {
+            {
                 destinationFileName = Path.Combine(fullDestinationDirectoryPath, file);
 
             }
@@ -50,7 +48,7 @@ namespace SharpCompress.Common
             {
                 destinationFileName = Path.GetFullPath(destinationFileName);
 
-                if (!destinationFileName.StartsWith(fullDestinationDirectoryPath))
+                if (!destinationFileName.StartsWith(fullDestinationDirectoryPath, StringComparison.Ordinal))
                 {
                     throw new ExtractionException("Entry is trying to write a file outside of the destination directory.");
                 }
@@ -61,7 +59,7 @@ namespace SharpCompress.Common
                 Directory.CreateDirectory(destinationFileName);
             }
         }
 
 
         public static void WriteEntryToFile(IEntry entry, string destinationFileName,
                                             ExtractionOptions? options,
                                             Action<string, FileMode> openAndWrite)
@@ -78,9 +76,9 @@ namespace SharpCompress.Common
         {
             FileMode fm = FileMode.Create;
             options ??= new ExtractionOptions()
-            {
-                Overwrite = true
-            };
+            {
+                Overwrite = true
+            };
 
             if (!options.Overwrite)
             {
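Note: the two StartsWith hunks above harden the extraction path checks against "zip-slip": an entry key like "../../evil" must not resolve outside the destination root, and the ordinal comparison avoids culture-sensitive prefix surprises. The guard boils down to this pattern:

    using System;
    using System.IO;

    static string ResolveUnder(string root, string entryKey)
    {
        string fullRoot = Path.GetFullPath(root);
        string candidate = Path.GetFullPath(Path.Combine(fullRoot, entryKey));
        if (!candidate.StartsWith(fullRoot, StringComparison.Ordinal))
            throw new InvalidOperationException("Entry escapes the destination directory.");
        return candidate;
    }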
@@ -7,7 +7,7 @@ namespace SharpCompress.Common
         /// <summary>
         /// overwrite target if it exists
         /// </summary>
-        public bool Overwrite {get; set; }
+        public bool Overwrite { get; set; }
 
         /// <summary>
         /// extract with internal directory structure
@@ -15,7 +15,7 @@ namespace SharpCompress.Common.GZip
 
         public override CompressionType CompressionType => CompressionType.GZip;
 
-        public override long Crc => 0;
+        public override long Crc => _filePart.Crc ?? 0;
 
         public override string Key => _filePart.FilePartName;
 
@@ -23,7 +23,7 @@ namespace SharpCompress.Common.GZip
 
         public override long CompressedSize => 0;
 
-        public override long Size => 0;
+        public override long Size => _filePart.UncompressedSize ?? 0;
 
         public override DateTime? LastModifiedTime => _filePart.DateModified;
 
@@ -8,7 +8,7 @@ using SharpCompress.Compressors.Deflate;
 
 namespace SharpCompress.Common.GZip
 {
-    internal class GZipFilePart : FilePart
+    internal sealed class GZipFilePart : FilePart
     {
         private string? _name;
         private readonly Stream _stream;
@@ -16,14 +16,23 @@ namespace SharpCompress.Common.GZip
         internal GZipFilePart(Stream stream, ArchiveEncoding archiveEncoding)
             : base(archiveEncoding)
         {
-            ReadAndValidateGzipHeader(stream);
-            EntryStartPosition = stream.Position;
             _stream = stream;
+            ReadAndValidateGzipHeader();
+            if (stream.CanSeek)
+            {
+                long position = stream.Position;
+                stream.Position = stream.Length - 8;
+                ReadTrailer();
+                stream.Position = position;
+            }
+            EntryStartPosition = stream.Position;
         }
 
         internal long EntryStartPosition { get; }
 
         internal DateTime? DateModified { get; private set; }
+        internal int? Crc { get; private set; }
+        internal int? UncompressedSize { get; private set; }
 
         internal override string FilePartName => _name!;
@@ -37,11 +46,21 @@ namespace SharpCompress.Common.GZip
             return _stream;
         }
 
-        private void ReadAndValidateGzipHeader(Stream stream)
+        private void ReadTrailer()
+        {
+            // Read and potentially verify the GZIP trailer: CRC32 and size mod 2^32
+            Span<byte> trailer = stackalloc byte[8];
+            int n = _stream.Read(trailer);
+
+            Crc = BinaryPrimitives.ReadInt32LittleEndian(trailer);
+            UncompressedSize = BinaryPrimitives.ReadInt32LittleEndian(trailer.Slice(4));
+        }
+
+        private void ReadAndValidateGzipHeader()
         {
             // read the header on the first read
-            byte[] header = new byte[10];
-            int n = stream.Read(header, 0, header.Length);
+            Span<byte> header = stackalloc byte[10];
+            int n = _stream.Read(header);
 
             // workitem 8501: handle edge case (decompress empty stream)
             if (n == 0)
@@ -59,17 +78,17 @@ namespace SharpCompress.Common.GZip
                 throw new ZlibException("Bad GZIP header.");
             }
 
-            int timet = BinaryPrimitives.ReadInt32LittleEndian(header.AsSpan(4));
+            int timet = BinaryPrimitives.ReadInt32LittleEndian(header.Slice(4));
             DateModified = TarHeader.EPOCH.AddSeconds(timet);
             if ((header[3] & 0x04) == 0x04)
             {
                 // read and discard extra field
-                n = stream.Read(header, 0, 2); // 2-byte length field
+                n = _stream.Read(header.Slice(0, 2)); // 2-byte length field
 
-                Int16 extraLength = (Int16)(header[0] + header[1] * 256);
+                short extraLength = (short)(header[0] + header[1] * 256);
                 byte[] extra = new byte[extraLength];
 
-                if (!stream.ReadFully(extra))
+                if (!_stream.ReadFully(extra))
                 {
                     throw new ZlibException("Unexpected end-of-file reading GZIP header.");
                 }
@@ -77,27 +96,27 @@ namespace SharpCompress.Common.GZip
             }
             if ((header[3] & 0x08) == 0x08)
             {
-                _name = ReadZeroTerminatedString(stream);
+                _name = ReadZeroTerminatedString(_stream);
             }
             if ((header[3] & 0x10) == 0x010)
             {
-                ReadZeroTerminatedString(stream);
+                ReadZeroTerminatedString(_stream);
             }
             if ((header[3] & 0x02) == 0x02)
            {
-                stream.ReadByte(); // CRC16, ignore
+                _stream.ReadByte(); // CRC16, ignore
             }
         }
 
         private string ReadZeroTerminatedString(Stream stream)
         {
-            byte[] buf1 = new byte[1];
+            Span<byte> buf1 = stackalloc byte[1];
             var list = new List<byte>();
             bool done = false;
             do
             {
                 // workitem 7740
-                int n = stream.Read(buf1, 0, 1);
+                int n = stream.Read(buf1);
                 if (n != 1)
                 {
                     throw new ZlibException("Unexpected EOF reading GZIP header.");
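Note: per RFC 1952 a gzip member ends with an 8-byte trailer: the CRC-32 of the uncompressed data followed by the uncompressed size mod 2^32, both little-endian. That is why the new constructor seeks to Length - 8 on seekable streams, and why Crc and Size no longer have to be hardwired to 0. A standalone sketch, assuming a seekable single-member gzip file:

    using System;
    using System.Buffers.Binary;
    using System.IO;

    static (uint Crc32, uint SizeMod4G) ReadGzipTrailer(Stream gz)
    {
        Span<byte> trailer = stackalloc byte[8];
        gz.Position = gz.Length - 8; // the trailer is the last 8 bytes of the member
        if (gz.Read(trailer) != 8) throw new EndOfStreamException();
        return (BinaryPrimitives.ReadUInt32LittleEndian(trailer),
                BinaryPrimitives.ReadUInt32LittleEndian(trailer.Slice(4)));
    }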
@@ -1,4 +1,6 @@
-namespace SharpCompress.Common
+using System;
+
+namespace SharpCompress.Common
 {
     public class IncompleteArchiveException : ArchiveException
     {
@@ -6,5 +8,10 @@
             : base(message)
         {
         }
+
+        public IncompleteArchiveException(string message, Exception inner)
+            : base(message, inner)
+        {
+        }
     }
 }
@@ -4,8 +4,8 @@ namespace SharpCompress.Common.Rar.Headers
 {
     internal class AvHeader : RarHeader
     {
-        public AvHeader(RarHeader header, RarCrcBinaryReader reader)
-            : base(header, reader, HeaderType.Av)
+        public AvHeader(RarHeader header, RarCrcBinaryReader reader)
+            : base(header, reader, HeaderType.Av)
         {
             if (IsRar5)
             {
@@ -6,7 +6,7 @@ namespace SharpCompress.Common.Rar.Headers
 {
     internal class ArchiveCryptHeader : RarHeader
     {
-
+
         private const int CRYPT_VERSION = 0; // Supported encryption version.
         private const int SIZE_SALT50 = 16;
         private const int SIZE_SALT30 = 8;
@@ -15,14 +15,14 @@ namespace SharpCompress.Common.Rar.Headers
         private const int SIZE_PSWCHECK_CSUM = 4;
         private const int CRYPT5_KDF_LG2_COUNT = 15; // LOG2 of PDKDF2 iteration count.
         private const int CRYPT5_KDF_LG2_COUNT_MAX = 24; // LOG2 of maximum accepted iteration count.
-
-
+
+
         private bool _usePswCheck;
         private uint _lg2Count; // Log2 of PBKDF2 repetition count.
         private byte[] _salt;
         private byte[] _pswCheck;
         private byte[] _pswCheckCsm;
-
+
         public ArchiveCryptHeader(RarHeader header, RarCrcBinaryReader reader)
             : base(header, reader, HeaderType.Crypt)
         {
@@ -35,12 +35,12 @@ namespace SharpCompress.Common.Rar.Headers
             {
                 //error?
                 return;
-            }
+            }
             var encryptionFlags = reader.ReadRarVIntUInt32();
             _usePswCheck = FlagUtility.HasFlag(encryptionFlags, EncryptionFlagsV5.CHFL_CRYPT_PSWCHECK);
             _lg2Count = reader.ReadRarVIntByte(1);
-
+
             //UsePswCheck = HasHeaderFlag(EncryptionFlagsV5.CHFL_CRYPT_PSWCHECK);
             if (_lg2Count > CRYPT5_KDF_LG2_COUNT_MAX)
             {
@@ -4,14 +4,14 @@ namespace SharpCompress.Common.Rar.Headers
 {
     internal sealed class ArchiveHeader : RarHeader
     {
-        public ArchiveHeader(RarHeader header, RarCrcBinaryReader reader)
-            : base(header, reader, HeaderType.Archive)
+        public ArchiveHeader(RarHeader header, RarCrcBinaryReader reader)
+            : base(header, reader, HeaderType.Archive)
         {
         }
 
         protected override void ReadFinish(MarkingBinaryReader reader)
         {
-            if (IsRar5)
+            if (IsRar5)
             {
                 Flags = reader.ReadRarVIntUInt16();
                 if (HasFlag(ArchiveFlagsV5.HAS_VOLUME_NUMBER))
@@ -22,8 +22,8 @@ namespace SharpCompress.Common.Rar.Headers
                 //if (ExtraSize != 0) {
                 //    ReadLocator(reader);
                 //}
-            }
-            else
+            }
+            else
             {
                 Flags = HeaderFlags;
                 HighPosAv = reader.ReadInt16();
@@ -35,7 +35,8 @@ namespace SharpCompress.Common.Rar.Headers
             }
         }
 
-        private void ReadLocator(MarkingBinaryReader reader) {
+        private void ReadLocator(MarkingBinaryReader reader)
+        {
             var size = reader.ReadRarVIntUInt16();
             var type = reader.ReadRarVIntUInt16();
             if (type != 1)
@@ -47,18 +48,20 @@ namespace SharpCompress.Common.Rar.Headers
             const ushort hasQuickOpenOffset = 0x01;
             const ushort hasRecoveryOffset = 0x02;
             ulong quickOpenOffset = 0;
-            if ((flags & hasQuickOpenOffset) == hasQuickOpenOffset) {
+            if ((flags & hasQuickOpenOffset) == hasQuickOpenOffset)
+            {
                 quickOpenOffset = reader.ReadRarVInt();
             }
             ulong recoveryOffset = 0;
-            if ((flags & hasRecoveryOffset) == hasRecoveryOffset) {
+            if ((flags & hasRecoveryOffset) == hasRecoveryOffset)
+            {
                 recoveryOffset = reader.ReadRarVInt();
             }
         }
 
-        private ushort Flags { get; set; }
+        private ushort Flags { get; set; }
 
-        private bool HasFlag(ushort flag)
+        private bool HasFlag(ushort flag)
         {
             return (Flags & flag) == flag;
         }
@@ -5,8 +5,8 @@ namespace SharpCompress.Common.Rar.Headers
     internal class CommentHeader : RarHeader
     {
         protected CommentHeader(RarHeader header, RarCrcBinaryReader reader)
-            : base(header, reader, HeaderType.Comment)
-        {
+            : base(header, reader, HeaderType.Comment)
+        {
             if (IsRar5)
             {
                 throw new InvalidFormatException("unexpected rar5 record");
@@ -4,14 +4,14 @@ namespace SharpCompress.Common.Rar.Headers
 {
     internal class EndArchiveHeader : RarHeader
     {
-        public EndArchiveHeader(RarHeader header, RarCrcBinaryReader reader)
-            : base(header, reader, HeaderType.EndArchive)
+        public EndArchiveHeader(RarHeader header, RarCrcBinaryReader reader)
+            : base(header, reader, HeaderType.EndArchive)
         {
         }
 
         protected override void ReadFinish(MarkingBinaryReader reader)
         {
-            if (IsRar5)
+            if (IsRar5)
             {
                 Flags = reader.ReadRarVIntUInt16();
             }
@@ -31,7 +31,7 @@ namespace SharpCompress.Common.Rar.Headers
 
         private ushort Flags { get; set; }
 
-        private bool HasFlag(ushort flag)
+        private bool HasFlag(ushort flag)
         {
             return (Flags & flag) == flag;
         }
@@ -21,18 +21,18 @@ namespace SharpCompress.Common.Rar.Headers
     {
         private uint _fileCrc;
 
-        public FileHeader(RarHeader header, RarCrcBinaryReader reader, HeaderType headerType)
-            : base(header, reader, headerType)
+        public FileHeader(RarHeader header, RarCrcBinaryReader reader, HeaderType headerType)
+            : base(header, reader, headerType)
         {
         }
 
-        protected override void ReadFinish(MarkingBinaryReader reader)
+        protected override void ReadFinish(MarkingBinaryReader reader)
         {
-            if (IsRar5)
+            if (IsRar5)
             {
                 ReadFromReaderV5(reader);
             }
-            else
+            else
             {
                 ReadFromReaderV4(reader);
             }
@@ -49,11 +49,13 @@ namespace SharpCompress.Common.Rar.Headers
 
             FileAttributes = reader.ReadRarVIntUInt32();
 
-            if (HasFlag(FileFlagsV5.HAS_MOD_TIME)) {
+            if (HasFlag(FileFlagsV5.HAS_MOD_TIME))
+            {
                 FileLastModifiedTime = Utility.UnixTimeToDateTime(reader.ReadUInt32());
             }
 
-            if (HasFlag(FileFlagsV5.HAS_CRC32)) {
+            if (HasFlag(FileFlagsV5.HAS_CRC32))
+            {
                 FileCrc = reader.ReadUInt32();
             }
 
@@ -65,7 +67,7 @@ namespace SharpCompress.Common.Rar.Headers
             // but it was already used in RAR 1.5 and Unpack needs to distinguish
             // them.
             CompressionAlgorithm = (byte)((compressionInfo & 0x3f) + 50);
-
+
             // 7th bit (0x0040) defines the solid flag. If it is set, RAR continues to use the compression dictionary left after processing preceding files.
             // It can be set only for file headers and is never set for service headers.
             IsSolid = (compressionInfo & 0x40) == 0x40;
@@ -74,7 +76,7 @@ namespace SharpCompress.Common.Rar.Headers
             CompressionMethod = (byte)((compressionInfo >> 7) & 0x7);
 
             // Bits 11 - 14 (0x3c00) define the minimum size of dictionary size required to extract data. Value 0 means 128 KB, 1 - 256 KB, ..., 14 - 2048 MB, 15 - 4096 MB.
-            WindowSize = IsDirectory ? 0 : ((size_t)0x20000) << ((compressionInfo>>10) & 0xf);
+            WindowSize = IsDirectory ? 0 : ((size_t)0x20000) << ((compressionInfo >> 10) & 0xf);
 
             HostOs = reader.ReadRarVIntByte();
 
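Note: the RAR5 compressionInfo vint decoded above packs four fields: bits 0-5 carry the algorithm version (offset by 50), bit 6 the solid flag, bits 7-9 the method, and bits 10-13 (0-based) the dictionary size exponent, 128 KB << n. As a self-contained decoder of the same layout:

    static (byte Algorithm, bool Solid, byte Method, ulong WindowSize) DecodeCompressionInfo(uint info, bool isDirectory)
    {
        byte algorithm = (byte)((info & 0x3f) + 50);  // bits 0-5: version tag
        bool solid = (info & 0x40) != 0;              // bit 6: solid flag
        byte method = (byte)((info >> 7) & 0x7);      // bits 7-9: method
        ulong window = isDirectory ? 0UL : 0x20000UL << (int)((info >> 10) & 0xf); // bits 10-13: 128 KB << n
        return (algorithm, solid, method, window);
    }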
@@ -101,18 +103,20 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
FileName = ConvertPathV5(Encoding.UTF8.GetString(b, 0, b.Length));
|
||||
|
||||
// extra size seems to be redudant since we know the total header size
|
||||
if (ExtraSize != RemainingHeaderBytes(reader))
|
||||
if (ExtraSize != RemainingHeaderBytes(reader))
|
||||
{
|
||||
throw new InvalidFormatException("rar5 header size / extra size inconsistency");
|
||||
}
|
||||
|
||||
isEncryptedRar5 = false;
|
||||
|
||||
while (RemainingHeaderBytes(reader) > 0) {
|
||||
while (RemainingHeaderBytes(reader) > 0)
|
||||
{
|
||||
var size = reader.ReadRarVIntUInt16();
|
||||
int n = RemainingHeaderBytes(reader);
|
||||
var type = reader.ReadRarVIntUInt16();
|
||||
switch (type) {
|
||||
switch (type)
|
||||
{
|
||||
//TODO
|
||||
case 1: // file encryption
|
||||
{
|
||||
@@ -120,7 +124,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
|
||||
//var version = reader.ReadRarVIntByte();
|
||||
//if (version != 0) throw new InvalidFormatException("unknown encryption algorithm " + version);
|
||||
}
|
||||
}
|
||||
break;
|
||||
// case 2: // file hash
|
||||
// {
|
||||
@@ -131,38 +135,41 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
ushort flags = reader.ReadRarVIntUInt16();
|
||||
var isWindowsTime = (flags & 1) == 0;
|
||||
if ((flags & 0x2) == 0x2) {
|
||||
if ((flags & 0x2) == 0x2)
|
||||
{
|
||||
FileLastModifiedTime = ReadExtendedTimeV5(reader, isWindowsTime);
|
||||
}
|
||||
if ((flags & 0x4) == 0x4) {
|
||||
if ((flags & 0x4) == 0x4)
|
||||
{
|
||||
FileCreatedTime = ReadExtendedTimeV5(reader, isWindowsTime);
|
||||
}
|
||||
if ((flags & 0x8) == 0x8) {
|
||||
if ((flags & 0x8) == 0x8)
|
||||
{
|
||||
FileLastAccessedTime = ReadExtendedTimeV5(reader, isWindowsTime);
|
||||
}
|
||||
}
|
||||
break;
|
||||
//TODO
|
||||
// case 4: // file version
|
||||
// {
|
||||
//
|
||||
// }
|
||||
// break;
|
||||
// case 5: // file system redirection
|
||||
// {
|
||||
//
|
||||
// }
|
||||
// break;
|
||||
// case 6: // unix owner
|
||||
// {
|
||||
//
|
||||
// }
|
||||
// break;
|
||||
// case 7: // service data
|
||||
// {
|
||||
//
|
||||
// }
|
||||
// break;
|
||||
//TODO
|
||||
// case 4: // file version
|
||||
// {
|
||||
//
|
||||
// }
|
||||
// break;
|
||||
// case 5: // file system redirection
|
||||
// {
|
||||
//
|
||||
// }
|
||||
// break;
|
||||
// case 6: // unix owner
|
||||
// {
|
||||
//
|
||||
// }
|
||||
// break;
|
||||
// case 7: // service data
|
||||
// {
|
||||
//
|
||||
// }
|
||||
// break;
|
||||
|
||||
default:
|
||||
// skip unknown record types to allow new record types to be added in the future
|
||||
@@ -171,25 +178,26 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
// drain any trailing bytes of extra record
|
||||
int did = n - RemainingHeaderBytes(reader);
|
||||
int drain = size - did;
|
||||
if (drain > 0)
|
||||
if (drain > 0)
|
||||
{
|
||||
reader.ReadBytes(drain);
|
||||
}
|
||||
}
|
||||
|
||||
if (AdditionalDataSize != 0) {
|
||||
if (AdditionalDataSize != 0)
|
||||
{
|
||||
CompressedSize = AdditionalDataSize;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private static DateTime ReadExtendedTimeV5(MarkingBinaryReader reader, bool isWindowsTime)
|
||||
private static DateTime ReadExtendedTimeV5(MarkingBinaryReader reader, bool isWindowsTime)
|
||||
{
|
||||
if (isWindowsTime)
|
||||
if (isWindowsTime)
|
||||
{
|
||||
return DateTime.FromFileTime(reader.ReadInt64());
|
||||
}
|
||||
else
|
||||
}
|
||||
else
|
||||
{
|
||||
return Utility.UnixTimeToDateTime(reader.ReadUInt32());
|
||||
}
|
||||
@@ -201,7 +209,7 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
{
|
||||
// replace embedded \\ with valid filename char
|
||||
return path.Replace('\\', '-').Replace('/', '\\');
|
||||
}
|
||||
}
|
||||
return path;
|
||||
}
|
||||
|
||||
@@ -376,20 +384,22 @@ namespace SharpCompress.Common.Rar.Headers
|
||||
|
||||
private ushort Flags { get; set; }

private bool HasFlag(ushort flag)
private bool HasFlag(ushort flag)
{
return (Flags & flag) == flag;
}

internal uint FileCrc
{
get {
if (IsRar5 && !HasFlag(FileFlagsV5.HAS_CRC32)) {
//!!! rar5:
internal uint FileCrc
{
get
{
if (IsRar5 && !HasFlag(FileFlagsV5.HAS_CRC32))
{
//!!! rar5:
throw new InvalidOperationException("TODO rar5");
}
return _fileCrc;
}
return _fileCrc;
}
private set => _fileCrc = value;
}

@@ -409,7 +419,7 @@ namespace SharpCompress.Common.Rar.Headers

//case 29: // rar 3.x compression
//case 50: // RAR 5.0 compression algorithm.
internal byte CompressionAlgorithm { get; private set; }

public bool IsSolid { get; private set; }

// unused for UnpackV1 implementation (limitation)

@@ -427,13 +437,14 @@ namespace SharpCompress.Common.Rar.Headers

internal long DataStartPosition { get; set; }
public Stream PackedStream { get; set; }

public bool IsSplitBefore => IsRar5 ? HasHeaderFlag(HeaderFlagsV5.SPLIT_BEFORE) : HasFlag(FileFlagsV4.SPLIT_BEFORE);
public bool IsSplitAfter => IsRar5 ? HasHeaderFlag(HeaderFlagsV5.SPLIT_AFTER) : HasFlag(FileFlagsV4.SPLIT_AFTER);

public bool IsDirectory => HasFlag(IsRar5 ? FileFlagsV5.DIRECTORY : FileFlagsV4.DIRECTORY);

private bool isEncryptedRar5 = false;
public bool IsEncrypted => IsRar5 ? isEncryptedRar5: HasFlag(FileFlagsV4.PASSWORD);

public bool IsEncrypted => IsRar5 ? isEncryptedRar5 : HasFlag(FileFlagsV4.PASSWORD);

internal DateTime? FileLastModifiedTime { get; private set; }

internal DateTime? FileCreatedTime { get; private set; }

@@ -42,10 +42,10 @@ namespace SharpCompress.Common.Rar.Headers

}

internal static class EncryptionFlagsV5
{
{
// RAR 5.0 archive encryption header specific flags.
public const uint CHFL_CRYPT_PSWCHECK = 0x01; // Password check data is present.

public const uint FHEXTRA_CRYPT_PSWCHECK = 0x01; // Password check data is present.
public const uint FHEXTRA_CRYPT_HASHMAC = 0x02;
}

@@ -1,6 +1,6 @@

namespace SharpCompress.Common.Rar.Headers
{
internal interface IRarHeader
internal interface IRarHeader
{
HeaderType HeaderType { get; }
}

@@ -11,39 +11,39 @@ namespace SharpCompress.Common.Rar.Headers

public bool IsRar5 { get; }

private MarkHeader(bool isRar5)
{
private MarkHeader(bool isRar5)
{
IsRar5 = isRar5;
}

public HeaderType HeaderType => HeaderType.Mark;

private static byte GetByte(Stream stream)
private static byte GetByte(Stream stream)
{
var b = stream.ReadByte();
if (b != -1)
if (b != -1)
{
return (byte)b;
}
throw new EndOfStreamException();
}

public static MarkHeader Read(Stream stream, bool leaveStreamOpen, bool lookForHeader)
public static MarkHeader Read(Stream stream, bool leaveStreamOpen, bool lookForHeader)
{
int maxScanIndex = lookForHeader ? MAX_SFX_SIZE : 0;
try
{
int start = -1;
var b = GetByte(stream); start++;
while (start <= maxScanIndex)
while (start <= maxScanIndex)
{
// Rar old signature: 52 45 7E 5E
// Rar4 signature: 52 61 72 21 1A 07 00
// Rar5 signature: 52 61 72 21 1A 07 01 00
if (b == 0x52)
if (b == 0x52)
{
b = GetByte(stream); start++;
if (b == 0x61)
if (b == 0x61)
{
b = GetByte(stream); start++;
if (b != 0x72)

@@ -70,7 +70,7 @@ namespace SharpCompress.Common.Rar.Headers

}

b = GetByte(stream); start++;
if (b == 1)
if (b == 1)
{
b = GetByte(stream); start++;
if (b != 0)

@@ -79,13 +79,13 @@ namespace SharpCompress.Common.Rar.Headers

}

return new MarkHeader(true); // Rar5
}
else if (b == 0)
}
else if (b == 0)
{
return new MarkHeader(false); // Rar4
}
}
else if (b == 0x45)
}
}
else if (b == 0x45)
{
b = GetByte(stream); start++;
if (b != 0x7e)

@@ -100,9 +100,9 @@ namespace SharpCompress.Common.Rar.Headers

}

throw new InvalidFormatException("Rar format version pre-4 is unsupported.");
}
}
else
}
}
else
{
b = GetByte(stream); start++;
}
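The signature bytes listed in the comments of the hunk above are the whole story for format sniffing. As a minimal sketch of the same classification over a buffered prefix (this helper and its return strings are illustrative, not SharpCompress API, and unlike MarkHeader.Read it does not scan forward through an SFX stub):

    using System;

    static string ClassifyRarSignature(byte[] prefix)
    {
        byte[] rar5 = { 0x52, 0x61, 0x72, 0x21, 0x1A, 0x07, 0x01, 0x00 }; // "Rar!" 1A 07 01 00
        byte[] rar4 = { 0x52, 0x61, 0x72, 0x21, 0x1A, 0x07, 0x00 };       // "Rar!" 1A 07 00
        byte[] old4 = { 0x52, 0x45, 0x7E, 0x5E };                          // pre-4 marker "RE~^"
        if (prefix.AsSpan().StartsWith(rar5)) return "Rar5";
        if (prefix.AsSpan().StartsWith(rar4)) return "Rar4";
        if (prefix.AsSpan().StartsWith(old4)) return "pre-4 (unsupported)";
        return "no RAR marker at offset 0";
    }

Note the Rar4 and Rar5 markers share their first six bytes, which is why the stream-based reader above can branch on the single byte that follows them.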
@@ -2,23 +2,23 @@

namespace SharpCompress.Common.Rar.Headers
{
internal class NewSubHeaderType : IEquatable<NewSubHeaderType>
internal sealed class NewSubHeaderType : IEquatable<NewSubHeaderType>
{
internal static readonly NewSubHeaderType SUBHEAD_TYPE_CMT = new NewSubHeaderType('C', 'M', 'T');
internal static readonly NewSubHeaderType SUBHEAD_TYPE_CMT = new('C', 'M', 'T');

//internal static final NewSubHeaderType SUBHEAD_TYPE_ACL = new NewSubHeaderType(new byte[]{'A','C','L'});
//internal static final NewSubHeaderType SUBHEAD_TYPE_ACL = new (new byte[]{'A','C','L'});

//internal static final NewSubHeaderType SUBHEAD_TYPE_STREAM = new NewSubHeaderType(new byte[]{'S','T','M'});
//internal static final NewSubHeaderType SUBHEAD_TYPE_STREAM = new (new byte[]{'S','T','M'});

//internal static final NewSubHeaderType SUBHEAD_TYPE_UOWNER = new NewSubHeaderType(new byte[]{'U','O','W'});
//internal static final NewSubHeaderType SUBHEAD_TYPE_UOWNER = new (new byte[]{'U','O','W'});

//internal static final NewSubHeaderType SUBHEAD_TYPE_AV = new NewSubHeaderType(new byte[]{'A','V'});
//internal static final NewSubHeaderType SUBHEAD_TYPE_AV = new (new byte[]{'A','V'});

internal static readonly NewSubHeaderType SUBHEAD_TYPE_RR = new NewSubHeaderType('R', 'R');
internal static readonly NewSubHeaderType SUBHEAD_TYPE_RR = new('R', 'R');

//internal static final NewSubHeaderType SUBHEAD_TYPE_OS2EA = new NewSubHeaderType(new byte[]{'E','A','2'});
//internal static final NewSubHeaderType SUBHEAD_TYPE_OS2EA = new (new byte[]{'E','A','2'});

//internal static final NewSubHeaderType SUBHEAD_TYPE_BEOSEA = new NewSubHeaderType(new byte[]{'E','A','B','E'});
//internal static final NewSubHeaderType SUBHEAD_TYPE_BEOSEA = new (new byte[]{'E','A','B','E'});

private readonly byte[] _bytes;

@@ -37,19 +37,13 @@ namespace SharpCompress.Common.Rar.Headers

{
return false;
}
for (int i = 0; i < bytes.Length; ++i)
{
if (_bytes[i] != bytes[i])
{
return false;
}
}
return true;

return _bytes.AsSpan().SequenceEqual(bytes);
}

public bool Equals(NewSubHeaderType other)
public bool Equals(NewSubHeaderType? other)
{
return Equals(other._bytes);
return other is not null && Equals(other._bytes);
}
}
}
@@ -5,8 +5,8 @@ namespace SharpCompress.Common.Rar.Headers

// ProtectHeader is part of the Recovery Record feature
internal sealed class ProtectHeader : RarHeader
{
public ProtectHeader(RarHeader header, RarCrcBinaryReader reader)
: base(header, reader, HeaderType.Protect)
public ProtectHeader(RarHeader header, RarCrcBinaryReader reader)
: base(header, reader, HeaderType.Protect)
{
if (IsRar5)
{

@@ -23,12 +23,12 @@ namespace SharpCompress.Common.Rar.Headers

}
}

private RarHeader(RarCrcBinaryReader reader, bool isRar5, ArchiveEncoding archiveEncoding)
private RarHeader(RarCrcBinaryReader reader, bool isRar5, ArchiveEncoding archiveEncoding)
{
_headerType = HeaderType.Null;
_isRar5 = isRar5;
ArchiveEncoding = archiveEncoding;
if (IsRar5)
if (IsRar5)
{
HeaderCrc = reader.ReadUInt32();
reader.ResetCrc();

@@ -45,7 +45,9 @@ namespace SharpCompress.Common.Rar.Headers

{
AdditionalDataSize = (long)reader.ReadRarVInt();
}
} else {
}
else
{
reader.Mark();
HeaderCrc = reader.ReadUInt16();
reader.ResetCrc();

@@ -59,7 +61,8 @@ namespace SharpCompress.Common.Rar.Headers

}
}

protected RarHeader(RarHeader header, RarCrcBinaryReader reader, HeaderType headerType) {
protected RarHeader(RarHeader header, RarCrcBinaryReader reader, HeaderType headerType)
{
_headerType = headerType;
_isRar5 = header.IsRar5;
HeaderCrc = header.HeaderCrc;

@@ -80,7 +83,8 @@ namespace SharpCompress.Common.Rar.Headers

VerifyHeaderCrc(reader.GetCrc32());
}

protected int RemainingHeaderBytes(MarkingBinaryReader reader) {
protected int RemainingHeaderBytes(MarkingBinaryReader reader)
{
return checked(HeaderSize - (int)reader.CurrentReadByteCount);
}

@@ -108,7 +112,7 @@ namespace SharpCompress.Common.Rar.Headers

protected ushort HeaderFlags { get; }

protected bool HasHeaderFlag(ushort flag)
protected bool HasHeaderFlag(ushort flag)
{
return (HeaderFlags & flag) == flag;
}

@@ -41,11 +41,11 @@ namespace SharpCompress.Common.Rar.Headers

private RarHeader? TryReadNextHeader(Stream stream)
{
RarCrcBinaryReader reader;
if (!IsEncrypted)
if (!IsEncrypted)
{
reader = new RarCrcBinaryReader(stream);
}
else
}
else
{
if (Options.Password is null)
{

@@ -65,7 +65,7 @@ namespace SharpCompress.Common.Rar.Headers

case HeaderCodeV.RAR4_ARCHIVE_HEADER:
{
var ah = new ArchiveHeader(header, reader);
if (ah.IsEncrypted == true)
if (ah.IsEncrypted == true)
{
//!!! rar5 we don't know yet
IsEncrypted = true;

@@ -150,11 +150,11 @@ namespace SharpCompress.Common.Rar.Headers

return new EndArchiveHeader(header, reader);
}
case HeaderCodeV.RAR5_ARCHIVE_ENCRYPTION_HEADER:
{
var ch = new ArchiveCryptHeader(header, reader);
IsEncrypted = true;
return ch;
}
{
var ch = new ArchiveCryptHeader(header, reader);
IsEncrypted = true;
return ch;
}
default:
{
throw new InvalidFormatException("Unknown Rar Header: " + header.HeaderCode);

@@ -162,21 +162,26 @@ namespace SharpCompress.Common.Rar.Headers

}
}

private void SkipData(FileHeader fh, RarCrcBinaryReader reader) {
switch (StreamingMode) {
case StreamingMode.Seekable: {
fh.DataStartPosition = reader.BaseStream.Position;
reader.BaseStream.Position += fh.CompressedSize;
}
private void SkipData(FileHeader fh, RarCrcBinaryReader reader)
{
switch (StreamingMode)
{
case StreamingMode.Seekable:
{
fh.DataStartPosition = reader.BaseStream.Position;
reader.BaseStream.Position += fh.CompressedSize;
}
break;
case StreamingMode.Streaming: {
//skip the data because it's useless?
reader.BaseStream.Skip(fh.CompressedSize);
}
case StreamingMode.Streaming:
{
//skip the data because it's useless?
reader.BaseStream.Skip(fh.CompressedSize);
}
break;
default: {
throw new InvalidFormatException("Invalid StreamingMode");
}
default:
{
throw new InvalidFormatException("Invalid StreamingMode");
}
}
}
}

@@ -5,8 +5,8 @@ namespace SharpCompress.Common.Rar.Headers

internal class SignHeader : RarHeader
{
protected SignHeader(RarHeader header, RarCrcBinaryReader reader)
: base(header, reader, HeaderType.Sign)
{
: base(header, reader, HeaderType.Sign)
{
if (IsRar5)
{
throw new InvalidFormatException("unexpected rar5 record");
@@ -50,11 +50,11 @@ namespace SharpCompress.Common.Rar

if (sizeToRead > 0)
{
int alignedSize = sizeToRead + ((~sizeToRead + 1) & 0xf);
byte[] cipherText = new byte[RarRijndael.CRYPTO_BLOCK_SIZE];
Span<byte> cipherText = stackalloc byte[RarRijndael.CRYPTO_BLOCK_SIZE];
for (int i = 0; i < alignedSize / 16; i++)
{
//long ax = System.currentTimeMillis();
_actualStream.Read(cipherText, 0, RarRijndael.CRYPTO_BLOCK_SIZE);
_actualStream.Read(cipherText);

var readBytes = _rijndael.ProcessBlock(cipherText);
foreach (var readByte in readBytes)
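The alignedSize expression above rounds the requested read size up to the next multiple of the 16-byte AES block. Since ~n + 1 equals -n in two's complement, (-n & 0xF) is exactly the padding needed to reach the next multiple of 16. A standalone check of the arithmetic (the helper name is mine, not the library's):

    static int AlignTo16(int n) => n + ((~n + 1) & 0xF);
    // AlignTo16(1) == 16, AlignTo16(15) == 16, AlignTo16(16) == 16, AlignTo16(17) == 32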
@@ -11,7 +11,7 @@ namespace SharpCompress.Common.Rar

/// As the V2017 port isn't complete, add this check to use the legacy Rar code.
/// </summary>
internal bool IsRarV3 => FileHeader.CompressionAlgorithm == 29 || FileHeader.CompressionAlgorithm == 36;

/// <summary>
/// The File's 32 bit CRC Hash
/// </summary>

@@ -27,13 +27,13 @@ namespace SharpCompress.Common.Rar

_rijndael = new RijndaelEngine();
_aesInitializationVector = new byte[CRYPTO_BLOCK_SIZE];
int rawLength = 2*_password.Length;
int rawLength = 2 * _password.Length;
byte[] rawPassword = new byte[rawLength + 8];
byte[] passwordBytes = Encoding.UTF8.GetBytes(_password);
for (int i = 0; i < _password.Length; i++)
{
rawPassword[i*2] = passwordBytes[i];
rawPassword[i*2 + 1] = 0;
rawPassword[i * 2] = passwordBytes[i];
rawPassword[i * 2 + 1] = 0;
}
for (int i = 0; i < _salt.Length; i++)
{

@@ -68,11 +68,11 @@ namespace SharpCompress.Common.Rar

{
for (int j = 0; j < 4; j++)
{
aesKey[i*4 + j] = (byte)
(((digest[i*4]*0x1000000) & 0xff000000 |
(uint) ((digest[i*4 + 1]*0x10000) & 0xff0000) |
(uint) ((digest[i*4 + 2]*0x100) & 0xff00) |
(uint) (digest[i*4 + 3] & 0xff)) >> (j*8));
aesKey[i * 4 + j] = (byte)
(((digest[i * 4] * 0x1000000) & 0xff000000 |
(uint)((digest[i * 4 + 1] * 0x10000) & 0xff0000) |
(uint)((digest[i * 4 + 2] * 0x100) & 0xff00) |
(uint)(digest[i * 4 + 3] & 0xff)) >> (j * 8));
}
}

@@ -87,11 +87,11 @@ namespace SharpCompress.Common.Rar

return rijndael;
}

public byte[] ProcessBlock(byte[] cipherText)
public byte[] ProcessBlock(ReadOnlySpan<byte> cipherText)
{
var plainText = new byte[CRYPTO_BLOCK_SIZE];
Span<byte> plainText = stackalloc byte[CRYPTO_BLOCK_SIZE]; // 16 bytes
byte[] decryptedBytes = new byte[CRYPTO_BLOCK_SIZE];
_rijndael.ProcessBlock(cipherText, 0, plainText, 0);
_rijndael.ProcessBlock(cipherText, plainText);

for (int j = 0; j < CRYPTO_BLOCK_SIZE; j++)
{
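The key-derivation hunk above interleaves the password bytes with zero bytes before the salt loop (the salt copy itself is cut off by the hunk boundary). For an ASCII password this interleaving produces the UTF-16LE layout the RAR 3 scheme expects. A hedged sketch of just that buffer construction, assuming the salt is appended directly after the password bytes:

    static byte[] BuildRawPassword(string password, byte[] salt)
    {
        // assumption: ASCII password, so UTF-8 bytes + zero interleaving == UTF-16LE
        byte[] utf8 = System.Text.Encoding.UTF8.GetBytes(password);
        byte[] raw = new byte[2 * password.Length + 8];
        for (int i = 0; i < password.Length; i++)
        {
            raw[i * 2] = utf8[i]; // low byte: the character
            raw[i * 2 + 1] = 0;   // high byte: zero
        }
        for (int i = 0; i < salt.Length; i++)
        {
            raw[2 * password.Length + i] = salt[i]; // assumed append position
        }
        return raw;
    }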
@@ -39,20 +39,20 @@ namespace SharpCompress.Common.Rar

switch (header.HeaderType)
{
case HeaderType.Mark:
{
lastMarkHeader = (MarkHeader)header;
}
{
lastMarkHeader = (MarkHeader)header;
}
break;
case HeaderType.Archive:
{
ArchiveHeader = (ArchiveHeader)header;
}
{
ArchiveHeader = (ArchiveHeader)header;
}
break;
case HeaderType.File:
{
var fh = (FileHeader)header;
yield return CreateFilePart(lastMarkHeader!, fh);
}
{
var fh = (FileHeader)header;
yield return CreateFilePart(lastMarkHeader!, fh);
}
break;
}
}

@@ -89,7 +89,7 @@ namespace SharpCompress.Common.SevenZip

{
// v3.13 incorrectly worked with empty folders
// v4.07: Loop for skipping empty folders
for (;;)
for (; ; )
{
if (folderIndex >= _folders.Count)
{

@@ -90,7 +90,7 @@ namespace SharpCompress.Common.SevenZip

private void WaitAttribute(BlockType attribute)
{
for (;;)
for (; ; )
{
BlockType? type = ReadId();
if (type == attribute)

@@ -452,7 +452,7 @@ namespace SharpCompress.Common.SevenZip
#endif

BlockType? type;
for (;;)
for (; ; )
{
type = ReadId();
if (type == BlockType.End)

@@ -508,7 +508,7 @@ namespace SharpCompress.Common.SevenZip
int index = 0;
for (int i = 0; i < numFolders; i++)
{
var f = new CFolder {_firstPackStreamId = index};
var f = new CFolder { _firstPackStreamId = index };
folders.Add(f);
GetNextFolderItem(f);
index += f._packStreams.Count;

@@ -539,7 +539,7 @@ namespace SharpCompress.Common.SevenZip
#endif
}

for (;;)
for (; ; )
{
BlockType? type = ReadId();
if (type == BlockType.End)

@@ -580,7 +580,7 @@ namespace SharpCompress.Common.SevenZip
numUnpackStreamsInFolders = null;

BlockType? type;
for (;;)
for (; ; )
{
type = ReadId();
if (type == BlockType.NumUnpackStream)

@@ -602,7 +602,7 @@ namespace SharpCompress.Common.SevenZip
#endif
continue;
}
if (type == BlockType.Crc || type == BlockType.Size)
if (type is BlockType.Crc or BlockType.Size)
{
break;
}

@@ -672,7 +672,7 @@ namespace SharpCompress.Common.SevenZip

digests = null;

for (;;)
for (; ; )
{
if (type == BlockType.Crc)
{

@@ -755,7 +755,7 @@ namespace SharpCompress.Common.SevenZip
unpackSizes = null;
digests = null;

for (;;)
for (; ; )
{
switch (ReadId())
{

@@ -791,22 +791,14 @@ namespace SharpCompress.Common.SevenZip
#endif
try
{
long dataStartPos;
List<long> packSizes;
List<uint?> packCrCs;
List<CFolder> folders;
List<int> numUnpackStreamsInFolders;
List<long> unpackSizes;
List<uint?> digests;

ReadStreamsInfo(null,
out dataStartPos,
out packSizes,
out packCrCs,
out folders,
out numUnpackStreamsInFolders,
out unpackSizes,
out digests);
out long dataStartPos,
out List<long> packSizes,
out List<uint?> packCrCs,
out List<CFolder> folders,
out List<int> numUnpackStreamsInFolders,
out List<long> unpackSizes,
out List<uint?> digests);

dataStartPos += baseOffset;
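The ReadStreamsInfo refactor above is purely syntactic: C# 7 out-variable declarations move each local into the argument list of the call that assigns it. A minimal illustration of the pattern (names are illustrative, not from the library):

    static bool TryHalve(int value, out int half)
    {
        half = value / 2;
        return (value & 1) == 0;
    }

    // before: int half; if (TryHalve(10, out half)) { /* use half */ }
    // after:  if (TryHalve(10, out int half)) { /* use half */ }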
@@ -934,7 +926,7 @@ namespace SharpCompress.Common.SevenZip

BitVector antiFileVector = null;
int numEmptyStreams = 0;

for (;;)
for (; ; )
{
type = ReadId();
if (type == BlockType.End)

@@ -969,7 +961,7 @@ namespace SharpCompress.Common.SevenZip
#if DEBUG
Log.Write("WinAttributes:");
#endif
ReadAttributeVector(dataVector, numFiles, delegate(int i, uint? attr)
ReadAttributeVector(dataVector, numFiles, delegate (int i, uint? attr)
{
// Some third party implementations established an unofficial extension
// of the 7z archive format by placing posix file attributes in the high

@@ -1057,7 +1049,7 @@ namespace SharpCompress.Common.SevenZip
#if DEBUG
Log.Write("StartPos:");
#endif
ReadNumberVector(dataVector, numFiles, delegate(int i, long? startPos)
ReadNumberVector(dataVector, numFiles, delegate (int i, long? startPos)
{
db._files[i].StartPos = startPos;
#if DEBUG

@@ -1072,7 +1064,7 @@ namespace SharpCompress.Common.SevenZip
#if DEBUG
Log.Write("CTime:");
#endif
ReadDateTimeVector(dataVector, numFiles, delegate(int i, DateTime? time)
ReadDateTimeVector(dataVector, numFiles, delegate (int i, DateTime? time)
{
db._files[i].CTime = time;
#if DEBUG

@@ -1087,7 +1079,7 @@ namespace SharpCompress.Common.SevenZip
#if DEBUG
Log.Write("ATime:");
#endif
ReadDateTimeVector(dataVector, numFiles, delegate(int i, DateTime? time)
ReadDateTimeVector(dataVector, numFiles, delegate (int i, DateTime? time)
{
db._files[i].ATime = time;
#if DEBUG

@@ -1102,7 +1094,7 @@ namespace SharpCompress.Common.SevenZip
#if DEBUG
Log.Write("MTime:");
#endif
ReadDateTimeVector(dataVector, numFiles, delegate(int i, DateTime? time)
ReadDateTimeVector(dataVector, numFiles, delegate (int i, DateTime? time)
{
db._files[i].MTime = time;
#if DEBUG

@@ -1445,8 +1437,7 @@ namespace SharpCompress.Common.SevenZip

private Stream GetCachedDecoderStream(ArchiveDatabase db, int folderIndex)
{
Stream s;
if (!_cachedStreams.TryGetValue(folderIndex, out s))
if (!_cachedStreams.TryGetValue(folderIndex, out Stream s))
{
CFolder folderInfo = db._folders[folderIndex];
int packStreamIndex = db._folders[folderIndex]._firstPackStreamId;

@@ -1494,7 +1485,7 @@ namespace SharpCompress.Common.SevenZip
int numItems = allFilesMode
? db._files.Count
: indices.Length;

if (numItems == 0)
{
return;

@@ -1526,6 +1517,7 @@ namespace SharpCompress.Common.SevenZip
}
}

byte[] buffer = null;
foreach (CExtractFolderInfo efi in extractFolderInfoVector)
{
int startIndex;

@@ -1562,8 +1554,8 @@ namespace SharpCompress.Common.SevenZip

Stream s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes,
folderInfo, db.PasswordProvider);
byte[] buffer = new byte[4 << 10];
for (;;)
buffer ??= new byte[4 << 10];
for (; ; )
{
int processed = s.Read(buffer, 0, buffer.Length);
if (processed == 0)
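buffer ??= new byte[4 << 10] in the hunk above is C# 8's null-coalescing assignment: the 4 KiB copy buffer is now declared once outside the foreach and allocated lazily on first use, instead of being re-allocated for every extracted folder. The operator expands to:

    // equivalent expansion of: buffer ??= new byte[4 << 10];
    if (buffer is null)
    {
        buffer = new byte[4 << 10]; // 4096-byte copy buffer, shared across folders
    }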
@@ -1,6 +1,6 @@

namespace SharpCompress.Common.SevenZip
{
internal struct CMethodId
internal readonly struct CMethodId
{
public const ulong K_COPY_ID = 0;
public const ulong K_LZMA_ID = 0x030101;

@@ -24,9 +24,9 @@
return _id.GetHashCode();
}

public override bool Equals(object obj)
public override bool Equals(object? obj)
{
return obj is CMethodId && (CMethodId)obj == this;
return obj is CMethodId other && Equals(other);
}

public bool Equals(CMethodId other)
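The CMethodId change above is the standard equality pattern for a readonly struct: the object.Equals override pattern-matches and defers to the strongly typed IEquatable<T> overload, so boxed and typed comparisons share one code path. A self-contained analogue (an illustrative type, not the library's):

    using System;

    internal readonly struct MethodId : IEquatable<MethodId>
    {
        private readonly ulong _id;

        public MethodId(ulong id) => _id = id;

        public bool Equals(MethodId other) => _id == other._id;

        public override bool Equals(object? obj) => obj is MethodId other && Equals(other);

        public override int GetHashCode() => _id.GetHashCode();
    }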
@@ -161,7 +161,7 @@ namespace SharpCompress.Common.SevenZip

{
int ending = Offset;

for (;;)
for (; ; )
{
if (ending + 2 > _ending)
{

@@ -32,7 +32,7 @@ namespace SharpCompress.Common.SevenZip

public override DateTime? ArchivedTime => null;

public override bool IsEncrypted => false;
public override bool IsEncrypted => FilePart.IsEncrypted;

public override bool IsDirectory => FilePart.Header.IsDir;

@@ -84,23 +84,25 @@ namespace SharpCompress.Common.SevenZip

{
var coder = Folder!._coders.First();
switch (coder._methodId._id)
{
{
case K_LZMA:
case K_LZMA2:
{
return CompressionType.LZMA;
}
{
return CompressionType.LZMA;
}
case K_PPMD:
{
return CompressionType.PPMd;
}
{
return CompressionType.PPMd;
}
case K_B_ZIP2:
{
return CompressionType.BZip2;
}
{
return CompressionType.BZip2;
}
default:
throw new NotImplementedException();
}
}

internal bool IsEncrypted => Folder!._coders.FindIndex(c => c._methodId._id == CMethodId.K_AES_ID) != -1;
}
}
@@ -7,7 +7,7 @@ using System.Text;

namespace SharpCompress.Common.Tar.Headers
{
internal class TarHeader
internal sealed class TarHeader
{
internal static readonly DateTime EPOCH = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);

@@ -97,7 +97,7 @@ namespace SharpCompress.Common.Tar.Headers

{
numPaddingBytes = BLOCK_SIZE;
}
output.Write(new byte[numPaddingBytes], 0, numPaddingBytes);
output.Write(stackalloc byte[numPaddingBytes]);
}

internal bool Read(BinaryReader reader)

@@ -260,10 +260,16 @@ namespace SharpCompress.Common.Tar.Headers
return Convert.ToInt64(s);
}

private static readonly byte[] eightSpaces = {
(byte)' ', (byte)' ', (byte)' ', (byte)' ',
(byte)' ', (byte)' ', (byte)' ', (byte)' '
};

internal static int RecalculateChecksum(byte[] buf)
{
// Set default value for checksum. That is 8 spaces.
Encoding.UTF8.GetBytes("        ").CopyTo(buf, 148);
eightSpaces.CopyTo(buf, 148);

// Calculate checksum
int headerChecksum = 0;

@@ -276,7 +282,7 @@ namespace SharpCompress.Common.Tar.Headers

internal static int RecalculateAltChecksum(byte[] buf)
{
Encoding.UTF8.GetBytes("        ").CopyTo(buf, 148);
eightSpaces.CopyTo(buf, 148);
int headerChecksum = 0;
foreach (byte b in buf)
{
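The eightSpaces change above avoids re-encoding a constant string on every call; the tar header checksum itself is the unsigned byte sum of the 512-byte header with the 8-byte checksum field (offsets 148-155) counted as ASCII spaces, which is why that field is blanked before summing. A sketch of the whole computation (the helper name is mine):

    static int TarChecksum(byte[] header512)
    {
        int sum = 0;
        for (int i = 0; i < header512.Length; i++)
        {
            // the checksum field itself is summed as if it held spaces
            sum += (i >= 148 && i < 156) ? (byte)' ' : header512[i];
        }
        return sum;
    }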
@@ -32,7 +32,7 @@ namespace SharpCompress.Common.Zip.Headers

byte[] name = reader.ReadBytes(nameLength);
byte[] extra = reader.ReadBytes(extraLength);
byte[] comment = reader.ReadBytes(commentLength);

// According to .ZIP File Format Specification
//
// For example: https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT

@@ -40,7 +40,7 @@ namespace SharpCompress.Common.Zip.Headers
// Bit 11: Language encoding flag (EFS). If this bit is set,
// the filename and comment fields for this file
// MUST be encoded using UTF-8. (see APPENDIX D)

if (Flags.HasFlag(HeaderFlags.Efs))
{
Name = ArchiveEncoding.DecodeUTF8(name);

@@ -63,6 +63,8 @@ namespace SharpCompress.Common.Zip.Headers
var zip64ExtraData = Extra.OfType<Zip64ExtendedInformationExtraField>().FirstOrDefault();
if (zip64ExtraData != null)
{
zip64ExtraData.Process(UncompressedSize, CompressedSize, RelativeOffsetOfEntryHeader, DiskNumberStart);

if (CompressedSize == uint.MaxValue)
{
CompressedSize = zip64ExtraData.CompressedSize;

@@ -24,7 +24,7 @@ namespace SharpCompress.Common.Zip.Headers

ushort extraLength = reader.ReadUInt16();
byte[] name = reader.ReadBytes(nameLength);
byte[] extra = reader.ReadBytes(extraLength);

// According to .ZIP File Format Specification
//
// For example: https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT

@@ -32,7 +32,7 @@ namespace SharpCompress.Common.Zip.Headers
// Bit 11: Language encoding flag (EFS). If this bit is set,
// the filename and comment fields for this file
// MUST be encoded using UTF-8. (see APPENDIX D)

if (Flags.HasFlag(HeaderFlags.Efs))
{
Name = ArchiveEncoding.DecodeUTF8(name);

@@ -41,7 +41,7 @@ namespace SharpCompress.Common.Zip.Headers
{
Name = ArchiveEncoding.Decode(name);
}

LoadExtra(extra);

var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField);

@@ -53,6 +53,8 @@ namespace SharpCompress.Common.Zip.Headers
var zip64ExtraData = Extra.OfType<Zip64ExtendedInformationExtraField>().FirstOrDefault();
if (zip64ExtraData != null)
{
zip64ExtraData.Process(UncompressedSize, CompressedSize, 0, 0);

if (CompressedSize == uint.MaxValue)
{
CompressedSize = zip64ExtraData.CompressedSize;
@@ -1,6 +1,4 @@

#nullable disable

using System;
using System;
using System.Buffers.Binary;
using System.Text;

@@ -20,13 +18,25 @@ namespace SharpCompress.Common.Zip.Headers

internal class ExtraData
{
internal ExtraDataType Type { get; set; }
internal ushort Length { get; set; }
internal byte[] DataBytes { get; set; }
public ExtraData(ExtraDataType type, ushort length, byte[] dataBytes)
{
Type = type;
Length = length;
DataBytes = dataBytes;
}

internal ExtraDataType Type { get; }
internal ushort Length { get; }
internal byte[] DataBytes { get; }
}

internal class ExtraUnicodePathExtraField : ExtraData
internal sealed class ExtraUnicodePathExtraField : ExtraData
{
public ExtraUnicodePathExtraField(ExtraDataType type, ushort length, byte[] dataBytes)
: base(type, length, dataBytes)
{
}

internal byte Version => DataBytes[0];

internal byte[] NameCrc32

@@ -51,70 +61,79 @@ namespace SharpCompress.Common.Zip.Headers
}
}

internal class Zip64ExtendedInformationExtraField : ExtraData
internal sealed class Zip64ExtendedInformationExtraField : ExtraData
{

public Zip64ExtendedInformationExtraField(ExtraDataType type, ushort length, byte[] dataBytes)
: base(type, length, dataBytes)
{
Type = type;
Length = length;
DataBytes = dataBytes;
Process();
}

//From the spec values are only in the extradata if the standard
//value is set to 0xFFFF, but if one of the sizes are present, both are.
//Hence if length == 4 volume only
// if length == 8 offset only
// if length == 12 offset + volume
// if length == 16 sizes only
// if length == 20 sizes + volume
// if length == 24 sizes + offset
// if length == 28 everything.
//It is unclear how many of these are used in the wild.

private void Process()
// From the spec, values are only in the extradata if the standard
// value is set to 0xFFFFFFFF (or 0xFFFF for the Disk Start Number).
// Values, if present, must appear in the following order:
// - Original Size
// - Compressed Size
// - Relative Header Offset
// - Disk Start Number
public void Process(long uncompressedFileSize, long compressedFileSize, long relativeHeaderOffset, ushort diskNumber)
{
switch (DataBytes.Length)
var bytesRequired = ((uncompressedFileSize == uint.MaxValue) ? 8 : 0)
+ ((compressedFileSize == uint.MaxValue) ? 8 : 0)
+ ((relativeHeaderOffset == uint.MaxValue) ? 8 : 0)
+ ((diskNumber == ushort.MaxValue) ? 4 : 0);
var currentIndex = 0;

if (bytesRequired > DataBytes.Length)
{
case 4:
VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes);
return;
case 8:
RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
return;
case 12:
RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes.AsSpan(8));
return;
case 16:
UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(8));
return;
case 20:
UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(8));
VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes.AsSpan(16));
return;
case 24:
UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(8));
RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(16));
return;
case 28:
UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(8));
RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(16));
VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes.AsSpan(24));
return;
default:
throw new ArchiveException("Unexpected size of of Zip64 extended information extra field");
throw new ArchiveException("Zip64 extended information extra field is not large enough for the required information");
}

if (uncompressedFileSize == uint.MaxValue)
{
UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(currentIndex));
currentIndex += 8;
}

if (compressedFileSize == uint.MaxValue)
{
CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(currentIndex));
currentIndex += 8;
}

if (relativeHeaderOffset == uint.MaxValue)
{
RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(currentIndex));
currentIndex += 8;
}

if (diskNumber == ushort.MaxValue)
{
VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes.AsSpan(currentIndex));
}
}

/// <summary>
/// Uncompressed file size. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
/// original entry header had a corresponding 0xFFFFFFFF value.
/// </summary>
public long UncompressedSize { get; private set; }

/// <summary>
/// Compressed file size. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
/// original entry header had a corresponding 0xFFFFFFFF value.
/// </summary>
public long CompressedSize { get; private set; }

/// <summary>
/// Relative offset of the entry header. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
/// original entry header had a corresponding 0xFFFFFFFF value.
/// </summary>
public long RelativeOffsetOfEntryHeader { get; private set; }

/// <summary>
/// Volume number. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
/// original entry header had a corresponding 0xFFFF value.
/// </summary>
public uint VolumeNumber { get; private set; }
}

@@ -122,30 +141,12 @@ namespace SharpCompress.Common.Zip.Headers

{
internal static ExtraData Create(ExtraDataType type, ushort length, byte[] extraData)
{
switch (type)
return type switch
{
case ExtraDataType.UnicodePathExtraField:
return new ExtraUnicodePathExtraField
{
Type = type,
Length = length,
DataBytes = extraData
};
case ExtraDataType.Zip64ExtendedInformationExtraField:
return new Zip64ExtendedInformationExtraField
(
type,
length,
extraData
);
default:
return new ExtraData
{
Type = type,
Length = length,
DataBytes = extraData
};
}
ExtraDataType.UnicodePathExtraField => new ExtraUnicodePathExtraField(type, length, extraData),
ExtraDataType.Zip64ExtendedInformationExtraField => new Zip64ExtendedInformationExtraField(type, length, extraData),
_ => new ExtraData(type, length, extraData)
};
}
}
}
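A worked example of the new Process flow above: suppose a central directory entry stores 0xFFFFFFFF for both sizes but a real header offset and disk number. Then bytesRequired is 8 + 8 = 16, the first eight bytes of the extra field are read as UncompressedSize and the next eight as CompressedSize, and RelativeOffsetOfEntryHeader and VolumeNumber are left untouched because their sentinel values were absent. The old switch inferred the field set from the total length alone; the rewrite keys off which header values actually carry the 0xFFFFFFFF/0xFFFF sentinels, matching the ordering rule quoted from the spec.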
@@ -20,7 +20,7 @@ namespace SharpCompress.Common.Zip.Headers

{
get
{
if (Name.EndsWith("/"))
if (Name.EndsWith('/'))
{
return true;
}

@@ -28,7 +28,7 @@ namespace SharpCompress.Common.Zip.Headers
//.NET Framework 4.5 : System.IO.Compression::CreateFromDirectory() probably writes backslashes to headers
return CompressedSize == 0
&& UncompressedSize == 0
&& Name.EndsWith("\\");
&& Name.EndsWith('\\');
}
}

@@ -105,6 +105,6 @@ namespace SharpCompress.Common.Zip.Headers

internal ZipFilePart Part { get; set; }

internal bool IsZip64 => CompressedSize == uint.MaxValue;
internal bool IsZip64 => CompressedSize >= uint.MaxValue;
}
}

@@ -7,7 +7,7 @@ namespace SharpCompress.Common.Zip

internal class PkwareTraditionalEncryptionData
{
private static readonly CRC32 CRC32 = new CRC32();
private readonly UInt32[] _keys = {0x12345678, 0x23456789, 0x34567890};
private readonly UInt32[] _keys = { 0x12345678, 0x23456789, 0x34567890 };
private readonly ArchiveEncoding _archiveEncoding;

private PkwareTraditionalEncryptionData(string password, ArchiveEncoding archiveEncoding)
@@ -8,7 +8,10 @@ namespace SharpCompress.Common.Zip

{
internal sealed class SeekableZipHeaderFactory : ZipHeaderFactory
{
private const int MAX_ITERATIONS_FOR_DIRECTORY_HEADER = 4096;
private const int MINIMUM_EOCD_LENGTH = 22;
private const int ZIP64_EOCD_LENGTH = 20;
// Comment may be within 64kb + structure 22 bytes
private const int MAX_SEARCH_LENGTH_FOR_EOCD = 65557;
private bool _zip64;

internal SeekableZipHeaderFactory(string? password, ArchiveEncoding archiveEncoding)

@@ -20,14 +23,24 @@ namespace SharpCompress.Common.Zip
{
var reader = new BinaryReader(stream);

SeekBackToHeader(stream, reader, DIRECTORY_END_HEADER_BYTES);
SeekBackToHeader(stream, reader);

var eocd_location = stream.Position;
var entry = new DirectoryEndHeader();
entry.Read(reader);

if (entry.IsZip64)
{
_zip64 = true;
SeekBackToHeader(stream, reader, ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR);

// ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR should be before the EOCD
stream.Seek(eocd_location - ZIP64_EOCD_LENGTH - 4, SeekOrigin.Begin);
uint zip64_locator = reader.ReadUInt32();
if( zip64_locator != ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR )
{
throw new ArchiveException("Failed to locate the Zip64 Directory Locator");
}

var zip64Locator = new Zip64DirectoryEndLocatorHeader();
zip64Locator.Read(reader);

@@ -73,27 +86,49 @@ namespace SharpCompress.Common.Zip

}
}

private static void SeekBackToHeader(Stream stream, BinaryReader reader, uint headerSignature)
private static bool IsMatch( byte[] haystack, int position, byte[] needle)
{
long offset = 0;
uint signature;
int iterationCount = 0;
do
for( int i = 0; i < needle.Length; i++ )
{
if ((stream.Length + offset) - 4 < 0)
if( haystack[ position + i ] != needle[ i ] )
{
throw new ArchiveException("Failed to locate the Zip Header");
}
stream.Seek(offset - 4, SeekOrigin.End);
signature = reader.ReadUInt32();
offset--;
iterationCount++;
if (iterationCount > MAX_ITERATIONS_FOR_DIRECTORY_HEADER)
{
throw new ArchiveException("Could not find Zip file Directory at the end of the file. File may be corrupted.");
return false;
}
}
while (signature != headerSignature);

return true;
}
private static void SeekBackToHeader(Stream stream, BinaryReader reader)
{
// Minimum EOCD length
if (stream.Length < MINIMUM_EOCD_LENGTH)
{
throw new ArchiveException("Could not find Zip file Directory at the end of the file. File may be corrupted.");
}

int len = stream.Length < MAX_SEARCH_LENGTH_FOR_EOCD ? (int)stream.Length : MAX_SEARCH_LENGTH_FOR_EOCD;
// We search for marker in reverse to find the first occurance
byte[] needle = { 0x06, 0x05, 0x4b, 0x50 };

stream.Seek(-len, SeekOrigin.End);

byte[] seek = reader.ReadBytes(len);

// Search in reverse
Array.Reverse(seek);

var max_search_area = len - MINIMUM_EOCD_LENGTH;

for( int pos_from_end = 0; pos_from_end < max_search_area; ++pos_from_end)
{
if( IsMatch(seek, pos_from_end, needle) )
{
stream.Seek(-pos_from_end, SeekOrigin.End);
return;
}
}

throw new ArchiveException("Failed to locate the Zip Header");
}

internal LocalEntryHeader GetLocalHeader(Stream stream, DirectoryEntryHeader directoryEntryHeader)
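Two constants in the hunk above encode ZIP format facts. MAX_SEARCH_LENGTH_FOR_EOCD is 65557 because the end-of-central-directory record is 22 fixed bytes plus a trailing comment of at most 65535 bytes, so the signature must begin within the last 22 + 65535 bytes of the file. And the needle is { 0x06, 0x05, 0x4B, 0x50 } rather than the on-disk signature bytes 50 4B 05 06 ("PK\x05\x06") because the tail buffer is reversed before scanning; walking the reversed buffer front-to-back therefore finds the EOCD occurrence closest to the end of the file first, which is the one that counts when the comment happens to contain a stray signature.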
@@ -42,7 +42,7 @@ namespace SharpCompress.Common.Zip

if (Header.HasData && !Skipped)
{
_decompressionStream ??= GetCompressedStream();

_decompressionStream.Skip();

if (_decompressionStream is DeflateStream deflateStream)

@@ -49,7 +49,10 @@ namespace SharpCompress.Common.Zip

_lastEntryHeader = null;
uint headerBytes = reader.ReadUInt32();
header = ReadHeader(headerBytes, reader);
if (header is null) { yield break; }
if (header is null)
{
yield break;
}

//entry could be zero bytes so we need to know that.
if (header.ZipHeaderType == ZipHeaderType.LocalEntry)

@@ -57,11 +60,11 @@ namespace SharpCompress.Common.Zip
var local_header = ((LocalEntryHeader)header);

// If we have CompressedSize, there is data to be read
if( local_header.CompressedSize > 0 )
if (local_header.CompressedSize > 0)
{
header.HasData = true;
} // Check if zip is streaming ( Length is 0 and is declared in PostDataDescriptor )
else if( local_header.Flags.HasFlag(HeaderFlags.UsePostDataDescriptor) )
else if (local_header.Flags.HasFlag(HeaderFlags.UsePostDataDescriptor))
{
bool isRecording = rewindableStream.IsRecording;
if (!isRecording)

@@ -75,7 +75,7 @@ namespace SharpCompress.Common.Zip

if (disposing)
{
//read out last 10 auth bytes
var ten = new byte[10];
Span<byte> ten = stackalloc byte[10];
_stream.ReadFully(ten);
_stream.Dispose();
}

@@ -33,8 +33,10 @@ namespace SharpCompress.Common.Zip

private int KeySizeInBytes
{
get { return KeyLengthInBytes(_keySize);
}
get
{
return KeyLengthInBytes(_keySize);
}
}

internal static int KeyLengthInBytes(WinzipAesKeySize keySize)
@@ -27,33 +27,33 @@ namespace SharpCompress.Common.Zip

switch (_filePart.Header.CompressionMethod)
{
case ZipCompressionMethod.BZip2:
{
return CompressionType.BZip2;
}
{
return CompressionType.BZip2;
}
case ZipCompressionMethod.Deflate:
{
return CompressionType.Deflate;
}
{
return CompressionType.Deflate;
}
case ZipCompressionMethod.Deflate64:
{
return CompressionType.Deflate64;
}
{
return CompressionType.Deflate64;
}
case ZipCompressionMethod.LZMA:
{
return CompressionType.LZMA;
}
{
return CompressionType.LZMA;
}
case ZipCompressionMethod.PPMd:
{
return CompressionType.PPMd;
}
{
return CompressionType.PPMd;
}
case ZipCompressionMethod.None:
{
return CompressionType.None;
}
{
return CompressionType.None;
}
default:
{
return CompressionType.Unknown;
}
{
return CompressionType.Unknown;
}
}
}
}

@@ -60,72 +60,72 @@ namespace SharpCompress.Common.Zip

switch (method)
{
case ZipCompressionMethod.None:
{
return stream;
}
{
return stream;
}
case ZipCompressionMethod.Deflate:
{
return new DeflateStream(stream, CompressionMode.Decompress);
}
{
return new DeflateStream(stream, CompressionMode.Decompress);
}
case ZipCompressionMethod.Deflate64:
{
return new Deflate64Stream(stream, CompressionMode.Decompress);
}
{
return new Deflate64Stream(stream, CompressionMode.Decompress);
}
case ZipCompressionMethod.BZip2:
{
return new BZip2Stream(stream, CompressionMode.Decompress, false);
}
{
return new BZip2Stream(stream, CompressionMode.Decompress, false);
}
case ZipCompressionMethod.LZMA:
{
if (FlagUtility.HasFlag(Header.Flags, HeaderFlags.Encrypted))
{
throw new NotSupportedException("LZMA with pkware encryption.");
if (FlagUtility.HasFlag(Header.Flags, HeaderFlags.Encrypted))
{
throw new NotSupportedException("LZMA with pkware encryption.");
}
var reader = new BinaryReader(stream);
reader.ReadUInt16(); //LZMA version
var props = new byte[reader.ReadUInt16()];
reader.Read(props, 0, props.Length);
return new LzmaStream(props, stream,
Header.CompressedSize > 0 ? Header.CompressedSize - 4 - props.Length : -1,
FlagUtility.HasFlag(Header.Flags, HeaderFlags.Bit1)
? -1
: (long)Header.UncompressedSize);
}
var reader = new BinaryReader(stream);
reader.ReadUInt16(); //LZMA version
var props = new byte[reader.ReadUInt16()];
reader.Read(props, 0, props.Length);
return new LzmaStream(props, stream,
Header.CompressedSize > 0 ? Header.CompressedSize - 4 - props.Length : -1,
FlagUtility.HasFlag(Header.Flags, HeaderFlags.Bit1)
? -1
: (long)Header.UncompressedSize);
}
case ZipCompressionMethod.PPMd:
{
var props = new byte[2];
stream.ReadFully(props);
return new PpmdStream(new PpmdProperties(props), stream, false);
}
{
Span<byte> props = stackalloc byte[2];
stream.ReadFully(props);
return new PpmdStream(new PpmdProperties(props), stream, false);
}
case ZipCompressionMethod.WinzipAes:
{
ExtraData data = Header.Extra.Where(x => x.Type == ExtraDataType.WinZipAes).SingleOrDefault();
if (data is null)
{
throw new InvalidFormatException("No Winzip AES extra data found.");
}
if (data.Length != 7)
{
throw new InvalidFormatException("Winzip data length is not 7.");
}
ushort compressedMethod = BinaryPrimitives.ReadUInt16LittleEndian(data.DataBytes);
ExtraData? data = Header.Extra.SingleOrDefault(x => x.Type == ExtraDataType.WinZipAes);
if (data is null)
{
throw new InvalidFormatException("No Winzip AES extra data found.");
}
if (data.Length != 7)
{
throw new InvalidFormatException("Winzip data length is not 7.");
}
ushort compressedMethod = BinaryPrimitives.ReadUInt16LittleEndian(data.DataBytes);

if (compressedMethod != 0x01 && compressedMethod != 0x02)
{
throw new InvalidFormatException("Unexpected vendor version number for WinZip AES metadata");
}
if (compressedMethod != 0x01 && compressedMethod != 0x02)
{
throw new InvalidFormatException("Unexpected vendor version number for WinZip AES metadata");
}

ushort vendorId = BinaryPrimitives.ReadUInt16LittleEndian(data.DataBytes.AsSpan(2));
if (vendorId != 0x4541)
{
throw new InvalidFormatException("Unexpected vendor ID for WinZip AES metadata");
ushort vendorId = BinaryPrimitives.ReadUInt16LittleEndian(data.DataBytes.AsSpan(2));
if (vendorId != 0x4541)
{
throw new InvalidFormatException("Unexpected vendor ID for WinZip AES metadata");
}
return CreateDecompressionStream(stream, (ZipCompressionMethod)BinaryPrimitives.ReadUInt16LittleEndian(data.DataBytes.AsSpan(5)));
}
return CreateDecompressionStream(stream, (ZipCompressionMethod)BinaryPrimitives.ReadUInt16LittleEndian(data.DataBytes.AsSpan(5)));
}
default:
{
throw new NotSupportedException("CompressionMethod: " + Header.CompressionMethod);
}
{
throw new NotSupportedException("CompressionMethod: " + Header.CompressionMethod);
}
}
}
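The offsets read from the WinZip AES extra data above follow its 7-byte layout: bytes 0-1 hold the vendor version (0x0001 = AE-1, 0x0002 = AE-2, matching the 0x01/0x02 check), bytes 2-3 hold the vendor ID 0x4541 (the ASCII pair "AE" in little-endian order), byte 4 holds the AES key-strength code that KeyLengthInBytes consumes elsewhere, and bytes 5-6 hold the compression method of the actual payload, which is why CreateDecompressionStream recurses on the value read at offset 5.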
@@ -159,23 +159,23 @@ namespace SharpCompress.Common.Zip

case ZipCompressionMethod.BZip2:
case ZipCompressionMethod.LZMA:
case ZipCompressionMethod.PPMd:
{
return new PkwareTraditionalCryptoStream(plainStream, Header.ComposeEncryptionData(plainStream), CryptoMode.Decrypt);
}
{
return new PkwareTraditionalCryptoStream(plainStream, Header.ComposeEncryptionData(plainStream), CryptoMode.Decrypt);
}

case ZipCompressionMethod.WinzipAes:
{
if (Header.WinzipAesEncryptionData != null)
{
return new WinzipAesCryptoStream(plainStream, Header.WinzipAesEncryptionData, Header.CompressedSize - 10);
if (Header.WinzipAesEncryptionData != null)
{
return new WinzipAesCryptoStream(plainStream, Header.WinzipAesEncryptionData, Header.CompressedSize - 10);
}
return plainStream;
}
return plainStream;
}

default:
{
throw new ArgumentOutOfRangeException();
}
{
throw new ArgumentOutOfRangeException();
}

}
}

@@ -35,20 +35,20 @@ namespace SharpCompress.Common.Zip

switch (headerBytes)
{
case ENTRY_HEADER_BYTES:
{
var entryHeader = new LocalEntryHeader(_archiveEncoding);
entryHeader.Read(reader);
LoadHeader(entryHeader, reader.BaseStream);
{
var entryHeader = new LocalEntryHeader(_archiveEncoding);
entryHeader.Read(reader);
LoadHeader(entryHeader, reader.BaseStream);

_lastEntryHeader = entryHeader;
return entryHeader;
}
_lastEntryHeader = entryHeader;
return entryHeader;
}
case DIRECTORY_START_HEADER_BYTES:
{
var entry = new DirectoryEntryHeader(_archiveEncoding);
entry.Read(reader);
return entry;
}
{
var entry = new DirectoryEntryHeader(_archiveEncoding);
entry.Read(reader);
return entry;
}
case POST_DATA_DESCRIPTOR:
{
if (FlagUtility.HasFlag(_lastEntryHeader!.Flags, HeaderFlags.UsePostDataDescriptor))

@@ -129,7 +129,7 @@ namespace SharpCompress.Common.Zip

if (entryHeader.CompressionMethod == ZipCompressionMethod.WinzipAes)
{
ExtraData data = entryHeader.Extra.SingleOrDefault(x => x.Type == ExtraDataType.WinZipAes);
ExtraData? data = entryHeader.Extra.SingleOrDefault(x => x.Type == ExtraDataType.WinZipAes);
if (data != null)
{
var keySize = (WinzipAesKeySize)data.DataBytes[4];
@@ -52,32 +52,24 @@ namespace SharpCompress.Compressors.ADC

private static int GetChunkSize(byte byt)
{
switch (GetChunkType(byt))
return GetChunkType(byt) switch
{
case PLAIN:
return (byt & 0x7F) + 1;
case TWO_BYTE:
return ((byt & 0x3F) >> 2) + 3;
case THREE_BYTE:
return (byt & 0x3F) + 4;
default:
return -1;
}
PLAIN => (byt & 0x7F) + 1,
TWO_BYTE => ((byt & 0x3F) >> 2) + 3,
THREE_BYTE => (byt & 0x3F) + 4,
_ => -1,
};
}

private static int GetOffset(ReadOnlySpan<byte> chunk)
{
switch (GetChunkType(chunk[0]))
return GetChunkType(chunk[0]) switch
{
case PLAIN:
return 0;
case TWO_BYTE:
return ((chunk[0] & 0x03) << 8) + chunk[1];
case THREE_BYTE:
return (chunk[1] << 8) + chunk[2];
default:
return -1;
}
PLAIN => 0,
TWO_BYTE => ((chunk[0] & 0x03) << 8) + chunk[1],
THREE_BYTE => (chunk[1] << 8) + chunk[2],
_ => -1,
};
}

/// <summary>
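Worked examples of the rewritten chunk decoders, assuming GetChunkType follows the usual ADC bit layout (high bit set selects a PLAIN literal run, otherwise a two- or three-byte copy chunk): a first byte of 0x83 decodes as a PLAIN chunk of size (0x83 & 0x7F) + 1 = 4 with offset 0, while the pair 0x41 0x05 decodes as a TWO_BYTE chunk of size ((0x41 & 0x3F) >> 2) + 3 = 3 with offset ((0x41 & 0x03) << 8) + 0x05 = 0x105.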