Compare commits

...

68 Commits
0.27 ... 0.31

Author SHA1 Message Date
Adam Hathcock
eee8309db8 Mark for 0.31 2022-03-30 12:03:03 +01:00
Adam Hathcock
155cfab792 Merge pull request #651 from loop-evgeny/evgeny-upgrade-adler32
Update Adler32 from ImageSharp v2.1.0
2022-03-30 12:01:32 +01:00
Evgeny Morozov
e1c36afdec Update Adler32 from ImageSharp v2.1.0
Adler32.cs is taken from 09b2cdb83a with minimal change to make it build as part of SharpCompress. Fixes https://github.com/adamhathcock/sharpcompress/issues/650 and https://github.com/adamhathcock/sharpcompress/issues/645.
2022-03-30 12:17:14 +02:00
Adam Hathcock
6b0d6a41ca Merge pull request #638 from Nanook/rar2MultiWithTest
Rar2 fix with new unit tests that fail on the previous build.
2022-02-16 08:33:12 +00:00
Craig
dab157bb71 Rar2 fix with new unit tests that fail on the previous build. 2022-02-15 16:24:22 +00:00
Adam Hathcock
8d17d09455 Merge pull request #624 from adamhathcock/issue-617
Add test and probable fix for Issue 617
2021-11-22 09:20:15 +00:00
Adam Hathcock
05208ccd9b Add test and probable fix for Issue 617 2021-11-22 08:40:40 +00:00
Adam Hathcock
a1e7c0068d Merge pull request #622 from adamhathcock/net461-tests
Net461 tests
2021-10-02 15:32:20 +01:00
Adam Hathcock
e6bec19946 Mark for 0.30 2021-10-02 15:29:22 +01:00
Adam Hathcock
ec2be2869f Fix whitespace from dotnet format 2021-10-02 15:29:03 +01:00
Adam Hathcock
ce5432ed73 Fix tests for multi-targeting 2021-10-02 15:25:43 +01:00
Adam Hathcock
b6e0ad89ce Remove duplicated artifact step 2021-10-02 15:21:05 +01:00
Adam Hathcock
2745bfa19b Minor SDK update 2021-10-02 15:19:51 +01:00
Adam Hathcock
3cdc4b38a6 Test 461 on github actions 2021-10-02 15:19:13 +01:00
Adam Hathcock
fc1ca808d7 Merge pull request #621 from inthemedium/master
Add net461 target to clean up issues with system.* nuget dependencies
2021-10-02 15:08:18 +01:00
Jeff Tyson
6983e66037 Fix preprocessor condition 2021-10-01 16:34:00 +00:00
Jeff Tyson
01f7336d09 Based on docs, the target should be net461 2021-09-29 22:04:47 +00:00
Jeff Tyson
1561bba538 Add net462 target to clean up issues with system.* nuget dependencies 2021-09-29 21:55:11 +00:00
Adam Hathcock
3ecf8a5e0c Merge pull request #616 from amosonn/patch-1
Fix for chunked read for ZLibBaseStream
2021-09-27 09:14:58 +01:00
Adam Hathcock
e2095fc416 Merge branch 'master' into patch-1 2021-09-27 09:08:58 +01:00
Amos Onn
8398d40106 Fix #615 2021-09-14 22:02:18 +02:00
Amos Onn
134fa8892f Test for bug #615 2021-09-14 21:55:05 +02:00
Adam Hathcock
ea5c8dc063 Merge pull request #614 from adamhathcock/ensure-dest-dir-exists
Ensure destination directory exists.
2021-09-12 08:56:42 +01:00
Adam Hathcock
0209d00164 Minor updates and prep for 0.29 2021-09-12 08:52:00 +01:00
Adam Hathcock
a8d065dc9e Ensure destination directory exists 2021-09-12 08:47:30 +01:00
Adam Hathcock
7bd9711ade Merge pull request #610 from cyr/master
Bugfix for TarWriter - too much padding in large files
2021-09-12 08:43:20 +01:00
cyr
61802eadb4 Merge branch 'adamhathcock:master' into master 2021-09-12 09:37:07 +02:00
Adam Hathcock
b425659058 Merge pull request #611 from Thunderstr1k3/fix-zipheader-seeking
Allow seeking in empty zip files
2021-09-12 08:28:05 +01:00
Christian
3e32e3d7b1 Allow seeking in empty zip files 2021-09-02 13:54:32 +02:00
cyr
1b661c9df1 Fixed bug where large (int32+ file size) adds an additional 512 bytes of padding in tar files. 2021-08-27 22:38:04 +02:00
Adam Hathcock
54fc26b93d Update build and mark for 0.28.3 2021-06-04 13:43:35 +01:00
Adam Hathcock
161f99bbad Merge pull request #601 from salvois/master
Write ZIP64 End of Central Directory only if needed.
2021-06-04 13:22:01 +01:00
Adam Hathcock
c012db0776 Merge branch 'master' into master 2021-06-04 13:17:13 +01:00
Adam Hathcock
8ee257d299 Merge pull request #592 from adamhathcock/memory-downgrade
Downgrade System.Memory to fix buffer version issue
2021-06-04 13:16:40 +01:00
Adam Hathcock
f9522107c3 Merge branch 'master' into memory-downgrade 2021-06-04 13:16:34 +01:00
Adam Hathcock
e07046a37a Merge pull request #596 from DannyBoyk/issue_595_conditionally_read_zip64_extra
Conditionally parse Zip64 extra field based on specification
2021-06-04 13:07:08 +01:00
Salvatore Isaja
ad6d0d9ae8 Write ZIP64 End of Central Directory only if needed. 2021-05-23 21:10:35 +02:00
Daniel Nash
fdc33e91bd Conditionally parse Zip64 extra field based on specification
The Zip64 extra field should look for values based on the corresponding
values in the local entry header.

Fixes adamhathcock/sharpcompress#595
2021-04-26 14:58:10 -04:00
Adam Hathcock
a34f5a855c Mark for 0.28.2 2021-04-25 09:29:56 +01:00
Adam Hathcock
6474741af1 Merge pull request #593 from adamhathcock/fix-pkware-encryption
ReadFully used by pkware encryption didn’t like spans
2021-04-25 09:29:02 +01:00
Adam Hathcock
c10bd840c5 ReadFully used by pkware encryption didn’t like spans 2021-04-25 09:25:51 +01:00
Adam Hathcock
e6dded826b Downgrade System.Memory to fix buffer version issue 2021-04-24 09:16:46 +01:00
Adam Hathcock
8a022c4b18 Update FORMATS.md
remove LZipArchive/Reader/Writer mention
2021-03-28 08:58:11 +01:00
Adam Hathcock
cfef228afc Merge pull request #579 from Looooong/fix/do-not-place-extention-classes-in-common-namespace
Do not place extension classes in common namespace
2021-03-18 13:52:40 +00:00
Nguyễn Đức Long
237ff9f055 Do not place extension classes in common namespace 2021-03-18 20:44:04 +07:00
Adam Hathcock
020f862814 Bug fix for recursive call introduced in 0.28 2021-02-18 08:31:50 +00:00
Adam Hathcock
fa6107200d Merge pull request #572 from Erior/feature/521
Not so elegant perhaps for checking 7z encryption
2021-02-16 08:05:08 +00:00
Adam Hathcock
eb81f972c4 Merge branch 'master' into feature/521 2021-02-16 08:01:32 +00:00
Lars Vahlenberg
93c1ff396e Not so elegant perhaps 2021-02-14 16:29:01 +01:00
Adam Hathcock
403baf05a6 Mark for 0.28 2021-02-14 13:07:35 +00:00
Adam Hathcock
a51b56339a Fix complete entry check for RAR files. 2021-02-14 13:00:43 +00:00
Adam Hathcock
f48a6d47dc Merge pull request #571 from Erior/feature/540
Proposal fixing Extra bytes written when setting zip64
2021-02-14 12:54:17 +00:00
Adam Hathcock
5b52463e4c Merge pull request #570 from Erior/feature/555
Proposal for handling Zip with long comment
2021-02-14 12:52:42 +00:00
Adam Hathcock
6f08bb72d8 Merge pull request #569 from BrendanGrant/improve_how_missing_parts_are_handled
Improve how missing parts are handled
2021-02-14 12:49:49 +00:00
Lars Vahlenberg
045093f453 Linux is case-sensitive with file names 2021-02-14 10:26:26 +01:00
Lars Vahlenberg
566c49ce53 Proposal
Zip64 requires version 4.5
Number of disks is 4 bytes and not 8
2021-02-14 02:42:32 +01:00
Lars Vahlenberg
d1d2758ee0 Proposal for handling Zip with long comment 2021-02-13 23:57:03 +01:00
Brendan Grant
5b86c40d5b Properly detect if RAR is complete at the end or not 2021-02-13 13:34:57 -06:00
Brendan Grant
53393e744e Supporting reading contents of incomplete files 2021-02-13 13:33:43 -06:00
Adam Hathcock
2dd17e3882 Be explicit about zip64 extra field sizes. Formatting 2021-02-13 07:05:53 +00:00
Adam Hathcock
c4f7433584 Merge pull request #567 from Nanook/master
Zip64 Header and Size fix
2021-02-13 06:58:41 +00:00
Adam Hathcock
9405a7cf4b Merge pull request #568 from Bond-009/stackalloc
Use stackallocs where possible/sensible
2021-02-13 06:39:32 +00:00
Bond_009
cd677440ce Use stackallocs where possible/sensible 2021-02-12 20:20:15 +01:00
Craig Greenhill
c06f4bc5a8 Zip64 Header and Size fix 2021-02-11 09:37:59 +00:00
Adam Hathcock
4a7337b223 Merge pull request #563 from adamhathcock/add-reader-test-gzip
Fix Rewindable stream Length and add GZip Reader tests
2021-01-13 15:13:34 +00:00
Adam Hathcock
1d8afb817e Bump version 2021-01-13 14:41:25 +00:00
Adam Hathcock
0f06c3d934 Fix rewindable stream to expose length 2021-01-13 14:40:36 +00:00
Adam Hathcock
9d5cb8d119 Add GZip Reader tests 2021-01-13 10:42:59 +00:00
64 changed files with 998 additions and 528 deletions

View File

@@ -12,13 +12,9 @@ jobs:
- uses: actions/checkout@v1
- uses: actions/setup-dotnet@v1
with:
dotnet-version: 5.0.101
dotnet-version: 5.0.401
- run: dotnet run -p build/build.csproj
- uses: actions/upload-artifact@v2
with:
name: ${{ matrix.os }}-sharpcompress.nupkg
path: artifacts/*
- uses: actions/upload-artifact@v2
with:
name: ${{ matrix.os }}-sharpcompress.snupkg
path: artifacts/*

View File

@@ -19,7 +19,6 @@
| Tar.XZ | LZMA2 | Decompress | TarArchive | TarReader | TarWriter (3) |
| GZip (single file) | DEFLATE | Both | GZipArchive | GZipReader | GZipWriter |
| 7Zip (4) | LZMA, LZMA2, BZip2, PPMd, BCJ, BCJ2, Deflate | Decompress | SevenZipArchive | N/A | N/A |
| LZip (single file) (5) | LZip (LZMA) | Both | LZipArchive | LZipReader | LZipWriter |
1. SOLID Rars are only supported in the RarReader API.
2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported but only with seekable streams as the Zip spec doesn't support Zip64 data in post data descriptors. Deflate64 is only supported for reading.

View File

@@ -49,20 +49,20 @@ class Program
Target(Build, DependsOn(Format),
framework =>
{
if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && framework == "net46")
{
return;
}
Run("dotnet", "build src/SharpCompress/SharpCompress.csproj -c Release");
});
Target(Test, DependsOn(Build), ForEach("net5.0"),
Target(Test, DependsOn(Build), ForEach("net5.0", "net461"),
framework =>
{
IEnumerable<string> GetFiles(string d)
{
return Glob.Files(".", d);
}
if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && framework == "net461")
{
return;
}
foreach (var file in GetFiles("**/*.Test.csproj"))
{

View File

@@ -6,9 +6,9 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Bullseye" Version="3.6.0" />
<PackageReference Include="Bullseye" Version="3.8.0" />
<PackageReference Include="Glob" Version="1.1.8" />
<PackageReference Include="SimpleExec" Version="6.4.0" />
<PackageReference Include="SimpleExec" Version="8.0.0" />
</ItemGroup>
</Project>

View File

@@ -1,5 +1,6 @@
{
"sdk": {
"version": "5.0.101"
"version": "5.0.300",
"rollForward": "latestFeature"
}
}

View File

@@ -0,0 +1,420 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
#if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
#define SUPPORTS_RUNTIME_INTRINSICS
#define SUPPORTS_HOTPATH
#endif
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
#if SUPPORTS_RUNTIME_INTRINSICS
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
#endif
#pragma warning disable IDE0007 // Use implicit type
namespace SharpCompress.Algorithms
{
/// <summary>
/// Calculates the 32 bit Adler checksum of a given buffer according to
/// RFC 1950 (ZLIB Compressed Data Format Specification version 3.3).
/// </summary>
internal static class Adler32 // From https://github.com/SixLabors/ImageSharp/blob/main/src/ImageSharp/Compression/Zlib/Adler32.cs
{
/// <summary>
/// Global inlining options. Helps temporarily disable inlining for better profiler output.
/// </summary>
private static class InliningOptions // From https://github.com/SixLabors/ImageSharp/blob/main/src/ImageSharp/Common/Helpers/InliningOptions.cs
{
/// <summary>
/// <see cref="MethodImplOptions.AggressiveInlining"/> regardless of the build conditions.
/// </summary>
public const MethodImplOptions AlwaysInline = MethodImplOptions.AggressiveInlining;
#if PROFILING
public const MethodImplOptions HotPath = MethodImplOptions.NoInlining;
public const MethodImplOptions ShortMethod = MethodImplOptions.NoInlining;
#else
#if SUPPORTS_HOTPATH
public const MethodImplOptions HotPath = MethodImplOptions.AggressiveOptimization;
#else
public const MethodImplOptions HotPath = MethodImplOptions.AggressiveInlining;
#endif
public const MethodImplOptions ShortMethod = MethodImplOptions.AggressiveInlining;
#endif
public const MethodImplOptions ColdPath = MethodImplOptions.NoInlining;
}
#if SUPPORTS_RUNTIME_INTRINSICS
/// <summary>
/// Provides optimized static methods for trigonometric, logarithmic,
/// and other common mathematical functions.
/// </summary>
private static class Numerics // From https://github.com/SixLabors/ImageSharp/blob/main/src/ImageSharp/Common/Helpers/Numerics.cs
{
/// <summary>
/// Reduces elements of the vector into one sum.
/// </summary>
/// <param name="accumulator">The accumulator to reduce.</param>
/// <returns>The sum of all elements.</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int ReduceSum(Vector256<int> accumulator)
{
// Add upper lane to lower lane.
Vector128<int> vsum = Sse2.Add(accumulator.GetLower(), accumulator.GetUpper());
// Add odd to even.
vsum = Sse2.Add(vsum, Sse2.Shuffle(vsum, 0b_11_11_01_01));
// Add high to low.
vsum = Sse2.Add(vsum, Sse2.Shuffle(vsum, 0b_11_10_11_10));
return Sse2.ConvertToInt32(vsum);
}
/// <summary>
/// Reduces even elements of the vector into one sum.
/// </summary>
/// <param name="accumulator">The accumulator to reduce.</param>
/// <returns>The sum of even elements.</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int EvenReduceSum(Vector256<int> accumulator)
{
Vector128<int> vsum = Sse2.Add(accumulator.GetLower(), accumulator.GetUpper()); // add upper lane to lower lane
vsum = Sse2.Add(vsum, Sse2.Shuffle(vsum, 0b_11_10_11_10)); // add high to low
// Vector128<int>.ToScalar() isn't optimized pre-net5.0 https://github.com/dotnet/runtime/pull/37882
return Sse2.ConvertToInt32(vsum);
}
}
#endif
/// <summary>
/// The default initial seed value of an Adler32 checksum calculation.
/// </summary>
public const uint SeedValue = 1U;
// Largest prime smaller than 65536
private const uint BASE = 65521;
// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
private const uint NMAX = 5552;
#if SUPPORTS_RUNTIME_INTRINSICS
private const int MinBufferSize = 64;
private const int BlockSize = 1 << 5;
// The C# compiler emits this as a compile-time constant embedded in the PE file.
private static ReadOnlySpan<byte> Tap1Tap2 => new byte[]
{
32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, // tap1
16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 // tap2
};
#endif
/// <summary>
/// Calculates the Adler32 checksum with the bytes taken from the span.
/// </summary>
/// <param name="buffer">The readonly span of bytes.</param>
/// <returns>The <see cref="uint"/>.</returns>
[MethodImpl(InliningOptions.ShortMethod)]
public static uint Calculate(ReadOnlySpan<byte> buffer)
=> Calculate(SeedValue, buffer);
/// <summary>
/// Calculates the Adler32 checksum with the bytes taken from the span and seed.
/// </summary>
/// <param name="adler">The input Adler32 value.</param>
/// <param name="buffer">The readonly span of bytes.</param>
/// <returns>The <see cref="uint"/>.</returns>
[MethodImpl(InliningOptions.HotPath | InliningOptions.ShortMethod)]
public static uint Calculate(uint adler, ReadOnlySpan<byte> buffer)
{
if (buffer.IsEmpty)
{
return adler;
}
#if SUPPORTS_RUNTIME_INTRINSICS
if (Avx2.IsSupported && buffer.Length >= MinBufferSize)
{
return CalculateAvx2(adler, buffer);
}
if (Ssse3.IsSupported && buffer.Length >= MinBufferSize)
{
return CalculateSse(adler, buffer);
}
return CalculateScalar(adler, buffer);
#else
return CalculateScalar(adler, buffer);
#endif
}
// Based on https://github.com/chromium/chromium/blob/master/third_party/zlib/adler32_simd.c
#if SUPPORTS_RUNTIME_INTRINSICS
[MethodImpl(InliningOptions.HotPath | InliningOptions.ShortMethod)]
private static unsafe uint CalculateSse(uint adler, ReadOnlySpan<byte> buffer)
{
uint s1 = adler & 0xFFFF;
uint s2 = (adler >> 16) & 0xFFFF;
// Process the data in blocks.
uint length = (uint)buffer.Length;
uint blocks = length / BlockSize;
length -= blocks * BlockSize;
fixed (byte* bufferPtr = &MemoryMarshal.GetReference(buffer))
{
fixed (byte* tapPtr = &MemoryMarshal.GetReference(Tap1Tap2))
{
byte* localBufferPtr = bufferPtr;
// _mm_setr_epi8 on x86
Vector128<sbyte> tap1 = Sse2.LoadVector128((sbyte*)tapPtr);
Vector128<sbyte> tap2 = Sse2.LoadVector128((sbyte*)(tapPtr + 0x10));
Vector128<byte> zero = Vector128<byte>.Zero;
var ones = Vector128.Create((short)1);
while (blocks > 0)
{
uint n = NMAX / BlockSize; /* The NMAX constraint. */
if (n > blocks)
{
n = blocks;
}
blocks -= n;
// Process n blocks of data. At most NMAX data bytes can be
// processed before s2 must be reduced modulo BASE.
Vector128<uint> v_ps = Vector128.CreateScalar(s1 * n);
Vector128<uint> v_s2 = Vector128.CreateScalar(s2);
Vector128<uint> v_s1 = Vector128<uint>.Zero;
do
{
// Load 32 input bytes.
Vector128<byte> bytes1 = Sse3.LoadDquVector128(localBufferPtr);
Vector128<byte> bytes2 = Sse3.LoadDquVector128(localBufferPtr + 0x10);
// Add previous block byte sum to v_ps.
v_ps = Sse2.Add(v_ps, v_s1);
// Horizontally add the bytes for s1, multiply-adds the
// bytes by [ 32, 31, 30, ... ] for s2.
v_s1 = Sse2.Add(v_s1, Sse2.SumAbsoluteDifferences(bytes1, zero).AsUInt32());
Vector128<short> mad1 = Ssse3.MultiplyAddAdjacent(bytes1, tap1);
v_s2 = Sse2.Add(v_s2, Sse2.MultiplyAddAdjacent(mad1, ones).AsUInt32());
v_s1 = Sse2.Add(v_s1, Sse2.SumAbsoluteDifferences(bytes2, zero).AsUInt32());
Vector128<short> mad2 = Ssse3.MultiplyAddAdjacent(bytes2, tap2);
v_s2 = Sse2.Add(v_s2, Sse2.MultiplyAddAdjacent(mad2, ones).AsUInt32());
localBufferPtr += BlockSize;
}
while (--n > 0);
v_s2 = Sse2.Add(v_s2, Sse2.ShiftLeftLogical(v_ps, 5));
// Sum epi32 ints v_s1(s2) and accumulate in s1(s2).
const byte S2301 = 0b1011_0001; // A B C D -> B A D C
const byte S1032 = 0b0100_1110; // A B C D -> C D A B
v_s1 = Sse2.Add(v_s1, Sse2.Shuffle(v_s1, S1032));
s1 += v_s1.ToScalar();
v_s2 = Sse2.Add(v_s2, Sse2.Shuffle(v_s2, S2301));
v_s2 = Sse2.Add(v_s2, Sse2.Shuffle(v_s2, S1032));
s2 = v_s2.ToScalar();
// Reduce.
s1 %= BASE;
s2 %= BASE;
}
if (length > 0)
{
HandleLeftOver(localBufferPtr, length, ref s1, ref s2);
}
return s1 | (s2 << 16);
}
}
}
// Based on: https://github.com/zlib-ng/zlib-ng/blob/develop/arch/x86/adler32_avx2.c
[MethodImpl(InliningOptions.HotPath | InliningOptions.ShortMethod)]
public static unsafe uint CalculateAvx2(uint adler, ReadOnlySpan<byte> buffer)
{
uint s1 = adler & 0xFFFF;
uint s2 = (adler >> 16) & 0xFFFF;
uint length = (uint)buffer.Length;
fixed (byte* bufferPtr = &MemoryMarshal.GetReference(buffer))
{
byte* localBufferPtr = bufferPtr;
Vector256<byte> zero = Vector256<byte>.Zero;
var dot3v = Vector256.Create((short)1);
var dot2v = Vector256.Create(32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
// Process n blocks of data. At most NMAX data bytes can be
// processed before s2 must be reduced modulo BASE.
var vs1 = Vector256.CreateScalar(s1);
var vs2 = Vector256.CreateScalar(s2);
while (length >= 32)
{
int k = length < NMAX ? (int)length : (int)NMAX;
k -= k % 32;
length -= (uint)k;
Vector256<uint> vs10 = vs1;
Vector256<uint> vs3 = Vector256<uint>.Zero;
while (k >= 32)
{
// Load 32 input bytes.
Vector256<byte> block = Avx.LoadVector256(localBufferPtr);
// Sum of abs diff, resulting in 2 x int32's
Vector256<ushort> vs1sad = Avx2.SumAbsoluteDifferences(block, zero);
vs1 = Avx2.Add(vs1, vs1sad.AsUInt32());
vs3 = Avx2.Add(vs3, vs10);
// sum 32 uint8s to 16 shorts.
Vector256<short> vshortsum2 = Avx2.MultiplyAddAdjacent(block, dot2v);
// sum 16 shorts to 8 uint32s.
Vector256<int> vsum2 = Avx2.MultiplyAddAdjacent(vshortsum2, dot3v);
vs2 = Avx2.Add(vsum2.AsUInt32(), vs2);
vs10 = vs1;
localBufferPtr += BlockSize;
k -= 32;
}
// Defer the multiplication with 32 to outside of the loop.
vs3 = Avx2.ShiftLeftLogical(vs3, 5);
vs2 = Avx2.Add(vs2, vs3);
s1 = (uint)Numerics.EvenReduceSum(vs1.AsInt32());
s2 = (uint)Numerics.ReduceSum(vs2.AsInt32());
s1 %= BASE;
s2 %= BASE;
vs1 = Vector256.CreateScalar(s1);
vs2 = Vector256.CreateScalar(s2);
}
if (length > 0)
{
HandleLeftOver(localBufferPtr, length, ref s1, ref s2);
}
return s1 | (s2 << 16);
}
}
private static unsafe void HandleLeftOver(byte* localBufferPtr, uint length, ref uint s1, ref uint s2)
{
if (length >= 16)
{
s2 += s1 += localBufferPtr[0];
s2 += s1 += localBufferPtr[1];
s2 += s1 += localBufferPtr[2];
s2 += s1 += localBufferPtr[3];
s2 += s1 += localBufferPtr[4];
s2 += s1 += localBufferPtr[5];
s2 += s1 += localBufferPtr[6];
s2 += s1 += localBufferPtr[7];
s2 += s1 += localBufferPtr[8];
s2 += s1 += localBufferPtr[9];
s2 += s1 += localBufferPtr[10];
s2 += s1 += localBufferPtr[11];
s2 += s1 += localBufferPtr[12];
s2 += s1 += localBufferPtr[13];
s2 += s1 += localBufferPtr[14];
s2 += s1 += localBufferPtr[15];
localBufferPtr += 16;
length -= 16;
}
while (length-- > 0)
{
s2 += s1 += *localBufferPtr++;
}
if (s1 >= BASE)
{
s1 -= BASE;
}
s2 %= BASE;
}
#endif
[MethodImpl(InliningOptions.HotPath | InliningOptions.ShortMethod)]
private static unsafe uint CalculateScalar(uint adler, ReadOnlySpan<byte> buffer)
{
uint s1 = adler & 0xFFFF;
uint s2 = (adler >> 16) & 0xFFFF;
uint k;
fixed (byte* bufferPtr = buffer)
{
var localBufferPtr = bufferPtr;
uint length = (uint)buffer.Length;
while (length > 0)
{
k = length < NMAX ? length : NMAX;
length -= k;
while (k >= 16)
{
s2 += s1 += localBufferPtr[0];
s2 += s1 += localBufferPtr[1];
s2 += s1 += localBufferPtr[2];
s2 += s1 += localBufferPtr[3];
s2 += s1 += localBufferPtr[4];
s2 += s1 += localBufferPtr[5];
s2 += s1 += localBufferPtr[6];
s2 += s1 += localBufferPtr[7];
s2 += s1 += localBufferPtr[8];
s2 += s1 += localBufferPtr[9];
s2 += s1 += localBufferPtr[10];
s2 += s1 += localBufferPtr[11];
s2 += s1 += localBufferPtr[12];
s2 += s1 += localBufferPtr[13];
s2 += s1 += localBufferPtr[14];
s2 += s1 += localBufferPtr[15];
localBufferPtr += 16;
k -= 16;
}
while (k-- > 0)
{
s2 += s1 += *localBufferPtr++;
}
s1 %= BASE;
s2 %= BASE;
}
return (s2 << 16) | s1;
}
}
}
}
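All of the paths above compute the same recurrence: s1 is the running sum of the input bytes, s2 is the running sum of the successive s1 values, and both are reduced modulo BASE at most every NMAX bytes so the 32-bit accumulators cannot overflow. Note also that this version returns the running adler value for an empty buffer, where the replaced implementation below incorrectly returned SeedValue. For orientation, a minimal standalone scalar sketch of the recurrence (class and method names are illustrative, not part of the library):

using System;

internal static class Adler32Sketch
{
    private const uint BASE = 65521; // largest prime smaller than 65536
    private const uint NMAX = 5552;  // max bytes before s2 could overflow 2^32 - 1

    public static uint Compute(ReadOnlySpan<byte> buffer, uint adler = 1U)
    {
        uint s1 = adler & 0xFFFF;         // running byte sum
        uint s2 = (adler >> 16) & 0xFFFF; // running sum of s1 values
        int i = 0;
        while (i < buffer.Length)
        {
            // Defer the expensive modulo: at most NMAX bytes between reductions.
            int limit = Math.Min(buffer.Length, i + (int)NMAX);
            for (; i < limit; i++)
            {
                s1 += buffer[i];
                s2 += s1;
            }
            s1 %= BASE;
            s2 %= BASE;
        }
        return (s2 << 16) | s1;
    }
}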

View File

@@ -1,285 +0,0 @@
// Copyright (c) Six Labors and contributors.
// Licensed under the GNU Affero General Public License, Version 3.
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
#if !NETSTANDARD2_0 && !NETSTANDARD2_1
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
#endif
namespace SharpCompress.Algorithms
{
/// <summary>
/// Calculates the 32 bit Adler checksum of a given buffer according to
/// RFC 1950. ZLIB Compressed Data Format Specification version 3.3)
/// </summary>
internal static class Adler32
{
/// <summary>
/// The default initial seed value of a Adler32 checksum calculation.
/// </summary>
public const uint SeedValue = 1U;
#if !NETSTANDARD2_0 && !NETSTANDARD2_1
private const int MinBufferSize = 64;
#endif
// Largest prime smaller than 65536
private const uint BASE = 65521;
// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
private const uint NMAX = 5552;
/// <summary>
/// Calculates the Adler32 checksum with the bytes taken from the span.
/// </summary>
/// <param name="buffer">The readonly span of bytes.</param>
/// <returns>The <see cref="uint"/>.</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static uint Calculate(ReadOnlySpan<byte> buffer)
{
return Calculate(SeedValue, buffer);
}
/// <summary>
/// Calculates the Adler32 checksum with the bytes taken from the span and seed.
/// </summary>
/// <param name="adler">The input Adler32 value.</param>
/// <param name="buffer">The readonly span of bytes.</param>
/// <returns>The <see cref="uint"/>.</returns>
public static uint Calculate(uint adler, ReadOnlySpan<byte> buffer)
{
if (buffer.IsEmpty)
{
return SeedValue;
}
#if !NETSTANDARD2_0 && !NETSTANDARD2_1
if (Sse3.IsSupported && buffer.Length >= MinBufferSize)
{
return CalculateSse(adler, buffer);
}
return CalculateScalar(adler, buffer);
#else
return CalculateScalar(adler, buffer);
#endif
}
// Based on https://github.com/chromium/chromium/blob/master/third_party/zlib/adler32_simd.c
#if !NETSTANDARD2_0 && !NETSTANDARD2_1
private static unsafe uint CalculateSse(uint adler, ReadOnlySpan<byte> buffer)
{
uint s1 = adler & 0xFFFF;
uint s2 = (adler >> 16) & 0xFFFF;
// Process the data in blocks.
const int BLOCK_SIZE = 1 << 5;
uint length = (uint)buffer.Length;
uint blocks = length / BLOCK_SIZE;
length -= blocks * BLOCK_SIZE;
int index = 0;
fixed (byte* bufferPtr = &buffer[0])
{
index += (int)blocks * BLOCK_SIZE;
var localBufferPtr = bufferPtr;
// _mm_setr_epi8 on x86
var tap1 = Vector128.Create(32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17);
var tap2 = Vector128.Create(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
Vector128<byte> zero = Vector128<byte>.Zero;
var ones = Vector128.Create((short)1);
while (blocks > 0)
{
uint n = NMAX / BLOCK_SIZE; /* The NMAX constraint. */
if (n > blocks)
{
n = blocks;
}
blocks -= n;
// Process n blocks of data. At most NMAX data bytes can be
// processed before s2 must be reduced modulo BASE.
Vector128<int> v_ps = Vector128.CreateScalar(s1 * n).AsInt32();
Vector128<int> v_s2 = Vector128.CreateScalar(s2).AsInt32();
Vector128<int> v_s1 = Vector128<int>.Zero;
do
{
// Load 32 input bytes.
Vector128<byte> bytes1 = Sse3.LoadDquVector128(localBufferPtr);
Vector128<byte> bytes2 = Sse3.LoadDquVector128(localBufferPtr + 16);
// Add previous block byte sum to v_ps.
v_ps = Sse2.Add(v_ps, v_s1);
// Horizontally add the bytes for s1, multiply-adds the
// bytes by [ 32, 31, 30, ... ] for s2.
v_s1 = Sse2.Add(v_s1, Sse2.SumAbsoluteDifferences(bytes1, zero).AsInt32());
Vector128<short> mad1 = Ssse3.MultiplyAddAdjacent(bytes1, tap1);
v_s2 = Sse2.Add(v_s2, Sse2.MultiplyAddAdjacent(mad1, ones));
v_s1 = Sse2.Add(v_s1, Sse2.SumAbsoluteDifferences(bytes2, zero).AsInt32());
Vector128<short> mad2 = Ssse3.MultiplyAddAdjacent(bytes2, tap2);
v_s2 = Sse2.Add(v_s2, Sse2.MultiplyAddAdjacent(mad2, ones));
localBufferPtr += BLOCK_SIZE;
}
while (--n > 0);
v_s2 = Sse2.Add(v_s2, Sse2.ShiftLeftLogical(v_ps, 5));
// Sum epi32 ints v_s1(s2) and accumulate in s1(s2).
const byte S2301 = 0b1011_0001; // A B C D -> B A D C
const byte S1032 = 0b0100_1110; // A B C D -> C D A B
v_s1 = Sse2.Add(v_s1, Sse2.Shuffle(v_s1, S2301));
v_s1 = Sse2.Add(v_s1, Sse2.Shuffle(v_s1, S1032));
s1 += (uint)v_s1.ToScalar();
v_s2 = Sse2.Add(v_s2, Sse2.Shuffle(v_s2, S2301));
v_s2 = Sse2.Add(v_s2, Sse2.Shuffle(v_s2, S1032));
s2 = (uint)v_s2.ToScalar();
// Reduce.
s1 %= BASE;
s2 %= BASE;
}
}
ref byte bufferRef = ref MemoryMarshal.GetReference(buffer);
if (length > 0)
{
if (length >= 16)
{
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
length -= 16;
}
while (length-- > 0)
{
s2 += s1 += Unsafe.Add(ref bufferRef, index++);
}
if (s1 >= BASE)
{
s1 -= BASE;
}
s2 %= BASE;
}
return s1 | (s2 << 16);
}
#endif
private static uint CalculateScalar(uint adler, ReadOnlySpan<byte> buffer)
{
uint s1 = adler & 0xFFFF;
uint s2 = (adler >> 16) & 0xFFFF;
uint k;
ref byte bufferRef = ref MemoryMarshal.GetReference<byte>(buffer);
uint length = (uint)buffer.Length;
int index = 0;
while (length > 0)
{
k = length < NMAX ? length : NMAX;
length -= k;
while (k >= 16)
{
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
k -= 16;
}
if (k != 0)
{
do
{
s1 += Unsafe.Add(ref bufferRef, index++);
s2 += s1;
}
while (--k != 0);
}
s1 %= BASE;
s2 %= BASE;
}
return (s2 << 16) | s1;
}
}
}

View File

@@ -98,7 +98,7 @@ namespace SharpCompress.Archives.GZip
public static bool IsGZipFile(Stream stream)
{
// read the header on the first read
byte[] header = new byte[10];
Span<byte> header = stackalloc byte[10];
// workitem 8501: handle edge case (decompress empty stream)
if (!stream.ReadFully(header))

View File

@@ -10,7 +10,8 @@ using SharpCompress.Readers.Rar;
namespace SharpCompress.Archives.Rar
{
public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
public class
RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
{
internal Lazy<IRarUnpack> UnpackV2017 { get; } = new Lazy<IRarUnpack>(() => new SharpCompress.Compressors.Rar.UnpackV2017.Unpack());
internal Lazy<IRarUnpack> UnpackV1 { get; } = new Lazy<IRarUnpack>(() => new SharpCompress.Compressors.Rar.UnpackV1.Unpack());
@@ -42,7 +43,7 @@ namespace SharpCompress.Archives.Rar
protected override IEnumerable<RarArchiveEntry> LoadEntries(IEnumerable<RarVolume> volumes)
{
return RarArchiveEntryFactory.GetEntries(this, volumes);
return RarArchiveEntryFactory.GetEntries(this, volumes, ReaderOptions);
}
protected override IEnumerable<RarVolume> LoadVolumes(IEnumerable<Stream> streams)

View File

@@ -6,6 +6,7 @@ using SharpCompress.Common;
using SharpCompress.Common.Rar;
using SharpCompress.Common.Rar.Headers;
using SharpCompress.Compressors.Rar;
using SharpCompress.Readers;
namespace SharpCompress.Archives.Rar
{
@@ -13,11 +14,13 @@ namespace SharpCompress.Archives.Rar
{
private readonly ICollection<RarFilePart> parts;
private readonly RarArchive archive;
private readonly ReaderOptions readerOptions;
internal RarArchiveEntry(RarArchive archive, IEnumerable<RarFilePart> parts)
internal RarArchiveEntry(RarArchive archive, IEnumerable<RarFilePart> parts, ReaderOptions readerOptions)
{
this.parts = parts.ToList();
this.archive = archive;
this.readerOptions = readerOptions;
}
public override CompressionType CompressionType => CompressionType.Rar;
@@ -69,13 +72,14 @@ namespace SharpCompress.Archives.Rar
{
get
{
return parts.Select(fp => fp.FileHeader).Any(fh => !fh.IsSplitAfter);
var headers = parts.Select(x => x.FileHeader);
return !headers.First().IsSplitBefore && !headers.Last().IsSplitAfter;
}
}
private void CheckIncomplete()
{
if (!IsComplete)
if (!readerOptions.DisableCheckIncomplete && !IsComplete)
{
throw new IncompleteArchiveException("ArchiveEntry is incomplete and cannot perform this operation.");
}
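The rewritten IsComplete is the heart of this change: the old Any(fh => !fh.IsSplitAfter) passed as soon as any part ended a split, even when the entry's first volume was missing. Completeness requires both ends, which a small standalone sketch (types illustrative, not the library's) makes explicit:

using System.Collections.Generic;

// Stand-in for the split flags read from each RAR file header.
internal sealed record PartFlags(bool IsSplitBefore, bool IsSplitAfter);

internal static class RarCompletenessSketch
{
    // Parts are in volume order: complete only if nothing precedes the
    // first part and nothing follows the last part.
    public static bool IsComplete(IReadOnlyList<PartFlags> parts) =>
        !parts[0].IsSplitBefore && !parts[parts.Count - 1].IsSplitAfter;
}

// Example: only the last two volumes of a three-volume entry are present.
// parts[0].IsSplitBefore is true, so IsComplete is false, even though the
// final part has IsSplitAfter == false (which satisfied the old Any() check).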

View File

@@ -1,5 +1,6 @@
using System.Collections.Generic;
using SharpCompress.Common.Rar;
using SharpCompress.Readers;
namespace SharpCompress.Archives.Rar
{
@@ -36,11 +37,12 @@ namespace SharpCompress.Archives.Rar
}
internal static IEnumerable<RarArchiveEntry> GetEntries(RarArchive archive,
IEnumerable<RarVolume> rarParts)
IEnumerable<RarVolume> rarParts,
ReaderOptions readerOptions)
{
foreach (var groupedParts in GetMatchedFileParts(rarParts))
{
yield return new RarArchiveEntry(archive, groupedParts);
yield return new RarArchiveEntry(archive, groupedParts, readerOptions);
}
}
}

View File

@@ -36,7 +36,7 @@ namespace SharpCompress.Common
Password = password;
}
#if !NET461
#if !NETFRAMEWORK
static ArchiveEncoding()
{
Encoding.RegisterProvider(CodePagesEncodingProvider.Instance);

View File

@@ -14,14 +14,25 @@ namespace SharpCompress.Common
Action<string, ExtractionOptions?> write)
{
string destinationFileName;
string file = Path.GetFileName(entry.Key);
string fullDestinationDirectoryPath = Path.GetFullPath(destinationDirectory);
//check for trailing slash.
if (fullDestinationDirectoryPath[fullDestinationDirectoryPath.Length - 1] != Path.DirectorySeparatorChar)
{
fullDestinationDirectoryPath += Path.DirectorySeparatorChar;
}
if (!Directory.Exists(fullDestinationDirectoryPath))
{
throw new ExtractionException($"Directory does not exist to extract to: {fullDestinationDirectoryPath}");
}
options ??= new ExtractionOptions()
{
Overwrite = true
};
string file = Path.GetFileName(entry.Key);
if (options.ExtractFullPath)
{
string folder = Path.GetDirectoryName(entry.Key)!;

View File

@@ -110,13 +110,13 @@ namespace SharpCompress.Common.GZip
private string ReadZeroTerminatedString(Stream stream)
{
byte[] buf1 = new byte[1];
Span<byte> buf1 = stackalloc byte[1];
var list = new List<byte>();
bool done = false;
do
{
// workitem 7740
int n = stream.Read(buf1, 0, 1);
int n = stream.Read(buf1);
if (n != 1)
{
throw new ZlibException("Unexpected EOF reading GZIP header.");

View File

@@ -437,6 +437,7 @@ namespace SharpCompress.Common.Rar.Headers
internal long DataStartPosition { get; set; }
public Stream PackedStream { get; set; }
public bool IsSplitBefore => IsRar5 ? HasHeaderFlag(HeaderFlagsV5.SPLIT_BEFORE) : HasFlag(FileFlagsV4.SPLIT_BEFORE);
public bool IsSplitAfter => IsRar5 ? HasHeaderFlag(HeaderFlagsV5.SPLIT_AFTER) : HasFlag(FileFlagsV4.SPLIT_AFTER);
public bool IsDirectory => HasFlag(IsRar5 ? FileFlagsV5.DIRECTORY : FileFlagsV4.DIRECTORY);

View File

@@ -50,11 +50,11 @@ namespace SharpCompress.Common.Rar
if (sizeToRead > 0)
{
int alignedSize = sizeToRead + ((~sizeToRead + 1) & 0xf);
byte[] cipherText = new byte[RarRijndael.CRYPTO_BLOCK_SIZE];
Span<byte> cipherText = stackalloc byte[RarRijndael.CRYPTO_BLOCK_SIZE];
for (int i = 0; i < alignedSize / 16; i++)
{
//long ax = System.currentTimeMillis();
_actualStream.Read(cipherText, 0, RarRijndael.CRYPTO_BLOCK_SIZE);
_actualStream.Read(cipherText);
var readBytes = _rijndael.ProcessBlock(cipherText);
foreach (var readByte in readBytes)

View File

@@ -10,7 +10,7 @@ namespace SharpCompress.Common.Rar
/// <summary>
/// As the V2017 port isn't complete, add this check to use the legacy Rar code.
/// </summary>
internal bool IsRarV3 => FileHeader.CompressionAlgorithm == 29 || FileHeader.CompressionAlgorithm == 36;
internal bool IsRarV3 => FileHeader.CompressionAlgorithm == 20 || FileHeader.CompressionAlgorithm == 26 || FileHeader.CompressionAlgorithm == 29 || FileHeader.CompressionAlgorithm == 36; //Nanook - Added 20+26 as Test arc from WinRar2.8 (algo 20) was failing with 2017 code
/// <summary>
/// The File's 32 bit CRC Hash

View File

@@ -1517,6 +1517,7 @@ namespace SharpCompress.Common.SevenZip
}
}
byte[] buffer = null;
foreach (CExtractFolderInfo efi in extractFolderInfoVector)
{
int startIndex;
@@ -1553,7 +1554,7 @@ namespace SharpCompress.Common.SevenZip
Stream s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes,
folderInfo, db.PasswordProvider);
byte[] buffer = new byte[4 << 10];
buffer ??= new byte[4 << 10];
for (; ; )
{
int processed = s.Read(buffer, 0, buffer.Length);

View File

@@ -32,7 +32,7 @@ namespace SharpCompress.Common.SevenZip
public override DateTime? ArchivedTime => null;
public override bool IsEncrypted => false;
public override bool IsEncrypted => FilePart.IsEncrypted;
public override bool IsDirectory => FilePart.Header.IsDir;

View File

@@ -102,5 +102,7 @@ namespace SharpCompress.Common.SevenZip
throw new NotImplementedException();
}
}
internal bool IsEncrypted => Folder!._coders.FindIndex(c => c._methodId._id == CMethodId.K_AES_ID) != -1;
}
}

View File

@@ -97,7 +97,7 @@ namespace SharpCompress.Common.Tar.Headers
{
numPaddingBytes = BLOCK_SIZE;
}
output.Write(new byte[numPaddingBytes], 0, numPaddingBytes);
output.Write(stackalloc byte[numPaddingBytes]);
}
internal bool Read(BinaryReader reader)

View File

@@ -63,6 +63,8 @@ namespace SharpCompress.Common.Zip.Headers
var zip64ExtraData = Extra.OfType<Zip64ExtendedInformationExtraField>().FirstOrDefault();
if (zip64ExtraData != null)
{
zip64ExtraData.Process(UncompressedSize, CompressedSize, RelativeOffsetOfEntryHeader, DiskNumberStart);
if (CompressedSize == uint.MaxValue)
{
CompressedSize = zip64ExtraData.CompressedSize;

View File

@@ -53,6 +53,8 @@ namespace SharpCompress.Common.Zip.Headers
var zip64ExtraData = Extra.OfType<Zip64ExtendedInformationExtraField>().FirstOrDefault();
if (zip64ExtraData != null)
{
zip64ExtraData.Process(UncompressedSize, CompressedSize, 0, 0);
if (CompressedSize == uint.MaxValue)
{
CompressedSize = zip64ExtraData.CompressedSize;

View File

@@ -66,62 +66,74 @@ namespace SharpCompress.Common.Zip.Headers
public Zip64ExtendedInformationExtraField(ExtraDataType type, ushort length, byte[] dataBytes)
: base(type, length, dataBytes)
{
Process();
}
//From the spec values are only in the extradata if the standard
//value is set to 0xFFFF, but if one of the sizes are present, both are.
//Hence if length == 4 volume only
// if length == 8 offset only
// if length == 12 offset + volume
// if length == 16 sizes only
// if length == 20 sizes + volume
// if length == 24 sizes + offset
// if length == 28 everything.
//It is unclear how many of these are used in the wild.
private void Process()
// From the spec, values are only in the extradata if the standard
// value is set to 0xFFFFFFFF (or 0xFFFF for the Disk Start Number).
// Values, if present, must appear in the following order:
// - Original Size
// - Compressed Size
// - Relative Header Offset
// - Disk Start Number
public void Process(long uncompressedFileSize, long compressedFileSize, long relativeHeaderOffset, ushort diskNumber)
{
switch (DataBytes.Length)
var bytesRequired = ((uncompressedFileSize == uint.MaxValue) ? 8 : 0)
+ ((compressedFileSize == uint.MaxValue) ? 8 : 0)
+ ((relativeHeaderOffset == uint.MaxValue) ? 8 : 0)
+ ((diskNumber == ushort.MaxValue) ? 4 : 0);
var currentIndex = 0;
if (bytesRequired > DataBytes.Length)
{
case 4:
VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes);
return;
case 8:
RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
return;
case 12:
RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes.AsSpan(8));
return;
case 16:
UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(8));
return;
case 20:
UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(8));
VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes.AsSpan(16));
return;
case 24:
UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(8));
RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(16));
return;
case 28:
UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(8));
RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(16));
VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes.AsSpan(24));
return;
default:
throw new ArchiveException("Unexpected size of of Zip64 extended information extra field");
throw new ArchiveException("Zip64 extended information extra field is not large enough for the required information");
}
if (uncompressedFileSize == uint.MaxValue)
{
UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(currentIndex));
currentIndex += 8;
}
if (compressedFileSize == uint.MaxValue)
{
CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(currentIndex));
currentIndex += 8;
}
if (relativeHeaderOffset == uint.MaxValue)
{
RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(currentIndex));
currentIndex += 8;
}
if (diskNumber == ushort.MaxValue)
{
VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes.AsSpan(currentIndex));
}
}
/// <summary>
/// Uncompressed file size. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
/// original entry header had a corresponding 0xFFFFFFFF value.
/// </summary>
public long UncompressedSize { get; private set; }
/// <summary>
/// Compressed file size. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
/// original entry header had a corresponding 0xFFFFFFFF value.
/// </summary>
public long CompressedSize { get; private set; }
/// <summary>
/// Relative offset of the entry header. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
/// original entry header had a corresponding 0xFFFFFFFF value.
/// </summary>
public long RelativeOffsetOfEntryHeader { get; private set; }
/// <summary>
/// Volume number. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
/// original entry header had a corresponding 0xFFFF value.
/// </summary>
public uint VolumeNumber { get; private set; }
}
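A worked example shows why the old switch on total length could misread real archives: a writer whose only 64-bit value is the compressed size emits an 8-byte extra field, which the old case 8 branch interpreted as a header offset. The new Process consumes slots strictly in spec order, gated by which standard header values are saturated. An illustrative fragment (not compilable outside the library, since these types are internal; the ExtraDataType member name is assumed):

using System.Buffers.Binary;

// Local header: only the compressed size overflowed 32 bits.
byte[] extra = new byte[8];
BinaryPrimitives.WriteInt64LittleEndian(extra, 5_000_000_000L); // real compressed size

var field = new Zip64ExtendedInformationExtraField(
    ExtraDataType.Zip64ExtendedInformationExtraField, // assumed enum member name
    length: 8,
    dataBytes: extra);

// Old behavior: DataBytes.Length == 8 => parsed as RelativeOffsetOfEntryHeader.
// New behavior: only the CompressedSize slot is read, because only that value
// in the local header equals uint.MaxValue.
field.Process(uncompressedFileSize: 1_234, compressedFileSize: uint.MaxValue,
              relativeHeaderOffset: 0, diskNumber: 0);
// field.CompressedSize == 5_000_000_000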

View File

@@ -105,6 +105,6 @@ namespace SharpCompress.Common.Zip.Headers
internal ZipFilePart Part { get; set; }
internal bool IsZip64 => CompressedSize == uint.MaxValue;
internal bool IsZip64 => CompressedSize >= uint.MaxValue;
}
}

View File

@@ -8,7 +8,10 @@ namespace SharpCompress.Common.Zip
{
internal sealed class SeekableZipHeaderFactory : ZipHeaderFactory
{
private const int MAX_ITERATIONS_FOR_DIRECTORY_HEADER = 4096;
private const int MINIMUM_EOCD_LENGTH = 22;
private const int ZIP64_EOCD_LENGTH = 20;
// The comment may be up to 64 KB (65535 bytes), plus the 22-byte EOCD structure
private const int MAX_SEARCH_LENGTH_FOR_EOCD = 65557;
private bool _zip64;
internal SeekableZipHeaderFactory(string? password, ArchiveEncoding archiveEncoding)
@@ -20,14 +23,24 @@ namespace SharpCompress.Common.Zip
{
var reader = new BinaryReader(stream);
SeekBackToHeader(stream, reader, DIRECTORY_END_HEADER_BYTES);
SeekBackToHeader(stream, reader);
var eocd_location = stream.Position;
var entry = new DirectoryEndHeader();
entry.Read(reader);
if (entry.IsZip64)
{
_zip64 = true;
SeekBackToHeader(stream, reader, ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR);
// ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR should be before the EOCD
stream.Seek(eocd_location - ZIP64_EOCD_LENGTH - 4, SeekOrigin.Begin);
uint zip64_locator = reader.ReadUInt32();
if (zip64_locator != ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR)
{
throw new ArchiveException("Failed to locate the Zip64 Directory Locator");
}
var zip64Locator = new Zip64DirectoryEndLocatorHeader();
zip64Locator.Read(reader);
@@ -73,27 +86,50 @@ namespace SharpCompress.Common.Zip
}
}
private static void SeekBackToHeader(Stream stream, BinaryReader reader, uint headerSignature)
private static bool IsMatch(byte[] haystack, int position, byte[] needle)
{
long offset = 0;
uint signature;
int iterationCount = 0;
do
for (int i = 0; i < needle.Length; i++)
{
if ((stream.Length + offset) - 4 < 0)
if (haystack[position + i] != needle[i])
{
throw new ArchiveException("Failed to locate the Zip Header");
}
stream.Seek(offset - 4, SeekOrigin.End);
signature = reader.ReadUInt32();
offset--;
iterationCount++;
if (iterationCount > MAX_ITERATIONS_FOR_DIRECTORY_HEADER)
{
throw new ArchiveException("Could not find Zip file Directory at the end of the file. File may be corrupted.");
return false;
}
}
while (signature != headerSignature);
return true;
}
private static void SeekBackToHeader(Stream stream, BinaryReader reader)
{
// Minimum EOCD length
if (stream.Length < MINIMUM_EOCD_LENGTH)
{
throw new ArchiveException("Could not find Zip file Directory at the end of the file. File may be corrupted.");
}
int len = stream.Length < MAX_SEARCH_LENGTH_FOR_EOCD ? (int)stream.Length : MAX_SEARCH_LENGTH_FOR_EOCD;
// We search for the marker in reverse to find the first occurrence from the end
byte[] needle = { 0x06, 0x05, 0x4b, 0x50 };
stream.Seek(-len, SeekOrigin.End);
byte[] seek = reader.ReadBytes(len);
// Search in reverse
Array.Reverse(seek);
// don't exclude the minimum eocd region, otherwise you fail to locate the header in empty zip files
var max_search_area = len; // - MINIMUM_EOCD_LENGTH;
for (int pos_from_end = 0; pos_from_end < max_search_area; ++pos_from_end)
{
if (IsMatch(seek, pos_from_end, needle))
{
stream.Seek(-pos_from_end, SeekOrigin.End);
return;
}
}
throw new ArchiveException("Failed to locate the Zip Header");
}
internal LocalEntryHeader GetLocalHeader(Stream stream, DirectoryEntryHeader directoryEntryHeader)
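The new strategy is easier to follow without the reversed-buffer trick: read the last min(length, 65557) bytes (a zip comment is at most 65535 bytes, plus the 22-byte EOCD record itself), then scan backwards for the little-endian signature 0x06054b50, "PK\x05\x06". A standalone sketch, assuming a seekable stream (names are illustrative):

using System;
using System.IO;

internal static class EocdLocatorSketch
{
    private const int MinimumEocdLength = 22;
    private const int MaxSearchLength = 65_557; // 65_535 max comment + 22

    // Returns the absolute offset of the EOCD signature; scanning from the
    // end means the match nearest the end of the file wins.
    public static long Find(Stream stream)
    {
        if (stream.Length < MinimumEocdLength)
        {
            throw new InvalidDataException("Too short to contain an EOCD record.");
        }
        int len = (int)Math.Min(stream.Length, MaxSearchLength);
        var tail = new byte[len];
        stream.Seek(-len, SeekOrigin.End);
        int total = 0;
        while (total < len) // a single Read may return fewer bytes than asked
        {
            int read = stream.Read(tail, total, len - total);
            if (read <= 0) throw new EndOfStreamException();
            total += read;
        }
        // Start at len - 22 so a full EOCD record can follow the signature;
        // for an empty zip (exactly 22 bytes) this is position 0.
        for (int i = len - MinimumEocdLength; i >= 0; i--)
        {
            if (tail[i] == 0x50 && tail[i + 1] == 0x4B &&
                tail[i + 2] == 0x05 && tail[i + 3] == 0x06)
            {
                return stream.Length - len + i;
            }
        }
        throw new InvalidDataException("EOCD signature not found.");
    }
}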

View File

@@ -75,7 +75,7 @@ namespace SharpCompress.Common.Zip
if (disposing)
{
//read out last 10 auth bytes
var ten = new byte[10];
Span<byte> ten = stackalloc byte[10];
_stream.ReadFully(ten);
_stream.Dispose();
}

View File

@@ -93,7 +93,7 @@ namespace SharpCompress.Common.Zip
}
case ZipCompressionMethod.PPMd:
{
var props = new byte[2];
Span<byte> props = stackalloc byte[2];
stream.ReadFully(props);
return new PpmdStream(new PpmdProperties(props), stream, false);
}

View File

@@ -83,7 +83,7 @@ namespace SharpCompress.Compressors.BZip2
stream.SetLength(value);
}
#if !NET461 && !NETSTANDARD2_0
#if !NETFRAMEWORK && !NETSTANDARD2_0
public override int Read(Span<byte> buffer)
{
@@ -123,4 +123,4 @@ namespace SharpCompress.Compressors.BZip2
return true;
}
}
}
}

View File

@@ -256,17 +256,15 @@ namespace SharpCompress.Compressors.Deflate
}
// Read and potentially verify the GZIP trailer: CRC32 and size mod 2^32
byte[] trailer = new byte[8];
Span<byte> trailer = stackalloc byte[8];
// workitem 8679
if (_z.AvailableBytesIn != 8)
{
// Make sure we have read to the end of the stream
Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, _z.AvailableBytesIn);
_z.InputBuffer.AsSpan(_z.NextIn, _z.AvailableBytesIn).CopyTo(trailer);
int bytesNeeded = 8 - _z.AvailableBytesIn;
int bytesRead = _stream.Read(trailer,
_z.AvailableBytesIn,
bytesNeeded);
int bytesRead = _stream.Read(trailer.Slice(_z.AvailableBytesIn, bytesNeeded));
if (bytesNeeded != bytesRead)
{
throw new ZlibException(String.Format(
@@ -276,12 +274,12 @@ namespace SharpCompress.Compressors.Deflate
}
else
{
Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, trailer.Length);
_z.InputBuffer.AsSpan(_z.NextIn, trailer.Length).CopyTo(trailer);
}
Int32 crc32_expected = BinaryPrimitives.ReadInt32LittleEndian(trailer);
Int32 crc32_actual = crc.Crc32Result;
Int32 isize_expected = BinaryPrimitives.ReadInt32LittleEndian(trailer.AsSpan(4));
Int32 isize_expected = BinaryPrimitives.ReadInt32LittleEndian(trailer.Slice(4));
Int32 isize_actual = (Int32)(_z.TotalBytesOut & 0x00000000FFFFFFFF);
if (crc32_actual != crc32_expected)
@@ -504,13 +502,37 @@ namespace SharpCompress.Compressors.Deflate
throw new ZlibException("Cannot Read after Writing.");
}
int rc = 0;
// set up the output of the deflate/inflate codec:
_z.OutputBuffer = buffer;
_z.NextOut = offset;
_z.AvailableBytesOut = count;
if (count == 0)
{
return 0;
}
if (nomoreinput && _wantCompress)
{
return 0; // workitem 8557
// no more input data available; therefore we flush to
// try to complete the read
rc = _z.Deflate(FlushType.Finish);
if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
{
throw new ZlibException(String.Format("Deflating: rc={0} msg={1}", rc, _z.Message));
}
rc = (count - _z.AvailableBytesOut);
// calculate CRC after reading
if (crc != null)
{
crc.SlurpBlock(buffer, offset, rc);
}
return rc;
}
if (buffer is null)
{
@@ -529,13 +551,6 @@ namespace SharpCompress.Compressors.Deflate
throw new ArgumentOutOfRangeException(nameof(count));
}
int rc = 0;
// set up the output of the deflate/inflate codec:
_z.OutputBuffer = buffer;
_z.NextOut = offset;
_z.AvailableBytesOut = count;
// This is necessary in case _workingBuffer has been resized. (new byte[])
// (The first reference to _workingBuffer goes through the private accessor which
// may initialize it.)

View File

@@ -59,16 +59,16 @@ namespace SharpCompress.Compressors.LZMA
crc32Stream.Dispose();
var compressedCount = _countingWritableSubStream!.Count;
byte[] intBuf = new byte[8];
Span<byte> intBuf = stackalloc byte[8];
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, crc32Stream.Crc);
_countingWritableSubStream.Write(intBuf, 0, 4);
_countingWritableSubStream.Write(intBuf.Slice(0, 4));
BinaryPrimitives.WriteInt64LittleEndian(intBuf, _writeCount);
_countingWritableSubStream.Write(intBuf, 0, 8);
_countingWritableSubStream.Write(intBuf);
//total with headers
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, compressedCount + 6 + 20);
_countingWritableSubStream.Write(intBuf, 0, 8);
_countingWritableSubStream.Write(intBuf);
}
_finished = true;
}
@@ -118,7 +118,7 @@ namespace SharpCompress.Compressors.LZMA
public override void SetLength(long value) => throw new NotImplementedException();
#if !NET461 && !NETSTANDARD2_0
#if !NETFRAMEWORK && !NETSTANDARD2_0
public override int Read(Span<byte> buffer)
{

View File

@@ -31,7 +31,11 @@ namespace SharpCompress.Compressors.PPMd
public PpmdVersion Version { get; } = PpmdVersion.I1;
internal ModelRestorationMethod RestorationMethod { get; }
public PpmdProperties(byte[] properties)
public PpmdProperties(byte[] properties) : this(properties.AsSpan())
{
}
public PpmdProperties(ReadOnlySpan<byte> properties)
{
if (properties.Length == 2)
{
@@ -43,7 +47,7 @@ namespace SharpCompress.Compressors.PPMd
else if (properties.Length == 5)
{
Version = PpmdVersion.H7Z;
AllocatorSize = BinaryPrimitives.ReadInt32LittleEndian(properties.AsSpan(1));
AllocatorSize = BinaryPrimitives.ReadInt32LittleEndian(properties.Slice(1));
ModelOrder = properties[0];
}
}

View File

@@ -1,4 +1,5 @@
using System;
using System.Buffers.Binary;
using System.IO;
namespace SharpCompress.Compressors.Xz
@@ -8,7 +9,7 @@ namespace SharpCompress.Compressors.Xz
public static int ReadLittleEndianInt32(this BinaryReader reader)
{
byte[] bytes = reader.ReadBytes(4);
return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
return BinaryPrimitives.ReadInt32LittleEndian(bytes);
}
internal static uint ReadLittleEndianUInt32(this BinaryReader reader)
@@ -17,13 +18,13 @@ namespace SharpCompress.Compressors.Xz
}
public static int ReadLittleEndianInt32(this Stream stream)
{
byte[] bytes = new byte[4];
Span<byte> bytes = stackalloc byte[4];
var read = stream.ReadFully(bytes);
if (!read)
{
throw new EndOfStreamException();
}
return (bytes[0] + (bytes[1] << 8) + (bytes[2] << 16) + (bytes[3] << 24));
return BinaryPrimitives.ReadInt32LittleEndian(bytes);
}
internal static uint ReadLittleEndianUInt32(this Stream stream)

View File

@@ -1,7 +1,6 @@
#nullable disable
using System;
using System.Collections.Generic;
namespace SharpCompress.Compressors.Xz
{
@@ -24,7 +23,7 @@ namespace SharpCompress.Compressors.Xz
public static UInt32 Compute(UInt32 polynomial, UInt32 seed, byte[] buffer)
{
return ~CalculateHash(InitializeTable(polynomial), seed, buffer, 0, buffer.Length);
return ~CalculateHash(InitializeTable(polynomial), seed, buffer);
}
private static UInt32[] InitializeTable(UInt32 polynomial)
@@ -61,16 +60,16 @@ namespace SharpCompress.Compressors.Xz
return createTable;
}
private static UInt32 CalculateHash(UInt32[] table, UInt32 seed, IList<byte> buffer, int start, int size)
private static UInt32 CalculateHash(UInt32[] table, UInt32 seed, ReadOnlySpan<byte> buffer)
{
var crc = seed;
for (var i = start; i < size - start; i++)
int len = buffer.Length;
for (var i = 0; i < len; i++)
{
crc = (crc >> 8) ^ table[buffer[i] ^ crc & 0xff];
crc = (crc >> 8) ^ table[(buffer[i] ^ crc) & 0xff];
}
return crc;
}
}
}

View File

@@ -22,14 +22,14 @@ namespace SharpCompress.Compressors.Xz
{
Table ??= CreateTable(Iso3309Polynomial);
return CalculateHash(seed, Table, buffer, 0, buffer.Length);
return CalculateHash(seed, Table, buffer);
}
public static UInt64 CalculateHash(UInt64 seed, UInt64[] table, IList<byte> buffer, int start, int size)
public static UInt64 CalculateHash(UInt64 seed, UInt64[] table, ReadOnlySpan<byte> buffer)
{
var crc = seed;
for (var i = start; i < size; i++)
int len = buffer.Length;
for (var i = 0; i < len; i++)
{
unchecked
{

View File

@@ -42,7 +42,7 @@ namespace SharpCompress.Crypto
public override void SetLength(long value) => throw new NotSupportedException();
#if !NET461 && !NETSTANDARD2_0
#if !NETFRAMEWORK && !NETSTANDARD2_0
public override void Write(ReadOnlySpan<byte> buffer)
{

View File

@@ -58,7 +58,7 @@ namespace SharpCompress.IO
Stream.Write(buffer, offset, count);
}
#if !NET461 && !NETSTANDARD2_0
#if !NETFRAMEWORK && !NETSTANDARD2_0
public override int Read(Span<byte> buffer)
{
@@ -72,4 +72,4 @@ namespace SharpCompress.IO
#endif
}
}
}

View File

@@ -84,11 +84,11 @@ namespace SharpCompress.IO
throw new NotSupportedException();
}
public override long Length => throw new NotSupportedException();
public override long Length => stream.Length;
public override long Position
{
get { return stream.Position + bufferStream.Position - bufferStream.Length; }
get => stream.Position + bufferStream.Position - bufferStream.Length;
set
{
if (!isRewound)

View File

@@ -1,12 +1,14 @@
#if NET461 || NETSTANDARD2_0
#if NETFRAMEWORK || NETSTANDARD2_0
using System;
using System.Buffers;
using System.IO;
namespace System.IO
namespace SharpCompress
{
public static class StreamExtensions
internal static class StreamExtensions
{
public static int Read(this Stream stream, Span<byte> buffer)
internal static int Read(this Stream stream, Span<byte> buffer)
{
byte[] temp = ArrayPool<byte>.Shared.Rent(buffer.Length);
@@ -24,7 +26,7 @@ namespace System.IO
}
}
public static void Write(this Stream stream, ReadOnlySpan<byte> buffer)
internal static void Write(this Stream stream, ReadOnlySpan<byte> buffer)
{
byte[] temp = ArrayPool<byte>.Shared.Rent(buffer.Length);
@@ -42,4 +44,4 @@ namespace System.IO
}
}
#endif
#endif

View File

@@ -1,15 +1,15 @@
#if NET461 || NETSTANDARD2_0
#if NETFRAMEWORK || NETSTANDARD2_0
namespace System
namespace SharpCompress
{
public static class StringExtensions
internal static class StringExtensions
{
public static bool EndsWith(this string text, char value)
internal static bool EndsWith(this string text, char value)
{
return text.Length > 0 && text[text.Length - 1] == value;
}
public static bool Contains(this string text, char value)
internal static bool Contains(this string text, char value)
{
return text.IndexOf(value) > -1;
}

View File

@@ -10,5 +10,7 @@ namespace SharpCompress.Readers
public bool LookForHeader { get; set; }
public string? Password { get; set; }
public bool DisableCheckIncomplete { get; set; }
}
}

View File

@@ -2,11 +2,11 @@
<PropertyGroup>
<AssemblyTitle>SharpCompress - Pure C# Decompression/Compression</AssemblyTitle>
<NeutralLanguage>en-US</NeutralLanguage>
<VersionPrefix>0.27.0</VersionPrefix>
<AssemblyVersion>0.27.0</AssemblyVersion>
<FileVersion>0.27.0</FileVersion>
<VersionPrefix>0.31.0</VersionPrefix>
<AssemblyVersion>0.31.0</AssemblyVersion>
<FileVersion>0.31.0</FileVersion>
<Authors>Adam Hathcock</Authors>
<TargetFrameworks>netstandard2.0;netstandard2.1;netcoreapp3.1;net5.0</TargetFrameworks>
<TargetFrameworks>net461;netstandard2.0;netstandard2.1;netcoreapp3.1;net5.0</TargetFrameworks>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
<AllowUnsafeBlocks>false</AllowUnsafeBlocks>
<AssemblyName>SharpCompress</AssemblyName>
@@ -18,7 +18,7 @@
<PackageLicense>https://github.com/adamhathcock/sharpcompress/blob/master/LICENSE.txt</PackageLicense>
<GenerateAssemblyTitleAttribute>false</GenerateAssemblyTitleAttribute>
<GenerateAssemblyProductAttribute>false</GenerateAssemblyProductAttribute>
<Description>SharpCompress is a compression library for NET Standard 2.0/2.1//NET 4.6 that can unrar, decompress 7zip, decompress xz, zip/unzip, tar/untar lzip/unlzip, bzip2/unbzip2 and gzip/ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.</Description>
<Description>SharpCompress is a compression library for NET Standard 2.0/2.1/NET 5.0 that can unrar, decompress 7zip, decompress xz, zip/unzip, tar/untar lzip/unlzip, bzip2/unbzip2 and gzip/ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.</Description>
<LangVersion>9</LangVersion>
<Nullable>enable</Nullable>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
@@ -37,5 +37,8 @@
<PackageReference Include="System.Text.Encoding.CodePages" Version="5.0.0" />
<PackageReference Include="System.Memory" Version="4.5.4" />
</ItemGroup>
<ItemGroup Condition=" '$(VersionlessImplicitFrameworkDefine)' == 'NETFRAMEWORK' ">
<PackageReference Include="System.Text.Encoding.CodePages" Version="5.0.0" />
<PackageReference Include="System.Memory" Version="4.5.4" />
</ItemGroup>
</Project>
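
The duplicated ItemGroup is what makes the new net461 target compile: on .NET Framework, Span/Memory and the code-page encodings come from the System.Memory and System.Text.Encoding.CodePages packages, exactly as on netstandard2.0. The VersionlessImplicitFrameworkDefine condition is presumably a repo-defined property that resolves to NETFRAMEWORK for the net461 TFM.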

View File

@@ -296,6 +296,21 @@ namespace SharpCompress
return (total >= buffer.Length);
}
public static bool ReadFully(this Stream stream, Span<byte> buffer)
{
int total = 0;
int read;
while ((read = stream.Read(buffer.Slice(total, buffer.Length - total))) > 0)
{
total += read;
if (total >= buffer.Length)
{
return true;
}
}
return (total >= buffer.Length);
}
public static string TrimNulls(this string source)
{
return source.Replace('\0', ' ').Trim();
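
The new Span overload mirrors the array-based ReadFully directly above it. A sketch of the call pattern it enables (a hypothetical call site, not taken from the diff):

using System;
using System.IO;

internal static class ReadFullySketch
{
    internal static void ReadRecord(Stream stream)
    {
        // stackalloc avoids a heap allocation for small fixed-size reads;
        // ReadFully (the extension from the hunk above) loops until the
        // span is filled or the stream ends.
        Span<byte> header = stackalloc byte[512];
        if (!stream.ReadFully(header))
        {
            throw new EndOfStreamException("truncated 512-byte record");
        }
        // ... parse the record ...
    }
}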

View File

@@ -87,19 +87,16 @@ namespace SharpCompress.Writers.Tar
header.Name = NormalizeFilename(filename);
header.Size = realSize;
header.Write(OutputStream);
size = source.TransferTo(OutputStream);
PadTo512(size.Value, false);
PadTo512(size.Value);
}
private void PadTo512(long size, bool forceZeros)
private void PadTo512(long size)
{
int zeros = (int)size % 512;
if (zeros == 0 && !forceZeros)
{
return;
}
zeros = 512 - zeros;
OutputStream.Write(new byte[zeros], 0, zeros);
int zeros = unchecked((int)(((size + 511L) & ~511L) - size));
OutputStream.Write(stackalloc byte[zeros]);
}
protected override void Dispose(bool isDisposing)
@@ -108,8 +105,7 @@ namespace SharpCompress.Writers.Tar
{
if (finalizeArchiveOnClose)
{
PadTo512(0, true);
PadTo512(0, true);
OutputStream.Write(stackalloc byte[1024]);
}
switch (OutputStream)
{
@@ -128,4 +124,4 @@ namespace SharpCompress.Writers.Tar
base.Dispose(isDisposing);
}
}
}
}
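
The padding fix is the heart of this file: the old code cast the long size to int before taking the remainder, so entries larger than int.MaxValue produced a negative remainder and surplus zero bytes, which is the extra 512 bytes of tar padding the bug report describes. The new expression stays in long arithmetic and rounds size up to the next 512-byte boundary first; the Dispose change then writes the tar end-of-archive marker (two 512-byte zero blocks) directly, which is what the old forceZeros flag existed to produce. A standalone illustration of the identity and the failure it replaces:

internal static class TarPaddingSketch
{
    // ((size + 511) & ~511) is the smallest multiple of 512 >= size,
    // so subtracting size yields the padding needed, always in 0..511.
    internal static int PaddingFor(long size) =>
        unchecked((int)(((size + 511L) & ~511L) - size));

    // The old int-cast bug, for comparison:
    //   long size = int.MaxValue + 2L;   // 2147483649
    //   int zeros = (int)size % 512;     // cast wraps negative => -511
    //   zeros = 512 - zeros;             // 1023: 512 surplus zero bytes
    // PaddingFor(int.MaxValue + 2L) == 511, as expected.
}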

View File

@@ -71,60 +71,60 @@ namespace SharpCompress.Writers.Zip
usedCompression = ZipCompressionMethod.None;
}
byte[] intBuf = new byte[] { 80, 75, 1, 2, version, 0, version, 0 };
Span<byte> intBuf = stackalloc byte[] { 80, 75, 1, 2, version, 0, version, 0 };
//constant sig, then version made by, then version to extract
outputStream.Write(intBuf, 0, 8);
outputStream.Write(intBuf);
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)flags);
outputStream.Write(intBuf, 0, 2);
outputStream.Write(intBuf.Slice(0, 2));
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)usedCompression);
outputStream.Write(intBuf, 0, 2); // zipping method
outputStream.Write(intBuf.Slice(0, 2)); // zipping method
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, ModificationTime.DateTimeToDosTime());
outputStream.Write(intBuf, 0, 4);
outputStream.Write(intBuf.Slice(0, 4));
// zipping date and time
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, Crc);
outputStream.Write(intBuf, 0, 4); // file CRC
outputStream.Write(intBuf.Slice(0, 4)); // file CRC
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, compressedvalue);
outputStream.Write(intBuf, 0, 4); // compressed file size
outputStream.Write(intBuf.Slice(0, 4)); // compressed file size
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, decompressedvalue);
outputStream.Write(intBuf, 0, 4); // uncompressed file size
outputStream.Write(intBuf.Slice(0, 4)); // uncompressed file size
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)encodedFilename.Length);
outputStream.Write(intBuf, 0, 2); // Filename in zip
outputStream.Write(intBuf.Slice(0, 2)); // Filename in zip
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)extralength);
outputStream.Write(intBuf, 0, 2); // extra length
outputStream.Write(intBuf.Slice(0, 2)); // extra length
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)encodedComment.Length);
outputStream.Write(intBuf, 0, 2);
outputStream.Write(intBuf.Slice(0, 2));
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, 0);
outputStream.Write(intBuf, 0, 2); // disk=0
outputStream.Write(intBuf.Slice(0, 2)); // disk=0
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)flags);
outputStream.Write(intBuf, 0, 2); // file type: binary
outputStream.Write(intBuf.Slice(0, 2)); // file type: binary
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)flags);
outputStream.Write(intBuf, 0, 2); // Internal file attributes
outputStream.Write(intBuf.Slice(0, 2)); // Internal file attributes
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, 0x8100);
outputStream.Write(intBuf, 0, 2);
outputStream.Write(intBuf.Slice(0, 2));
// External file attributes (normal/readable)
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, headeroffsetvalue);
outputStream.Write(intBuf, 0, 4); // Offset of header
outputStream.Write(intBuf.Slice(0, 4)); // Offset of header
outputStream.Write(encodedFilename, 0, encodedFilename.Length);
if (zip64)
{
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, 0x0001);
outputStream.Write(intBuf, 0, 2);
outputStream.Write(intBuf.Slice(0, 2));
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)(extralength - 4));
outputStream.Write(intBuf, 0, 2);
outputStream.Write(intBuf.Slice(0, 2));
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, Decompressed);
outputStream.Write(intBuf, 0, 8);
outputStream.Write(intBuf);
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, Compressed);
outputStream.Write(intBuf, 0, 8);
outputStream.Write(intBuf);
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, HeaderOffset);
outputStream.Write(intBuf, 0, 8);
outputStream.Write(intBuf);
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, 0);
outputStream.Write(intBuf, 0, 4); // VolumeNumber = 0
outputStream.Write(intBuf.Slice(0, 4)); // VolumeNumber = 0
}
outputStream.Write(encodedComment, 0, encodedComment.Length);
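
Every replacement in this file follows one mechanical pattern: a small stackalloc scratch span, BinaryPrimitives to fill it little-endian, and the span-based Stream.Write (native on modern targets, the polyfill above elsewhere). Distilled into an illustrative helper that is not itself part of the diff:

using System;
using System.Buffers.Binary;
using System.IO;

internal static class LittleEndianWriteSketch
{
    internal static void WriteUInt16LE(Stream output, ushort value)
    {
        // One stack buffer replaces the old heap-allocated byte[];
        // Slice-style writes replace the (buffer, offset, count) overload.
        Span<byte> buf = stackalloc byte[2];
        BinaryPrimitives.WriteUInt16LittleEndian(buf, value);
        output.Write(buf);
    }
}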

View File

@@ -162,10 +162,9 @@ namespace SharpCompress.Writers.Zip
var explicitZipCompressionInfo = ToZipCompressionMethod(zipWriterEntryOptions.CompressionType ?? compressionType);
byte[] encodedFilename = WriterOptions.ArchiveEncoding.Encode(filename);
// TODO: Use stackalloc when we exclusively support netstandard2.1 or higher
byte[] intBuf = new byte[4];
Span<byte> intBuf = stackalloc byte[4];
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, ZipHeaderFactory.ENTRY_HEADER_BYTES);
OutputStream.Write(intBuf, 0, 4);
OutputStream.Write(intBuf);
if (explicitZipCompressionInfo == ZipCompressionMethod.Deflate)
{
if (OutputStream.CanSeek && useZip64)
@@ -193,18 +192,18 @@ namespace SharpCompress.Writers.Zip
}
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)flags);
OutputStream.Write(intBuf, 0, 2);
OutputStream.Write(intBuf.Slice(0, 2));
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)explicitZipCompressionInfo);
OutputStream.Write(intBuf, 0, 2); // zipping method
OutputStream.Write(intBuf.Slice(0, 2)); // zipping method
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, zipWriterEntryOptions.ModificationDateTime.DateTimeToDosTime());
OutputStream.Write(intBuf, 0, 4);
OutputStream.Write(intBuf);
// zipping date and time
OutputStream.Write(stackalloc byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 });
// unused CRC, un/compressed size, updated later
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)encodedFilename.Length);
OutputStream.Write(intBuf, 0, 2); // filename length
OutputStream.Write(intBuf.Slice(0, 2)); // filename length
var extralength = 0;
if (OutputStream.CanSeek && useZip64)
@@ -213,7 +212,7 @@ namespace SharpCompress.Writers.Zip
}
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)extralength);
OutputStream.Write(intBuf, 0, 2); // extra length
OutputStream.Write(intBuf.Slice(0, 2)); // extra length
OutputStream.Write(encodedFilename, 0, encodedFilename.Length);
if (extralength != 0)
@@ -227,25 +226,25 @@ namespace SharpCompress.Writers.Zip
private void WriteFooter(uint crc, uint compressed, uint uncompressed)
{
byte[] intBuf = new byte[4];
Span<byte> intBuf = stackalloc byte[4];
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, crc);
OutputStream.Write(intBuf, 0, 4);
OutputStream.Write(intBuf);
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, compressed);
OutputStream.Write(intBuf, 0, 4);
OutputStream.Write(intBuf);
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, uncompressed);
OutputStream.Write(intBuf, 0, 4);
OutputStream.Write(intBuf);
}
private void WriteEndRecord(ulong size)
{
var zip64 = isZip64 || entries.Count > ushort.MaxValue || streamPosition >= uint.MaxValue || size >= uint.MaxValue;
var zip64EndOfCentralDirectoryNeeded = entries.Count > ushort.MaxValue || streamPosition >= uint.MaxValue || size >= uint.MaxValue;
var sizevalue = size >= uint.MaxValue ? uint.MaxValue : (uint)size;
var streampositionvalue = streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)streamPosition;
byte[] intBuf = new byte[8];
if (zip64)
Span<byte> intBuf = stackalloc byte[8];
if (zip64EndOfCentralDirectoryNeeded)
{
var recordlen = 2 + 2 + 4 + 4 + 8 + 8 + 8 + 8;
@@ -253,51 +252,50 @@ namespace SharpCompress.Writers.Zip
OutputStream.Write(stackalloc byte[] { 80, 75, 6, 6 });
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, (ulong)recordlen);
OutputStream.Write(intBuf, 0, 8); // Size of zip64 end of central directory record
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, 0);
OutputStream.Write(intBuf, 0, 2); // Made by
OutputStream.Write(intBuf); // Size of zip64 end of central directory record
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, 45);
OutputStream.Write(intBuf, 0, 2); // Version needed
OutputStream.Write(intBuf.Slice(0, 2)); // Made by
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, 45);
OutputStream.Write(intBuf.Slice(0, 2)); // Version needed
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, 0);
OutputStream.Write(intBuf, 0, 4); // Disk number
OutputStream.Write(intBuf, 0, 4); // Central dir disk
OutputStream.Write(intBuf.Slice(0, 4)); // Disk number
OutputStream.Write(intBuf.Slice(0, 4)); // Central dir disk
// TODO: entries.Count is int, so max 2^31 files
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, (ulong)entries.Count);
OutputStream.Write(intBuf, 0, 8); // Entries in this disk
OutputStream.Write(intBuf, 0, 8); // Total entries
OutputStream.Write(intBuf); // Entries in this disk
OutputStream.Write(intBuf); // Total entries
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, size);
OutputStream.Write(intBuf, 0, 8); // Central Directory size
OutputStream.Write(intBuf); // Central Directory size
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, (ulong)streamPosition);
OutputStream.Write(intBuf, 0, 8); // Disk offset
OutputStream.Write(intBuf); // Disk offset
// Write zip64 end of central directory locator
OutputStream.Write(stackalloc byte[] { 80, 75, 6, 7 });
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, 0);
OutputStream.Write(intBuf, 0, 4); // Entry disk
OutputStream.Write(intBuf.Slice(0, 4)); // Entry disk
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, (ulong)streamPosition + size);
OutputStream.Write(intBuf, 0, 8); // Offset to the zip64 central directory
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, 0);
OutputStream.Write(intBuf, 0, 4); // Number of disks
OutputStream.Write(intBuf); // Offset to the zip64 central directory
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, 1);
OutputStream.Write(intBuf.Slice(0, 4)); // Number of disks
streamPosition += recordlen + (4 + 4 + 8 + 4);
streampositionvalue = streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)streampositionvalue;
streamPosition += 4 + 8 + recordlen + (4 + 4 + 8 + 4);
}
// Write normal end of central directory record
OutputStream.Write(stackalloc byte[] { 80, 75, 5, 6, 0, 0, 0, 0 });
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)entries.Count);
OutputStream.Write(intBuf, 0, 2);
OutputStream.Write(intBuf, 0, 2);
OutputStream.Write(intBuf.Slice(0, 2));
OutputStream.Write(intBuf.Slice(0, 2));
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, sizevalue);
OutputStream.Write(intBuf, 0, 4);
OutputStream.Write(intBuf.Slice(0, 4));
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, streampositionvalue);
OutputStream.Write(intBuf, 0, 4);
OutputStream.Write(intBuf.Slice(0, 4));
byte[] encodedComment = WriterOptions.ArchiveEncoding.Encode(zipComment);
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)encodedComment.Length);
OutputStream.Write(intBuf, 0, 2);
OutputStream.Write(intBuf.Slice(0, 2));
OutputStream.Write(encodedComment, 0, encodedComment.Length);
}
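
Two behavioural fixes ride along with the Span conversion in WriteEndRecord: the zip64 end-of-central-directory locator now reports 1 total disk instead of 0, and streamPosition additionally counts the 4-byte zip64 EOCD signature plus the 8-byte record-size field (4 + 8 + recordlen + the 20-byte locator), which the old accounting omitted. The dead streampositionvalue self-assignment disappears along with it.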
@@ -443,16 +441,16 @@ namespace SharpCompress.Writers.Zip
if (entry.Zip64HeaderOffset != 0)
{
originalStream.Position = (long)(entry.HeaderOffset + entry.Zip64HeaderOffset);
byte[] intBuf = new byte[8];
Span<byte> intBuf = stackalloc byte[8];
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, 0x0001);
originalStream.Write(intBuf, 0, 2);
originalStream.Write(intBuf.Slice(0, 2));
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, 8 + 8);
originalStream.Write(intBuf, 0, 2);
originalStream.Write(intBuf.Slice(0, 2));
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, entry.Decompressed);
originalStream.Write(intBuf, 0, 8);
originalStream.Write(intBuf);
BinaryPrimitives.WriteUInt64LittleEndian(intBuf, entry.Compressed);
originalStream.Write(intBuf, 0, 8);
originalStream.Write(intBuf);
}
originalStream.Position = writer.streamPosition + (long)entry.Compressed;
@@ -471,9 +469,9 @@ namespace SharpCompress.Writers.Zip
throw new NotSupportedException("Streams larger than 4GiB are not supported for non-seekable streams");
}
byte[] intBuf = new byte[4];
Span<byte> intBuf = stackalloc byte[4];
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, ZipHeaderFactory.POST_DATA_DESCRIPTOR);
originalStream.Write(intBuf, 0, 4);
originalStream.Write(intBuf);
writer.WriteFooter(entry.Crc,
compressedvalue,
decompressedvalue);

View File

@@ -0,0 +1,37 @@
using System.IO;
using SharpCompress.Common;
using SharpCompress.IO;
using Xunit;
namespace SharpCompress.Test.GZip
{
public class GZipReaderTests : ReaderTests
{
public GZipReaderTests()
{
UseExtensionInsteadOfNameToVerify = true;
}
[Fact]
public void GZip_Reader_Generic()
{
Read("Tar.tar.gz", CompressionType.GZip);
}
[Fact]
public void GZip_Reader_Generic2()
{
// read only as a GZip item
using (Stream stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "Tar.tar.gz")))
using (var reader = SharpCompress.Readers.GZip.GZipReader.Open(new RewindableStream(stream)))
{
while (reader.MoveToNextEntry()) // Crash here
{
Assert.NotEqual(0, reader.Entry.Size);
Assert.NotEqual(0, reader.Entry.Crc);
}
}
}
}
}
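
The second test is the interesting one: it wraps the file in a RewindableStream so GZipReader takes the forward-only path, and the inline "// Crash here" comment marks the MoveToNextEntry call the test exists to keep from throwing. The assertions pin that Size and Crc are populated, presumably from the gzip trailer, which stores the CRC32 and uncompressed size of each member.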

View File

@@ -360,6 +360,31 @@ namespace SharpCompress.Test.Rar
ArchiveFileRead("Rar5.solid.rar");
}
[Fact]
public void Rar2_Multi_ArchiveStreamRead()
{
DoRar_Multi_ArchiveStreamRead(new string[] {
"Rar2.multi.rar",
"Rar2.multi.r00",
"Rar2.multi.r01",
"Rar2.multi.r02",
"Rar2.multi.r03",
"Rar2.multi.r04",
"Rar2.multi.r05"}, false);
}
[Fact]
public void Rar2_Multi_ArchiveFileRead()
{
ArchiveFileRead("Rar2.multi.rar"); //r00, r01...
}
[Fact]
public void Rar2_ArchiveFileRead()
{
ArchiveFileRead("Rar2.rar");
}
[Fact]
public void Rar_Multi_ArchiveFileRead()
{

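The stream-based test feeds all seven volumes explicitly, while Rar2_Multi_ArchiveFileRead opens only Rar2.multi.rar and relies on the archive layer to discover the .r00 through .r05 companions on disk; both presumably exercise the Rar2 multi-volume path the fix targets.
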
View File

@@ -1,6 +1,6 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>net5.0</TargetFrameworks>
<TargetFrameworks>net5.0;net461</TargetFrameworks>
<AssemblyName>SharpCompress.Test</AssemblyName>
<AssemblyOriginatorKeyFile>../../SharpCompress.snk</AssemblyOriginatorKeyFile>
<SignAssembly>true</SignAssembly>
@@ -12,13 +12,13 @@
<ProjectReference Include="..\..\src\SharpCompress\SharpCompress.csproj" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.8.3" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.4.3">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
</PackageReference>
<PackageReference Include="FluentAssertions" Version="6.2.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.11.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.4.3" />
<PackageReference Include="xunit" Version="2.4.1" />
<PackageReference Include="Xunit.SkippableFact" Version="1.4.13" />
</ItemGroup>
<ItemGroup Condition=" '$(VersionlessImplicitFrameworkDefine)' != 'NETFRAMEWORK' ">
<PackageReference Include="Mono.Posix.NETStandard" Version="1.0.0" />
</ItemGroup>
</Project>

View File

@@ -0,0 +1,88 @@
using System.IO;
using System.Text;
using FluentAssertions;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;
using SharpCompress.IO;
using Xunit;
namespace SharpCompress.Test.Streams
{
public class ZLibBaseStreamTests
{
[Fact]
public void TestChunkedZlibCompressesEverything()
{
byte[] plainData = new byte[] { 0xf7, 0x1b, 0xda, 0x0f, 0xb6, 0x2b, 0x3d, 0x91, 0xd7, 0xe1, 0xb5, 0x11, 0x34, 0x5a, 0x51, 0x3f, 0x8b, 0xce, 0x49, 0xd2 };
byte[] buf = new byte[plainData.Length * 2];
MemoryStream plainStream1 = new MemoryStream(plainData);
DeflateStream compressor1 = new DeflateStream(plainStream1, CompressionMode.Compress);
// This is enough to read the entire data
int realCompressedSize = compressor1.Read(buf, 0, plainData.Length * 2);
MemoryStream plainStream2 = new MemoryStream(plainData);
DeflateStream compressor2 = new DeflateStream(plainStream2, CompressionMode.Compress);
int total = 0;
int r = -1; // Jumpstart
while (r != 0)
{
// Reading in chunks
r = compressor2.Read(buf, 0, plainData.Length);
total += r;
}
Assert.Equal(total, realCompressedSize);
}
[Fact]
public void Zlib_should_read_the_previously_written_message()
{
var message = new string('a', 131073); // 131073 causes the failure, but 131072 (-1) doesn't
var bytes = Encoding.ASCII.GetBytes(message);
using (var inputStream = new MemoryStream(bytes))
{
using (var compressedStream = new MemoryStream())
using (var byteBufferStream = new BufferedStream(inputStream)) // System.IO
{
Compress(byteBufferStream, compressedStream, compressionLevel: 1);
compressedStream.Position = 0;
using (var decompressedStream = new MemoryStream())
{
Decompress(compressedStream, decompressedStream);
byteBufferStream.Position = 0;
var result = Encoding.ASCII.GetString(GetBytes(byteBufferStream));
result.Should().Be(message);
}
}
}
}
private void Compress(Stream input, Stream output, int compressionLevel)
{
using (var zlibStream = new ZlibStream(new NonDisposingStream(output), CompressionMode.Compress, (CompressionLevel)compressionLevel))
{
zlibStream.FlushMode = FlushType.Sync;
input.CopyTo(zlibStream);
}
}
private void Decompress(Stream input, Stream output)
{
using (var zlibStream = new ZlibStream(new NonDisposingStream(input), CompressionMode.Decompress))
{
zlibStream.CopyTo(output);
}
}
byte[] GetBytes(BufferedStream stream)
{
byte[] bytes = new byte[stream.Length];
stream.Read(bytes, 0, (int)stream.Length);
return bytes;
}
}
}
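
The first test encodes the general Stream.Read contract the fix restores: a read may return fewer bytes than requested, so a chunked loop must keep yielding data until the stream is genuinely exhausted, and both loops must account for the same total. The consuming pattern, sketched generically (an assumed helper, not from the diff):

using System.IO;

internal static class ChunkedReadSketch
{
    internal static long DrainInChunks(Stream source, byte[] chunk)
    {
        long total = 0;
        int read;
        // A correct stream returns 0 only at true end-of-data, so this
        // loop must see every byte regardless of the chunk size chosen.
        while ((read = source.Read(chunk, 0, chunk.Length)) > 0)
        {
            total += read;
        }
        return total;
    }
}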

View File

@@ -190,7 +190,7 @@ namespace SharpCompress.Test.Tar
}
}
#if !NET461
#if !NETFRAMEWORK
[Fact]
public void Tar_GZip_With_Symlink_Entries()
{

View File

@@ -290,6 +290,28 @@ namespace SharpCompress.Test.Zip
Directory.Delete(SCRATCH_FILES_PATH, true);
}
/// <summary>
/// Creates an empty zip file and attempts to read it right afterwards.
/// Ensures that parsing file headers works even in that case
/// </summary>
[Fact]
public void Zip_Create_Empty_And_Read()
{
var archive = ZipArchive.Create();
var archiveStream = new MemoryStream();
archive.SaveTo(archiveStream, CompressionType.LZMA);
archiveStream.Position = 0;
var readArchive = ArchiveFactory.Open(archiveStream);
var count = readArchive.Entries.Count();
Assert.Equal(0, count);
}
[Fact]
public void Zip_Create_New_Add_Remove()
{
@@ -564,5 +586,35 @@ namespace SharpCompress.Test.Zip
}
}
}
[Fact]
public void Zip_LongComment_Read()
{
string zipPath = Path.Combine(TEST_ARCHIVES_PATH, "Zip.LongComment.zip");
using (ZipArchive za = ZipArchive.Open(zipPath))
{
var count = za.Entries.Count;
Assert.Equal(1, count);
}
}
[Fact]
public void Zip_Zip64_CompressedSizeExtraOnly_Read()
{
string zipPath = Path.Combine(TEST_ARCHIVES_PATH, "Zip.zip64.compressedonly.zip");
using (ZipArchive za = ZipArchive.Open(zipPath))
{
var firstEntry = za.Entries.First(x => x.Key == "test/test.txt");
using (var memoryStream = new MemoryStream())
using (var firstStream = firstEntry.OpenEntryStream())
{
firstStream.CopyTo(memoryStream);
Assert.Equal(15, memoryStream.Length);
}
}
}
}
}
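
Zip.zip64.compressedonly.zip presumably carries a zip64 extra field that holds only the compressed size, so this test guards the reader against assuming the field always contains both 8-byte size values; copying the entry out and checking its decompressed length exercises the full read path.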

View File

@@ -269,6 +269,34 @@ namespace SharpCompress.Test.Zip
VerifyFiles();
}
[Fact]
public void Zip_Deflate_ZipCrypto_Read()
{
int count = 0;
using (Stream stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "zipcrypto.zip")))
using (var reader = ZipReader.Open(stream, new ReaderOptions()
{
Password = "test"
}))
{
while (reader.MoveToNextEntry())
{
if (!reader.Entry.IsDirectory)
{
Assert.Equal(CompressionType.None, reader.Entry.CompressionType);
reader.WriteEntryToDirectory(SCRATCH_FILES_PATH,
new ExtractionOptions()
{
ExtractFullPath = true,
Overwrite = true
});
count++;
}
}
}
Assert.Equal(8, count);
}
[Fact]
public void TestSharpCompressWithEmptyStream()
{

Binary files not shown. (10 files)