Compare commits

...

34 Commits

Author SHA1 Message Date
Adam Hathcock
8d17d09455 Merge pull request #624 from adamhathcock/issue-617
Add test and probable fix for Issue 617
2021-11-22 09:20:15 +00:00
Adam Hathcock
05208ccd9b Add test and probable fix for Issue 617 2021-11-22 08:40:40 +00:00
Adam Hathcock
a1e7c0068d Merge pull request #622 from adamhathcock/net461-tests
Net461 tests
2021-10-02 15:32:20 +01:00
Adam Hathcock
e6bec19946 Mark for 0.30 2021-10-02 15:29:22 +01:00
Adam Hathcock
ec2be2869f Fix whitespace from dotnet format 2021-10-02 15:29:03 +01:00
Adam Hathcock
ce5432ed73 Fix tests for multi-targeting 2021-10-02 15:25:43 +01:00
Adam Hathcock
b6e0ad89ce Remove duplicated artifact step 2021-10-02 15:21:05 +01:00
Adam Hathcock
2745bfa19b Minor SDK update 2021-10-02 15:19:51 +01:00
Adam Hathcock
3cdc4b38a6 Test 461 on github actions 2021-10-02 15:19:13 +01:00
Adam Hathcock
fc1ca808d7 Merge pull request #621 from inthemedium/master
Add net461 target to clean up issues with system.* nuget dependencies
2021-10-02 15:08:18 +01:00
Jeff Tyson
6983e66037 Fix preprocessor condition 2021-10-01 16:34:00 +00:00
Jeff Tyson
01f7336d09 Based on docs, the target should be net461 2021-09-29 22:04:47 +00:00
Jeff Tyson
1561bba538 Add net462 target to clean up issues with system.* nuget dependencies 2021-09-29 21:55:11 +00:00
Adam Hathcock
3ecf8a5e0c Merge pull request #616 from amosonn/patch-1
Fix for chunked read for ZLibBaseStream
2021-09-27 09:14:58 +01:00
Adam Hathcock
e2095fc416 Merge branch 'master' into patch-1 2021-09-27 09:08:58 +01:00
Amos Onn
8398d40106 Fix #615 2021-09-14 22:02:18 +02:00
Amos Onn
134fa8892f Test for bug #615 2021-09-14 21:55:05 +02:00
Adam Hathcock
ea5c8dc063 Merge pull request #614 from adamhathcock/ensure-dest-dir-exists
Ensure destination directory exists.
2021-09-12 08:56:42 +01:00
Adam Hathcock
0209d00164 Minor updates and prep for 0.29 2021-09-12 08:52:00 +01:00
Adam Hathcock
a8d065dc9e Ensure destination directory exists 2021-09-12 08:47:30 +01:00
Adam Hathcock
7bd9711ade Merge pull request #610 from cyr/master
Bugfix for TarWriter - too much padding in large files
2021-09-12 08:43:20 +01:00
cyr
61802eadb4 Merge branch 'adamhathcock:master' into master 2021-09-12 09:37:07 +02:00
Adam Hathcock
b425659058 Merge pull request #611 from Thunderstr1k3/fix-zipheader-seeking
Allowing to seek empty zip files
2021-09-12 08:28:05 +01:00
Christian
3e32e3d7b1 Allowing to seek empty zip files 2021-09-02 13:54:32 +02:00
cyr
1b661c9df1 Fixed bug where large (int32+ file size) adds an additional 512 bytes of padding in tar files. 2021-08-27 22:38:04 +02:00
Adam Hathcock
54fc26b93d Update build and mark for 0.28.3 2021-06-04 13:43:35 +01:00
Adam Hathcock
161f99bbad Merge pull request #601 from salvois/master
Write ZIP64 End of Central Directory only if needed.
2021-06-04 13:22:01 +01:00
Adam Hathcock
c012db0776 Merge branch 'master' into master 2021-06-04 13:17:13 +01:00
Adam Hathcock
8ee257d299 Merge pull request #592 from adamhathcock/memory-downgrade
Downgrade System.Memory to fix buffer version issue
2021-06-04 13:16:40 +01:00
Adam Hathcock
f9522107c3 Merge branch 'master' into memory-downgrade 2021-06-04 13:16:34 +01:00
Adam Hathcock
e07046a37a Merge pull request #596 from DannyBoyk/issue_595_conditionally_read_zip64_extra
Conditionally parse Zip64 extra field based on specification
2021-06-04 13:07:08 +01:00
Salvatore Isaja
ad6d0d9ae8 Write ZIP64 End of Central Directory only if needed. 2021-05-23 21:10:35 +02:00
Daniel Nash
fdc33e91bd Conditionally parse Zip64 extra field based on specification
The Zip64 extra field should look for values based on the corresponding
values in the local entry header.

Fixes adamhathcock/sharpcompress#595
2021-04-26 14:58:10 -04:00
Adam Hathcock
e6dded826b Downgrade System.Memory to fix buffer version issue 2021-04-24 09:16:46 +01:00
30 changed files with 281 additions and 102 deletions

View File

@@ -12,13 +12,9 @@ jobs:
- uses: actions/checkout@v1
- uses: actions/setup-dotnet@v1
with:
- dotnet-version: 5.0.101
+ dotnet-version: 5.0.401
- run: dotnet run -p build/build.csproj
- uses: actions/upload-artifact@v2
with:
name: ${{ matrix.os }}-sharpcompress.nupkg
path: artifacts/*
- - uses: actions/upload-artifact@v2
- with:
- name: ${{ matrix.os }}-sharpcompress.snupkg
- path: artifacts/*

View File

@@ -49,20 +49,20 @@ class Program
Target(Build, DependsOn(Format),
framework =>
{
- if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && framework == "net46")
- {
- return;
- }
Run("dotnet", "build src/SharpCompress/SharpCompress.csproj -c Release");
});
- Target(Test, DependsOn(Build), ForEach("net5.0"),
+ Target(Test, DependsOn(Build), ForEach("net5.0", "net461"),
framework =>
{
IEnumerable<string> GetFiles(string d)
{
return Glob.Files(".", d);
}
+ if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && framework == "net461")
+ {
+ return;
+ }
foreach (var file in GetFiles("**/*.Test.csproj"))
{

View File

@@ -6,9 +6,9 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Bullseye" Version="3.6.0" />
<PackageReference Include="Bullseye" Version="3.8.0" />
<PackageReference Include="Glob" Version="1.1.8" />
<PackageReference Include="SimpleExec" Version="6.4.0" />
<PackageReference Include="SimpleExec" Version="8.0.0" />
</ItemGroup>
</Project>

View File

@@ -1,5 +1,6 @@
{
"sdk": {
"version": "5.0.101"
"version": "5.0.300",
"rollForward": "latestFeature"
}
}

View File

@@ -4,7 +4,7 @@
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
- #if !NETSTANDARD2_0 && !NETSTANDARD2_1
+ #if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
#endif
@@ -22,7 +22,7 @@ namespace SharpCompress.Algorithms
/// </summary>
public const uint SeedValue = 1U;
- #if !NETSTANDARD2_0 && !NETSTANDARD2_1
+ #if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
private const int MinBufferSize = 64;
#endif
@@ -51,12 +51,7 @@ namespace SharpCompress.Algorithms
/// <returns>The <see cref="uint"/>.</returns>
public static uint Calculate(uint adler, ReadOnlySpan<byte> buffer)
{
- if (buffer.IsEmpty)
- {
- return SeedValue;
- }
- #if !NETSTANDARD2_0 && !NETSTANDARD2_1
+ #if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
if (Sse3.IsSupported && buffer.Length >= MinBufferSize)
{
return CalculateSse(adler, buffer);
@@ -69,7 +64,7 @@ namespace SharpCompress.Algorithms
}
// Based on https://github.com/chromium/chromium/blob/master/third_party/zlib/adler32_simd.c
- #if !NETSTANDARD2_0 && !NETSTANDARD2_1
+ #if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
private static unsafe uint CalculateSse(uint adler, ReadOnlySpan<byte> buffer)
{
uint s1 = adler & 0xFFFF;
@@ -282,4 +277,4 @@ namespace SharpCompress.Algorithms
return (s2 << 16) | s1;
}
}
- }
+ }

View File

@@ -10,7 +10,7 @@ using SharpCompress.Readers.Rar;
namespace SharpCompress.Archives.Rar
{
- public class
+ public class
RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>
{
internal Lazy<IRarUnpack> UnpackV2017 { get; } = new Lazy<IRarUnpack>(() => new SharpCompress.Compressors.Rar.UnpackV2017.Unpack());

View File

@@ -36,7 +36,7 @@ namespace SharpCompress.Common
Password = password;
}
- #if !NET461
+ #if !NETFRAMEWORK
static ArchiveEncoding()
{
Encoding.RegisterProvider(CodePagesEncodingProvider.Instance);

View File

@@ -14,14 +14,25 @@ namespace SharpCompress.Common
Action<string, ExtractionOptions?> write)
{
string destinationFileName;
- string file = Path.GetFileName(entry.Key);
string fullDestinationDirectoryPath = Path.GetFullPath(destinationDirectory);
//check for trailing slash.
if (fullDestinationDirectoryPath[fullDestinationDirectoryPath.Length - 1] != Path.DirectorySeparatorChar)
{
fullDestinationDirectoryPath += Path.DirectorySeparatorChar;
}
+ if (!Directory.Exists(fullDestinationDirectoryPath))
+ {
+ throw new ExtractionException($"Directory does not exist to extract to: {fullDestinationDirectoryPath}");
+ }
options ??= new ExtractionOptions()
{
Overwrite = true
};
+ string file = Path.GetFileName(entry.Key);
if (options.ExtractFullPath)
{
string folder = Path.GetDirectoryName(entry.Key)!;

View File

@@ -63,6 +63,8 @@ namespace SharpCompress.Common.Zip.Headers
var zip64ExtraData = Extra.OfType<Zip64ExtendedInformationExtraField>().FirstOrDefault();
if (zip64ExtraData != null)
{
+ zip64ExtraData.Process(UncompressedSize, CompressedSize, RelativeOffsetOfEntryHeader, DiskNumberStart);
if (CompressedSize == uint.MaxValue)
{
CompressedSize = zip64ExtraData.CompressedSize;

View File

@@ -53,6 +53,8 @@ namespace SharpCompress.Common.Zip.Headers
var zip64ExtraData = Extra.OfType<Zip64ExtendedInformationExtraField>().FirstOrDefault();
if (zip64ExtraData != null)
{
+ zip64ExtraData.Process(UncompressedSize, CompressedSize, 0, 0);
if (CompressedSize == uint.MaxValue)
{
CompressedSize = zip64ExtraData.CompressedSize;

View File

@@ -66,46 +66,74 @@ namespace SharpCompress.Common.Zip.Headers
public Zip64ExtendedInformationExtraField(ExtraDataType type, ushort length, byte[] dataBytes)
: base(type, length, dataBytes)
{
- Process();
}
- private void Process()
+ // From the spec, values are only in the extradata if the standard
+ // value is set to 0xFFFFFFFF (or 0xFFFF for the Disk Start Number).
+ // Values, if present, must appear in the following order:
+ // - Original Size
+ // - Compressed Size
+ // - Relative Header Offset
+ // - Disk Start Number
+ public void Process(long uncompressedFileSize, long compressedFileSize, long relativeHeaderOffset, ushort diskNumber)
{
- if (DataBytes.Length >= 8)
+ var bytesRequired = ((uncompressedFileSize == uint.MaxValue) ? 8 : 0)
+ + ((compressedFileSize == uint.MaxValue) ? 8 : 0)
+ + ((relativeHeaderOffset == uint.MaxValue) ? 8 : 0)
+ + ((diskNumber == ushort.MaxValue) ? 4 : 0);
+ var currentIndex = 0;
+ if (bytesRequired > DataBytes.Length)
{
- UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes);
+ throw new ArchiveException("Zip64 extended information extra field is not large enough for the required information");
}
- if (DataBytes.Length >= 16)
+ if (uncompressedFileSize == uint.MaxValue)
{
- CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(8));
+ UncompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(currentIndex));
+ currentIndex += 8;
}
- if (DataBytes.Length >= 24)
+ if (compressedFileSize == uint.MaxValue)
{
- RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(16));
+ CompressedSize = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(currentIndex));
+ currentIndex += 8;
}
- if (DataBytes.Length >= 28)
+ if (relativeHeaderOffset == uint.MaxValue)
{
- VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes.AsSpan(24));
+ RelativeOffsetOfEntryHeader = BinaryPrimitives.ReadInt64LittleEndian(DataBytes.AsSpan(currentIndex));
+ currentIndex += 8;
}
- switch (DataBytes.Length)
+ if (diskNumber == ushort.MaxValue)
{
- case 8:
- case 16:
- case 24:
- case 28:
- break;
- default:
- throw new ArchiveException($"Unexpected size of of Zip64 extended information extra field: {DataBytes.Length}");
+ VolumeNumber = BinaryPrimitives.ReadUInt32LittleEndian(DataBytes.AsSpan(currentIndex));
}
}
+ /// <summary>
+ /// Uncompressed file size. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
+ /// original entry header had a corresponding 0xFFFFFFFF value.
+ /// </summary>
public long UncompressedSize { get; private set; }
+ /// <summary>
+ /// Compressed file size. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
+ /// original entry header had a corresponding 0xFFFFFFFF value.
+ /// </summary>
public long CompressedSize { get; private set; }
+ /// <summary>
+ /// Relative offset of the entry header. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
+ /// original entry header had a corresponding 0xFFFFFFFF value.
+ /// </summary>
public long RelativeOffsetOfEntryHeader { get; private set; }
+ /// <summary>
+ /// Volume number. Only valid after <see cref="Process(long, long, long, ushort)"/> has been called and if the
+ /// original entry header had a corresponding 0xFFFF value.
+ /// </summary>
public uint VolumeNumber { get; private set; }
}
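
Issue #595 in a nutshell: the old parser assumed the Zip64 extra field always carried all four values at fixed offsets, but per the zip specification only the saturated fields are present, in a fixed order. A standalone sketch of the rule the new Process implements (illustrative code, not the library's):

using System;
using System.Buffers.Binary;

static class Zip64FieldSketch
{
    // An 8-byte value is present only when the matching 32-bit header field
    // is saturated (0xFFFFFFFF, or 0xFFFF for the disk number), and present
    // values always appear in this fixed order.
    public static (long? Uncompressed, long? Compressed, long? HeaderOffset, uint? Disk) Parse(
        ReadOnlySpan<byte> data, uint uncompressed32, uint compressed32, uint headerOffset32, ushort disk16)
    {
        int index = 0;
        long? uncompressed = uncompressed32 == uint.MaxValue ? ReadInt64(data, ref index) : (long?)null;
        long? compressed = compressed32 == uint.MaxValue ? ReadInt64(data, ref index) : (long?)null;
        long? headerOffset = headerOffset32 == uint.MaxValue ? ReadInt64(data, ref index) : (long?)null;
        uint? disk = disk16 == ushort.MaxValue
            ? BinaryPrimitives.ReadUInt32LittleEndian(data.Slice(index))
            : (uint?)null;
        return (uncompressed, compressed, headerOffset, disk);
    }

    private static long ReadInt64(ReadOnlySpan<byte> data, ref int index)
    {
        long value = BinaryPrimitives.ReadInt64LittleEndian(data.Slice(index));
        index += 8;
        return value;
    }
}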

View File

@@ -105,6 +105,6 @@ namespace SharpCompress.Common.Zip.Headers
internal ZipFilePart Part { get; set; }
- internal bool IsZip64 => CompressedSize == uint.MaxValue;
+ internal bool IsZip64 => CompressedSize >= uint.MaxValue;
}
}

View File

@@ -36,7 +36,7 @@ namespace SharpCompress.Common.Zip
// ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR should be before the EOCD
stream.Seek(eocd_location - ZIP64_EOCD_LENGTH - 4, SeekOrigin.Begin);
uint zip64_locator = reader.ReadUInt32();
- if( zip64_locator != ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR )
+ if (zip64_locator != ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR)
{
throw new ArchiveException("Failed to locate the Zip64 Directory Locator");
}
@@ -86,11 +86,11 @@ namespace SharpCompress.Common.Zip
}
}
- private static bool IsMatch( byte[] haystack, int position, byte[] needle)
+ private static bool IsMatch(byte[] haystack, int position, byte[] needle)
{
- for( int i = 0; i < needle.Length; i++ )
+ for (int i = 0; i < needle.Length; i++)
{
- if( haystack[ position + i ] != needle[ i ] )
+ if (haystack[position + i] != needle[i])
{
return false;
}
@@ -117,11 +117,12 @@ namespace SharpCompress.Common.Zip
// Search in reverse
Array.Reverse(seek);
- var max_search_area = len - MINIMUM_EOCD_LENGTH;
+ // don't exclude the minimum eocd region, otherwise you fail to locate the header in empty zip files
+ var max_search_area = len; // - MINIMUM_EOCD_LENGTH;
- for( int pos_from_end = 0; pos_from_end < max_search_area; ++pos_from_end)
+ for (int pos_from_end = 0; pos_from_end < max_search_area; ++pos_from_end)
{
- if( IsMatch(seek, pos_from_end, needle) )
+ if (IsMatch(seek, pos_from_end, needle))
{
stream.Seek(-pos_from_end, SeekOrigin.End);
return;
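
Why the old bound broke empty archives (#611): a zip with no entries and no comment is nothing but the minimal 22-byte end-of-central-directory record, so with the old bound the search area len - MINIMUM_EOCD_LENGTH is zero and the reverse scan never tests the one position where the signature lives. A round-trip sketch of the now-working case (mirrors the Zip_Create_Empty_And_Read test later in this diff; Deflate is merely an illustrative compression choice):

using System;
using System.IO;
using System.Linq;
using SharpCompress.Archives;
using SharpCompress.Archives.Zip;
using SharpCompress.Common;

using (var archiveStream = new MemoryStream())
{
    // Save an archive with zero entries, then reopen it from the stream.
    using (var archive = ZipArchive.Create())
    {
        archive.SaveTo(archiveStream, CompressionType.Deflate);
    }
    archiveStream.Position = 0;
    using (var readArchive = ArchiveFactory.Open(archiveStream))
    {
        Console.WriteLine(readArchive.Entries.Count()); // 0
    }
}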

View File

@@ -83,7 +83,7 @@ namespace SharpCompress.Compressors.BZip2
stream.SetLength(value);
}
- #if !NET461 && !NETSTANDARD2_0
+ #if !NETFRAMEWORK && !NETSTANDARD2_0
public override int Read(Span<byte> buffer)
{
@@ -123,4 +123,4 @@ namespace SharpCompress.Compressors.BZip2
return true;
}
}
- }
+ }

View File

@@ -502,13 +502,37 @@ namespace SharpCompress.Compressors.Deflate
throw new ZlibException("Cannot Read after Writing.");
}
+ int rc = 0;
+ // set up the output of the deflate/inflate codec:
+ _z.OutputBuffer = buffer;
+ _z.NextOut = offset;
+ _z.AvailableBytesOut = count;
+ if (count == 0)
+ {
+ return 0;
+ }
if (nomoreinput && _wantCompress)
{
- return 0; // workitem 8557
+ // no more input data available; therefore we flush to
+ // try to complete the read
+ rc = _z.Deflate(FlushType.Finish);
+ if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ {
+ throw new ZlibException(String.Format("Deflating: rc={0} msg={1}", rc, _z.Message));
+ }
+ rc = (count - _z.AvailableBytesOut);
+ // calculate CRC after reading
+ if (crc != null)
+ {
+ crc.SlurpBlock(buffer, offset, rc);
+ }
+ return rc;
}
if (buffer is null)
{
@@ -527,13 +551,6 @@ namespace SharpCompress.Compressors.Deflate
throw new ArgumentOutOfRangeException(nameof(count));
}
- int rc = 0;
- // set up the output of the deflate/inflate codec:
- _z.OutputBuffer = buffer;
- _z.NextOut = offset;
- _z.AvailableBytesOut = count;
// This is necessary in case _workingBuffer has been resized. (new byte[])
// (The first reference to _workingBuffer goes through the private accessor which
// may initialize it.)
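
This is the substance of #615/#616: when reading from a compressing stream whose input is exhausted, Read previously returned 0 even if the codec still held buffered output; it now flushes with FlushType.Finish and returns the remaining compressed bytes, keeping the CRC in step. A consumer-level sketch of the behaviour the fix guarantees (mirrors TestChunkedZlibCompressesEverything below; the helper name and chunk size are illustrative):

using System.IO;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;

static byte[] CompressByReading(byte[] plain, int chunkSize)
{
    using var source = new MemoryStream(plain);
    using var compressor = new DeflateStream(source, CompressionMode.Compress);
    using var output = new MemoryStream();
    var buf = new byte[chunkSize];
    int r;
    // Before the fix, Read() could return 0 as soon as the source was
    // exhausted, even though compressed bytes were still buffered; this
    // loop would then produce truncated output for small chunk sizes.
    while ((r = compressor.Read(buf, 0, buf.Length)) != 0)
    {
        output.Write(buf, 0, r);
    }
    return output.ToArray();
}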

View File

@@ -118,7 +118,7 @@ namespace SharpCompress.Compressors.LZMA
public override void SetLength(long value) => throw new NotImplementedException();
- #if !NET461 && !NETSTANDARD2_0
+ #if !NETFRAMEWORK && !NETSTANDARD2_0
public override int Read(Span<byte> buffer)
{

View File

@@ -42,7 +42,7 @@ namespace SharpCompress.Crypto
public override void SetLength(long value) => throw new NotSupportedException();
- #if !NET461 && !NETSTANDARD2_0
+ #if !NETFRAMEWORK && !NETSTANDARD2_0
public override void Write(ReadOnlySpan<byte> buffer)
{

View File

@@ -58,7 +58,7 @@ namespace SharpCompress.IO
Stream.Write(buffer, offset, count);
}
- #if !NET461 && !NETSTANDARD2_0
+ #if !NETFRAMEWORK && !NETSTANDARD2_0
public override int Read(Span<byte> buffer)
{
@@ -72,4 +72,4 @@ namespace SharpCompress.IO
#endif
}
- }
+ }

View File

@@ -1,4 +1,4 @@
- #if NET461 || NETSTANDARD2_0
+ #if NETFRAMEWORK || NETSTANDARD2_0
using System;
using System.Buffers;

View File

@@ -1,4 +1,4 @@
- #if NET461 || NETSTANDARD2_0
+ #if NETFRAMEWORK || NETSTANDARD2_0
namespace SharpCompress
{

View File

@@ -2,11 +2,11 @@
<PropertyGroup>
<AssemblyTitle>SharpCompress - Pure C# Decompression/Compression</AssemblyTitle>
<NeutralLanguage>en-US</NeutralLanguage>
- <VersionPrefix>0.28.2</VersionPrefix>
- <AssemblyVersion>0.28.2</AssemblyVersion>
- <FileVersion>0.28.2</FileVersion>
+ <VersionPrefix>0.30.1</VersionPrefix>
+ <AssemblyVersion>0.30.1</AssemblyVersion>
+ <FileVersion>0.30.1</FileVersion>
<Authors>Adam Hathcock</Authors>
- <TargetFrameworks>netstandard2.0;netstandard2.1;netcoreapp3.1;net5.0</TargetFrameworks>
+ <TargetFrameworks>net461;netstandard2.0;netstandard2.1;netcoreapp3.1;net5.0</TargetFrameworks>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
<AllowUnsafeBlocks>false</AllowUnsafeBlocks>
<AssemblyName>SharpCompress</AssemblyName>
@@ -18,7 +18,7 @@
<PackageLicense>https://github.com/adamhathcock/sharpcompress/blob/master/LICENSE.txt</PackageLicense>
<GenerateAssemblyTitleAttribute>false</GenerateAssemblyTitleAttribute>
<GenerateAssemblyProductAttribute>false</GenerateAssemblyProductAttribute>
- <Description>SharpCompress is a compression library for NET Standard 2.0/2.1//NET 4.6 that can unrar, decompress 7zip, decompress xz, zip/unzip, tar/untar lzip/unlzip, bzip2/unbzip2 and gzip/ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.</Description>
+ <Description>SharpCompress is a compression library for NET Standard 2.0/2.1/NET 5.0 that can unrar, decompress 7zip, decompress xz, zip/unzip, tar/untar lzip/unlzip, bzip2/unbzip2 and gzip/ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip is implemented.</Description>
<LangVersion>9</LangVersion>
<Nullable>enable</Nullable>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
@@ -37,5 +37,8 @@
<PackageReference Include="System.Text.Encoding.CodePages" Version="5.0.0" />
<PackageReference Include="System.Memory" Version="4.5.4" />
</ItemGroup>
+ <ItemGroup Condition=" '$(VersionlessImplicitFrameworkDefine)' == 'NETFRAMEWORK' ">
+ <PackageReference Include="System.Text.Encoding.CodePages" Version="5.0.0" />
+ <PackageReference Include="System.Memory" Version="4.5.4" />
+ </ItemGroup>
</Project>

View File

@@ -280,7 +280,7 @@ namespace SharpCompress
{
return ArrayPool<byte>.Shared.Rent(81920);
}
public static bool ReadFully(this Stream stream, byte[] buffer)
{
int total = 0;
@@ -295,7 +295,7 @@ namespace SharpCompress
}
return (total >= buffer.Length);
}
public static bool ReadFully(this Stream stream, Span<byte> buffer)
{
int total = 0;

View File

@@ -87,18 +87,15 @@ namespace SharpCompress.Writers.Tar
header.Name = NormalizeFilename(filename);
header.Size = realSize;
header.Write(OutputStream);
size = source.TransferTo(OutputStream);
- PadTo512(size.Value, false);
+ PadTo512(size.Value);
}
- private void PadTo512(long size, bool forceZeros)
+ private void PadTo512(long size)
{
- int zeros = (int)size % 512;
- if (zeros == 0 && !forceZeros)
- {
- return;
- }
- zeros = 512 - zeros;
+ int zeros = unchecked((int)(((size + 511L) & ~511L) - size));
OutputStream.Write(stackalloc byte[zeros]);
}
@@ -108,8 +105,7 @@ namespace SharpCompress.Writers.Tar
{
if (finalizeArchiveOnClose)
{
- PadTo512(0, true);
- PadTo512(0, true);
+ OutputStream.Write(stackalloc byte[1024]);
}
switch (OutputStream)
{
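
The root cause of #610: PadTo512 cast the 64-bit size to int before taking the modulo, so entries larger than int.MaxValue bytes computed a negative remainder and the writer emitted one extra 512-byte block. The replacement rounds up to the next 512-byte boundary entirely in long arithmetic. A worked example with an illustrative size:

long size = 3_000_000_100;                    // larger than int.MaxValue
// old: (int)size wraps negative, so (int)size % 512 == -412 and
//      zeros = 512 - (-412) == 924 bytes, i.e. 512 bytes too many.
long padded = (size + 511L) & ~511L;          // 3_000_000_512
int zeros = unchecked((int)(padded - size));  // 412, always in 0..511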

View File

@@ -238,13 +238,13 @@ namespace SharpCompress.Writers.Zip
private void WriteEndRecord(ulong size)
{
- var zip64 = isZip64 || entries.Count > ushort.MaxValue || streamPosition >= uint.MaxValue || size >= uint.MaxValue;
+ var zip64EndOfCentralDirectoryNeeded = entries.Count > ushort.MaxValue || streamPosition >= uint.MaxValue || size >= uint.MaxValue;
var sizevalue = size >= uint.MaxValue ? uint.MaxValue : (uint)size;
var streampositionvalue = streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)streamPosition;
Span<byte> intBuf = stackalloc byte[8];
- if (zip64)
+ if (zip64EndOfCentralDirectoryNeeded)
{
var recordlen = 2 + 2 + 4 + 4 + 8 + 8 + 8 + 8;
@@ -281,8 +281,7 @@ namespace SharpCompress.Writers.Zip
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, 1);
OutputStream.Write(intBuf.Slice(0, 4)); // Number of disks
- streamPosition += recordlen + (4 + 4 + 8 + 4);
- streampositionvalue = streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)streampositionvalue;
+ streamPosition += 4 + 8 + recordlen + (4 + 4 + 8 + 4);
}
// Write normal end of central directory record
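
This is #601: the renamed flag narrows when the Zip64 end-of-central-directory record (and its locator) is written — only when a value genuinely overflows a classic EOCD field, not merely because some entry was stored as Zip64 — and the second hunk corrects streamPosition to also count the record's signature and size fields (4 + 8 bytes). The decision, restated as a sketch (illustrative names, not the library's API):

// Zip64 EOCD + locator are required only when a classic EOCD field overflows.
static bool NeedsZip64EndOfCentralDirectory(long entryCount, long centralDirectoryOffset, long centralDirectorySize)
    => entryCount > ushort.MaxValue                // 16-bit total entry count
    || centralDirectoryOffset >= uint.MaxValue     // 32-bit start-of-CD offset
    || centralDirectorySize >= uint.MaxValue;      // 32-bit CD size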

View File

@@ -1,6 +1,6 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
- <TargetFrameworks>net5.0</TargetFrameworks>
+ <TargetFrameworks>net5.0;net461</TargetFrameworks>
<AssemblyName>SharpCompress.Test</AssemblyName>
<AssemblyOriginatorKeyFile>../../SharpCompress.snk</AssemblyOriginatorKeyFile>
<SignAssembly>true</SignAssembly>
@@ -12,13 +12,13 @@
<ProjectReference Include="..\..\src\SharpCompress\SharpCompress.csproj" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.8.3" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.4.3">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
</PackageReference>
<PackageReference Include="FluentAssertions" Version="6.2.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.11.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.4.3" />
<PackageReference Include="xunit" Version="2.4.1" />
<PackageReference Include="Xunit.SkippableFact" Version="1.4.13" />
</ItemGroup>
+ <ItemGroup Condition=" '$(VersionlessImplicitFrameworkDefine)' != 'NETFRAMEWORK' ">
+ <PackageReference Include="Mono.Posix.NETStandard" Version="1.0.0" />
+ </ItemGroup>
</Project>

View File

@@ -0,0 +1,88 @@
using System.IO;
using System.Text;
using FluentAssertions;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;
using SharpCompress.IO;
using Xunit;
namespace SharpCompress.Test.Streams
{
public class ZLibBaseStreamTests
{
[Fact]
public void TestChunkedZlibCompressesEverything()
{
byte[] plainData = new byte[] { 0xf7, 0x1b, 0xda, 0x0f, 0xb6, 0x2b, 0x3d, 0x91, 0xd7, 0xe1, 0xb5, 0x11, 0x34, 0x5a, 0x51, 0x3f, 0x8b, 0xce, 0x49, 0xd2 };
byte[] buf = new byte[plainData.Length * 2];
MemoryStream plainStream1 = new MemoryStream(plainData);
DeflateStream compressor1 = new DeflateStream(plainStream1, CompressionMode.Compress);
// This is enough to read the entire data
int realCompressedSize = compressor1.Read(buf, 0, plainData.Length * 2);
MemoryStream plainStream2 = new MemoryStream(plainData);
DeflateStream compressor2 = new DeflateStream(plainStream2, CompressionMode.Compress);
int total = 0;
int r = -1; // Jumpstart
while (r != 0)
{
// Reading in chunks
r = compressor2.Read(buf, 0, plainData.Length);
total += r;
}
Assert.Equal(total, realCompressedSize);
}
[Fact]
public void Zlib_should_read_the_previously_written_message()
{
var message = new string('a', 131073); // 131073 causes the failure, but 131072 (-1) doesn't
var bytes = Encoding.ASCII.GetBytes(message);
using (var inputStream = new MemoryStream(bytes))
{
using (var compressedStream = new MemoryStream())
using (var byteBufferStream = new BufferedStream(inputStream)) // System.IO
{
Compress(byteBufferStream, compressedStream, compressionLevel: 1);
compressedStream.Position = 0;
using (var decompressedStream = new MemoryStream())
{
Decompress(compressedStream, decompressedStream);
byteBufferStream.Position = 0;
var result = Encoding.ASCII.GetString(GetBytes(byteBufferStream));
result.Should().Be(message);
}
}
}
}
public void Compress(Stream input, Stream output, int compressionLevel)
{
using (var zlibStream = new ZlibStream(new NonDisposingStream(output), CompressionMode.Compress, (CompressionLevel)compressionLevel))
{
zlibStream.FlushMode = FlushType.Sync;
input.CopyTo(zlibStream);
}
}
public void Decompress(Stream input, Stream output)
{
using (var zlibStream = new ZlibStream(new NonDisposingStream(input), CompressionMode.Decompress))
{
zlibStream.CopyTo(output);
}
}
byte[] GetBytes(BufferedStream stream)
{
byte[] bytes = new byte[stream.Length];
stream.Read(bytes, 0, (int)stream.Length);
return bytes;
}
}
}

View File

@@ -190,7 +190,7 @@ namespace SharpCompress.Test.Tar
}
}
- #if !NET461
+ #if !NETFRAMEWORK
[Fact]
public void Tar_GZip_With_Symlink_Entries()
{

View File

@@ -290,6 +290,28 @@ namespace SharpCompress.Test.Zip
Directory.Delete(SCRATCH_FILES_PATH, true);
}
+ /// <summary>
+ /// Creates an empty zip file and attempts to read it right afterwards.
+ /// Ensures that parsing file headers works even in that case
+ /// </summary>
+ [Fact]
+ public void Zip_Create_Empty_And_Read()
+ {
+ var archive = ZipArchive.Create();
+ var archiveStream = new MemoryStream();
+ archive.SaveTo(archiveStream, CompressionType.LZMA);
+ archiveStream.Position = 0;
+ var readArchive = ArchiveFactory.Open(archiveStream);
+ var count = readArchive.Entries.Count();
+ Assert.Equal(0, count);
+ }
[Fact]
public void Zip_Create_New_Add_Remove()
{
@@ -570,11 +592,29 @@ namespace SharpCompress.Test.Zip
{
string zipPath = Path.Combine(TEST_ARCHIVES_PATH, "Zip.LongComment.zip");
- using(ZipArchive za = ZipArchive.Open(zipPath))
+ using (ZipArchive za = ZipArchive.Open(zipPath))
{
var count = za.Entries.Count;
Assert.Equal(1, count);
}
}
+ [Fact]
+ public void Zip_Zip64_CompressedSizeExtraOnly_Read()
+ {
+ string zipPath = Path.Combine(TEST_ARCHIVES_PATH, "Zip.zip64.compressedonly.zip");
+ using (ZipArchive za = ZipArchive.Open(zipPath))
+ {
+ var firstEntry = za.Entries.First(x => x.Key == "test/test.txt");
+ using (var memoryStream = new MemoryStream())
+ using (var firstStream = firstEntry.OpenEntryStream())
+ {
+ firstStream.CopyTo(memoryStream);
+ Assert.Equal(15, memoryStream.Length);
+ }
+ }
+ }
}
}

View File

@@ -268,16 +268,16 @@ namespace SharpCompress.Test.Zip
}
VerifyFiles();
}
[Fact]
public void Zip_Deflate_ZipCrypto_Read()
{
int count = 0;
using (Stream stream = File.OpenRead(Path.Combine(TEST_ARCHIVES_PATH, "zipcrypto.zip")))
using (var reader = ZipReader.Open(stream, new ReaderOptions()
- {
- Password = "test"
- }))
+ {
+ Password = "test"
+ }))
{
while (reader.MoveToNextEntry())
{