Compare commits

...

9 Commits

Author SHA1 Message Date
copilot-swe-agent[bot]
4a6e5232ae Add opt-in multi-threading support with SupportsMultiThreadedExtraction flag
- Added IArchive.SupportsMultiThreadedExtraction property to indicate if multi-threading is supported
- Added ReaderOptions.EnableMultiThreadedExtraction option to opt-in to multi-threading
- Updated SeekableZipFilePart, TarFilePart, and SeekableFilePart to check the flag
- Added test to verify multi-threading flag behavior
- Multi-threading is now disabled by default for backward compatibility

Co-authored-by: adamhathcock <527620+adamhathcock@users.noreply.github.com>
2026-01-18 16:27:59 +00:00
copilot-swe-agent[bot]
3e23a6e5a6 Add multi-threading support for file-based archives - sync test passing
Co-authored-by: adamhathcock <527620+adamhathcock@users.noreply.github.com>
2026-01-18 16:09:54 +00:00
copilot-swe-agent[bot]
e0a43e9727 Initial plan 2026-01-18 15:56:08 +00:00
Adam Hathcock
f5d83c0e33 Merge pull request #1135 from adamhathcock/copilot/consolidate-compile-flags 2026-01-15 18:47:37 +00:00
copilot-swe-agent[bot]
d2cb792d91 Change NET6_0_OR_GREATER to NET8_0_OR_GREATER
Co-authored-by: adamhathcock <527620+adamhathcock@users.noreply.github.com>
2026-01-15 18:31:06 +00:00
copilot-swe-agent[bot]
52fef492a5 Additional simplifications: Remove NETCF, fix NET60 typo, consolidate NETCOREAPP2_1 pattern
Co-authored-by: adamhathcock <527620+adamhathcock@users.noreply.github.com>
2026-01-15 18:09:06 +00:00
copilot-swe-agent[bot]
a5300f3383 Replace NETFRAMEWORK and NETSTANDARD2_0 with LEGACY_DOTNET compile flag
Co-authored-by: adamhathcock <527620+adamhathcock@users.noreply.github.com>
2026-01-15 18:05:14 +00:00
copilot-swe-agent[bot]
cab3e7d498 Initial analysis: Planning compile flags consolidation
Co-authored-by: adamhathcock <527620+adamhathcock@users.noreply.github.com>
2026-01-15 17:55:37 +00:00
copilot-swe-agent[bot]
405dbb30cd Initial plan 2026-01-15 17:50:54 +00:00
43 changed files with 698 additions and 70 deletions

View File

@@ -1,7 +1,7 @@
// Copyright (c) Six Labors.
// Licensed under the Apache License, Version 2.0.
#if !NETSTANDARD2_0 && !NETSTANDARD2_1 && !NETFRAMEWORK
#if !LEGACY_DOTNET
#define SUPPORTS_RUNTIME_INTRINSICS
#define SUPPORTS_HOTPATH
#endif

View File

@@ -145,6 +145,19 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IAsyncArchive
/// </summary>
public virtual bool IsEncrypted => false;
/// <summary>
/// Returns whether multi-threaded extraction is supported for this archive.
/// Multi-threading is supported when:
/// 1. The archive is opened from a FileInfo or file path (not a stream)
/// 2. Multi-threading is explicitly enabled in ReaderOptions
/// 3. The archive is not SOLID (SOLID archives should use sequential extraction)
/// </summary>
public virtual bool SupportsMultiThreadedExtraction =>
_sourceStream is not null
&& _sourceStream.IsFileMode
&& ReaderOptions.EnableMultiThreadedExtraction
&& !IsSolid;
/// <summary>
/// The archive can find all the parts of the archive needed to fully extract the archive. This forces the parsing of the entire archive.
/// </summary>

View File

@@ -44,4 +44,12 @@ public interface IArchive : IDisposable
/// Returns whether the archive is encrypted.
/// </summary>
bool IsEncrypted { get; }
/// <summary>
/// Returns whether multi-threaded extraction is supported for this archive.
/// Multi-threading is supported when the archive is opened from a FileInfo or file path
/// (not a stream) and the format supports random access (e.g., Zip, Tar, Rar).
/// SOLID archives (some Rar, all 7Zip) should use sequential extraction for best performance.
/// </summary>
bool SupportsMultiThreadedExtraction { get; }
}
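For orientation, here is a minimal consumer-side sketch of the new opt-in API, assembled from the tests added later in this diff (ZipArchive.OpenArchive, entry.OpenEntryStream); the archive path and output directory are placeholders, not part of the change:

using System.IO;
using System.Linq;
using System.Threading.Tasks;
using SharpCompress.Archives.Zip;
using SharpCompress.Readers;

static class MultiThreadedExtractionSketch
{
    // Sketch only: opts in via ReaderOptions.EnableMultiThreadedExtraction and checks
    // IArchive.SupportsMultiThreadedExtraction before extracting entries concurrently.
    // "archive.zip" and "output" are placeholder paths.
    public static async Task ExtractAsync()
    {
        var options = new ReaderOptions { EnableMultiThreadedExtraction = true };
        using var archive = ZipArchive.OpenArchive(new FileInfo("archive.zip"), options);
        Directory.CreateDirectory("output");

        var entries = archive.Entries.Where(e => !e.IsDirectory).ToList();
        if (archive.SupportsMultiThreadedExtraction)
        {
            // File-backed, opt-in enabled, not SOLID: each task reads from an independent stream.
            await Task.WhenAll(
                entries.Select(entry =>
                    Task.Run(() =>
                    {
                        using var entryStream = entry.OpenEntryStream();
                        using var output = File.Create(Path.Combine("output", Path.GetFileName(entry.Key!)));
                        entryStream.CopyTo(output);
                    })
                )
            );
        }
        else
        {
            // Stream-based or SOLID archives keep the existing sequential path.
            foreach (var entry in entries)
            {
                using var entryStream = entry.OpenEntryStream();
                using var output = File.Create(Path.Combine("output", Path.GetFileName(entry.Key!)));
                entryStream.CopyTo(output);
            }
        }
    }
}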

View File

@@ -1,6 +1,7 @@
using System.IO;
using SharpCompress.Common.Rar;
using SharpCompress.Common.Rar.Headers;
using SharpCompress.IO;
namespace SharpCompress.Archives.Rar;
@@ -24,6 +25,76 @@ internal class SeekableFilePart : RarFilePart
internal override Stream GetCompressedStream()
{
Stream streamToUse;
// If the stream is a SourceStream in file mode with multi-threading enabled,
// create an independent stream to support concurrent extraction
if (
_stream is SourceStream sourceStream
&& sourceStream.IsFileMode
&& sourceStream.ReaderOptions.EnableMultiThreadedExtraction
)
{
var independentStream = sourceStream.CreateIndependentStream(0);
if (independentStream is not null)
{
streamToUse = independentStream;
streamToUse.Position = FileHeader.DataStartPosition;
if (FileHeader.R4Salt != null)
{
var cryptKey = new CryptKey3(_password!);
return new RarCryptoWrapper(streamToUse, FileHeader.R4Salt, cryptKey);
}
if (FileHeader.Rar5CryptoInfo != null)
{
var cryptKey = new CryptKey5(_password!, FileHeader.Rar5CryptoInfo);
return new RarCryptoWrapper(
streamToUse,
FileHeader.Rar5CryptoInfo.Salt,
cryptKey
);
}
return streamToUse;
}
}
// Check if the stream wraps a FileStream
Stream? underlyingStream = _stream;
if (_stream is IStreamStack streamStack)
{
underlyingStream = streamStack.BaseStream();
}
if (underlyingStream is FileStream fileStream)
{
// Create a new independent stream from the file
streamToUse = new FileStream(
fileStream.Name,
FileMode.Open,
FileAccess.Read,
FileShare.Read
);
streamToUse.Position = FileHeader.DataStartPosition;
if (FileHeader.R4Salt != null)
{
var cryptKey = new CryptKey3(_password!);
return new RarCryptoWrapper(streamToUse, FileHeader.R4Salt, cryptKey);
}
if (FileHeader.Rar5CryptoInfo != null)
{
var cryptKey = new CryptKey5(_password!, FileHeader.Rar5CryptoInfo);
return new RarCryptoWrapper(streamToUse, FileHeader.Rar5CryptoInfo.Salt, cryptKey);
}
return streamToUse;
}
// Fall back to existing behavior for stream-based sources
_stream.Position = FileHeader.DataStartPosition;
if (FileHeader.R4Salt != null)

View File

@@ -184,7 +184,7 @@ public partial class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, Sev
return Task.CompletedTask;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default

View File

@@ -82,7 +82,7 @@ namespace SharpCompress.Common
}
}
#if NET6_0_OR_GREATER
#if NET8_0_OR_GREATER
public async ValueTask DisposeAsync()
{
if (_disposed)

View File

@@ -93,7 +93,7 @@ public class EntryStream : Stream, IStreamStack
_stream.Dispose();
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask DisposeAsync()
{
if (_isDisposed)
@@ -171,7 +171,7 @@ public class EntryStream : Stream, IStreamStack
return read;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default

View File

@@ -1,5 +1,6 @@
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;
namespace SharpCompress.Common.Tar;
@@ -20,8 +21,45 @@ internal sealed class TarFilePart : FilePart
internal override Stream GetCompressedStream()
{
if (_seekableStream != null)
if (_seekableStream is not null)
{
// If the seekable stream is a SourceStream in file mode with multi-threading enabled,
// create an independent stream to support concurrent extraction
if (
_seekableStream is SourceStream sourceStream
&& sourceStream.IsFileMode
&& sourceStream.ReaderOptions.EnableMultiThreadedExtraction
)
{
var independentStream = sourceStream.CreateIndependentStream(0);
if (independentStream is not null)
{
independentStream.Position = Header.DataStartPosition ?? 0;
return new TarReadOnlySubStream(independentStream, Header.Size);
}
}
// Check if the seekable stream wraps a FileStream
Stream? underlyingStream = _seekableStream;
if (_seekableStream is IStreamStack streamStack)
{
underlyingStream = streamStack.BaseStream();
}
if (underlyingStream is FileStream fileStream)
{
// Create a new independent stream from the file
var independentStream = new FileStream(
fileStream.Name,
FileMode.Open,
FileAccess.Read,
FileShare.Read
);
independentStream.Position = Header.DataStartPosition ?? 0;
return new TarReadOnlySubStream(independentStream, Header.Size);
}
// Fall back to existing behavior for stream-based sources
_seekableStream.Position = Header.DataStartPosition ?? 0;
return new TarReadOnlySubStream(_seekableStream, Header.Size);
}

View File

@@ -66,7 +66,7 @@ internal class TarReadOnlySubStream : SharpCompressStream, IStreamStack
base.Dispose(disposing);
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async System.Threading.Tasks.ValueTask DisposeAsync()
{
if (_isDisposed)
@@ -170,7 +170,7 @@ internal class TarReadOnlySubStream : SharpCompressStream, IStreamStack
return read;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async System.Threading.Tasks.ValueTask<int> ReadAsync(
System.Memory<byte> buffer,
System.Threading.CancellationToken cancellationToken = default

View File

@@ -2,13 +2,16 @@ using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Common.Zip.Headers;
using SharpCompress.IO;
namespace SharpCompress.Common.Zip;
internal class SeekableZipFilePart : ZipFilePart
{
private bool _isLocalHeaderLoaded;
private volatile bool _isLocalHeaderLoaded;
private readonly SeekableZipHeaderFactory _headerFactory;
private readonly object _headerLock = new();
private readonly SemaphoreSlim _asyncHeaderSemaphore = new(1, 1);
internal SeekableZipFilePart(
SeekableZipHeaderFactory headerFactory,
@@ -21,8 +24,14 @@ internal class SeekableZipFilePart : ZipFilePart
{
if (!_isLocalHeaderLoaded)
{
LoadLocalHeader();
_isLocalHeaderLoaded = true;
lock (_headerLock)
{
if (!_isLocalHeaderLoaded)
{
LoadLocalHeader();
_isLocalHeaderLoaded = true;
}
}
}
return base.GetCompressedStream();
}
@@ -33,22 +42,173 @@ internal class SeekableZipFilePart : ZipFilePart
{
if (!_isLocalHeaderLoaded)
{
await LoadLocalHeaderAsync(cancellationToken);
_isLocalHeaderLoaded = true;
await _asyncHeaderSemaphore.WaitAsync(cancellationToken);
try
{
if (!_isLocalHeaderLoaded)
{
await LoadLocalHeaderAsync(cancellationToken);
_isLocalHeaderLoaded = true;
}
}
finally
{
_asyncHeaderSemaphore.Release();
}
}
return await base.GetCompressedStreamAsync(cancellationToken);
}
private void LoadLocalHeader() =>
Header = _headerFactory.GetLocalHeader(BaseStream, (DirectoryEntryHeader)Header);
private async ValueTask LoadLocalHeaderAsync(CancellationToken cancellationToken = default) =>
Header = await _headerFactory.GetLocalHeaderAsync(BaseStream, (DirectoryEntryHeader)Header);
private void LoadLocalHeader()
{
// Use an independent stream for loading the header if multi-threading is enabled
Stream streamToUse = BaseStream;
bool disposeStream = false;
if (
BaseStream is SourceStream sourceStream
&& sourceStream.IsFileMode
&& sourceStream.ReaderOptions.EnableMultiThreadedExtraction
)
{
var independentStream = sourceStream.CreateIndependentStream(0);
if (independentStream is not null)
{
streamToUse = independentStream;
disposeStream = true;
}
}
else
{
// Check if BaseStream wraps a FileStream
Stream? underlyingStream = BaseStream;
if (BaseStream is IStreamStack streamStack)
{
underlyingStream = streamStack.BaseStream();
}
if (underlyingStream is FileStream fileStream)
{
streamToUse = new FileStream(
fileStream.Name,
FileMode.Open,
FileAccess.Read,
FileShare.Read
);
disposeStream = true;
}
}
try
{
Header = _headerFactory.GetLocalHeader(streamToUse, (DirectoryEntryHeader)Header);
}
finally
{
if (disposeStream)
{
streamToUse.Dispose();
}
}
}
private async ValueTask LoadLocalHeaderAsync(CancellationToken cancellationToken = default)
{
// Use an independent stream for loading the header if multi-threading is enabled
Stream streamToUse = BaseStream;
bool disposeStream = false;
if (
BaseStream is SourceStream sourceStream
&& sourceStream.IsFileMode
&& sourceStream.ReaderOptions.EnableMultiThreadedExtraction
)
{
var independentStream = sourceStream.CreateIndependentStream(0);
if (independentStream is not null)
{
streamToUse = independentStream;
disposeStream = true;
}
}
else
{
// Check if BaseStream wraps a FileStream
Stream? underlyingStream = BaseStream;
if (BaseStream is IStreamStack streamStack)
{
underlyingStream = streamStack.BaseStream();
}
if (underlyingStream is FileStream fileStream)
{
streamToUse = new FileStream(
fileStream.Name,
FileMode.Open,
FileAccess.Read,
FileShare.Read
);
disposeStream = true;
}
}
try
{
Header = await _headerFactory.GetLocalHeaderAsync(
streamToUse,
(DirectoryEntryHeader)Header
);
}
finally
{
if (disposeStream)
{
streamToUse.Dispose();
}
}
}
protected override Stream CreateBaseStream()
{
BaseStream.Position = Header.DataStartPosition.NotNull();
// If BaseStream is a SourceStream in file mode with multi-threading enabled,
// create an independent stream to support concurrent extraction
if (
BaseStream is SourceStream sourceStream
&& sourceStream.IsFileMode
&& sourceStream.ReaderOptions.EnableMultiThreadedExtraction
)
{
// Create a new independent stream for this entry
var independentStream = sourceStream.CreateIndependentStream(0);
if (independentStream is not null)
{
independentStream.Position = Header.DataStartPosition.NotNull();
return independentStream;
}
}
// Check if BaseStream wraps a FileStream (for multi-volume archives)
Stream? underlyingStream = BaseStream;
if (BaseStream is IStreamStack streamStack)
{
underlyingStream = streamStack.BaseStream();
}
if (underlyingStream is FileStream fileStream)
{
// Create a new independent stream from the file
var independentStream = new FileStream(
fileStream.Name,
FileMode.Open,
FileAccess.Read,
FileShare.Read
);
independentStream.Position = Header.DataStartPosition.NotNull();
return independentStream;
}
// Fall back to existing behavior for stream-based sources
BaseStream.Position = Header.DataStartPosition.NotNull();
return BaseStream;
}
}
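The header-loading changes above are the standard double-checked locking pattern: a volatile flag re-checked under a lock on the synchronous path, and under a SemaphoreSlim on the async path, since a lock cannot be held across an await. A generic sketch of the pattern (not SharpCompress code):

using System.Threading;
using System.Threading.Tasks;

class LazyHeaderSketch
{
    private volatile bool _loaded;
    private readonly object _gate = new();
    private readonly SemaphoreSlim _asyncGate = new(1, 1);

    public void EnsureLoaded()
    {
        if (_loaded) return;          // fast path, no contention
        lock (_gate)
        {
            if (!_loaded)             // re-check under the lock
            {
                Load();
                _loaded = true;       // volatile write publishes the result
            }
        }
    }

    public async Task EnsureLoadedAsync(CancellationToken ct = default)
    {
        if (_loaded) return;
        await _asyncGate.WaitAsync(ct);
        try
        {
            if (!_loaded)
            {
                await LoadAsync(ct);
                _loaded = true;
            }
        }
        finally
        {
            _asyncGate.Release();
        }
    }

    private void Load() { /* read the header */ }
    private Task LoadAsync(CancellationToken ct) => Task.CompletedTask;
}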

View File

@@ -20,7 +20,7 @@ internal class WinzipAesEncryptionData
{
_keySize = keySize;
#if NETFRAMEWORK || NETSTANDARD2_0
#if LEGACY_DOTNET
var rfc2898 = new Rfc2898DeriveBytes(password, salt, RFC2898_ITERATIONS);
KeyBytes = rfc2898.GetBytes(KeySizeInBytes);
IvBytes = rfc2898.GetBytes(KeySizeInBytes);

View File

@@ -98,7 +98,7 @@ public sealed class BZip2Stream : Stream, IStreamStack
public override void SetLength(long value) => stream.SetLength(value);
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override int Read(Span<byte> buffer) => stream.Read(buffer);

View File

@@ -299,7 +299,7 @@ public class DeflateStream : Stream, IStreamStack
await _baseStream.FlushAsync(cancellationToken).ConfigureAwait(false);
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask DisposeAsync()
{
if (_disposed)
@@ -370,7 +370,7 @@ public class DeflateStream : Stream, IStreamStack
.ConfigureAwait(false);
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default
@@ -461,7 +461,7 @@ public class DeflateStream : Stream, IStreamStack
.ConfigureAwait(false);
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask WriteAsync(
ReadOnlyMemory<byte> buffer,
CancellationToken cancellationToken = default

View File

@@ -345,7 +345,7 @@ public class GZipStream : Stream, IStreamStack
return n;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default
@@ -454,7 +454,7 @@ public class GZipStream : Stream, IStreamStack
await BaseStream.WriteAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false);
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask WriteAsync(
ReadOnlyMemory<byte> buffer,
CancellationToken cancellationToken = default

View File

@@ -552,7 +552,7 @@ internal class ZlibBaseStream : Stream, IStreamStack
}
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask DisposeAsync()
{
if (isDisposed)
@@ -1171,7 +1171,7 @@ internal class ZlibBaseStream : Stream, IStreamStack
return rc;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default

View File

@@ -108,13 +108,9 @@ internal static class ZlibConstants
public const int Z_BUF_ERROR = -5;
/// <summary>
/// The size of the working buffer used in the ZlibCodec class. Defaults to 8192 bytes.
/// The size of the working buffer used in the ZlibCodec class. Defaults to 16384 bytes.
/// </summary>
#if NETCF
public const int WorkingBufferSizeDefault = 8192;
#else
public const int WorkingBufferSizeDefault = 16384;
#endif
/// <summary>
/// The minimum size of the working buffer used in the ZlibCodec class. Currently it is 128 bytes.

View File

@@ -277,7 +277,7 @@ public class ZlibStream : Stream, IStreamStack
await _baseStream.FlushAsync(cancellationToken).ConfigureAwait(false);
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask DisposeAsync()
{
if (_disposed)
@@ -347,7 +347,7 @@ public class ZlibStream : Stream, IStreamStack
.ConfigureAwait(false);
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default
@@ -431,7 +431,7 @@ public class ZlibStream : Stream, IStreamStack
.ConfigureAwait(false);
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask WriteAsync(
ReadOnlyMemory<byte> buffer,
CancellationToken cancellationToken = default

View File

@@ -200,7 +200,7 @@ public sealed class Deflate64Stream : Stream, IStreamStack
return count - remainingCount;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default

View File

@@ -163,7 +163,7 @@ public sealed class LZipStream : Stream, IStreamStack
public override void SetLength(long value) => throw new NotImplementedException();
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override ValueTask<int> ReadAsync(
Memory<byte> buffer,

View File

@@ -201,7 +201,7 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
}
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
[MemberNotNull(nameof(_outWindow))]
#endif
private void CreateDictionary()

View File

@@ -632,7 +632,7 @@ public class LzmaStream : Stream, IStreamStack
return total;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default

View File

@@ -185,7 +185,7 @@ internal sealed class MultiVolumeReadOnlyStream : Stream, IStreamStack
return totalRead;
}
#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
#if !LEGACY_DOTNET
public override async System.Threading.Tasks.ValueTask<int> ReadAsync(
Memory<byte> buffer,
System.Threading.CancellationToken cancellationToken = default

View File

@@ -385,7 +385,7 @@ internal class RarBLAKE2spStream : RarStream, IStreamStack
return result;
}
#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
#if !LEGACY_DOTNET
public override async System.Threading.Tasks.ValueTask<int> ReadAsync(
Memory<byte> buffer,
System.Threading.CancellationToken cancellationToken = default

View File

@@ -129,7 +129,7 @@ internal class RarCrcStream : RarStream, IStreamStack
return result;
}
#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
#if !LEGACY_DOTNET
public override async System.Threading.Tasks.ValueTask<int> ReadAsync(
Memory<byte> buffer,
System.Threading.CancellationToken cancellationToken = default

View File

@@ -189,7 +189,7 @@ internal class RarStream : Stream, IStreamStack
return outTotal;
}
#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
#if !LEGACY_DOTNET
public override async System.Threading.Tasks.ValueTask<int> ReadAsync(
Memory<byte> buffer,
System.Threading.CancellationToken cancellationToken = default

View File

@@ -74,7 +74,7 @@ public class CompressionStream : Stream
~CompressionStream() => Dispose(false);
#if !NETSTANDARD2_0 && !NETFRAMEWORK
#if !LEGACY_DOTNET
public override async ValueTask DisposeAsync()
#else
public async ValueTask DisposeAsync()
@@ -145,7 +145,7 @@ public class CompressionStream : Stream
public override void Write(byte[] buffer, int offset, int count) =>
Write(new ReadOnlySpan<byte>(buffer, offset, count));
#if !NETSTANDARD2_0 && !NETFRAMEWORK
#if !LEGACY_DOTNET
public override void Write(ReadOnlySpan<byte> buffer) =>
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue);
#else
@@ -176,7 +176,7 @@ public class CompressionStream : Stream
);
}
#if !NETSTANDARD2_0 && !NETFRAMEWORK
#if !LEGACY_DOTNET
private async ValueTask WriteInternalAsync(
ReadOnlyMemory<byte>? buffer,
ZSTD_EndDirective directive,
@@ -218,7 +218,7 @@ public class CompressionStream : Stream
);
}
#if !NETSTANDARD2_0 && !NETFRAMEWORK
#if !LEGACY_DOTNET
public override Task WriteAsync(
byte[] buffer,

View File

@@ -105,7 +105,7 @@ public class DecompressionStream : Stream
public override int Read(byte[] buffer, int offset, int count) =>
Read(new Span<byte>(buffer, offset, count));
#if !NETSTANDARD2_0 && !NETFRAMEWORK
#if !LEGACY_DOTNET
public override int Read(Span<byte> buffer)
#else
public int Read(Span<byte> buffer)
@@ -158,7 +158,7 @@ public class DecompressionStream : Stream
}
}
#if !NETSTANDARD2_0 && !NETFRAMEWORK
#if !LEGACY_DOTNET
public override Task<int> ReadAsync(
byte[] buffer,
int offset,
@@ -276,7 +276,7 @@ public class DecompressionStream : Stream
throw new ObjectDisposedException(nameof(DecompressionStream));
}
#if NETSTANDARD2_0 || NETFRAMEWORK
#if LEGACY_DOTNET
public virtual Task DisposeAsync()
{
try

View File

@@ -16,7 +16,7 @@ public static unsafe class UnsafeHelper
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static void* malloc(ulong size)
{
#if NET6_0_OR_GREATER
#if NET8_0_OR_GREATER
var ptr = NativeMemory.Alloc((nuint)size);
#else
var ptr = (void*)Marshal.AllocHGlobal((nint)size);
@@ -31,7 +31,7 @@ public static unsafe class UnsafeHelper
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static void* calloc(ulong num, ulong size)
{
#if NET6_0_OR_GREATER
#if NET8_0_OR_GREATER
return NativeMemory.AllocZeroed((nuint)num, (nuint)size);
#else
var total = num * size;
@@ -53,7 +53,7 @@ public static unsafe class UnsafeHelper
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static void free(void* ptr)
{
#if NET6_0_OR_GREATER
#if NET8_0_OR_GREATER
NativeMemory.Free(ptr);
#else
Marshal.FreeHGlobal((IntPtr)ptr);

View File

@@ -70,7 +70,7 @@ public sealed class Crc32Stream : Stream, IStreamStack
public override void SetLength(long value) => throw new NotSupportedException();
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override void Write(ReadOnlySpan<byte> buffer)
{

View File

@@ -154,7 +154,7 @@ internal class BufferedSubStream : SharpCompressStream, IStreamStack
return count;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default

View File

@@ -64,7 +64,7 @@ internal sealed class ProgressReportingStream : Stream
return bytesRead;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override int Read(Span<byte> buffer)
{
var bytesRead = _baseStream.Read(buffer);
@@ -95,7 +95,7 @@ internal sealed class ProgressReportingStream : Stream
return bytesRead;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default
@@ -147,7 +147,7 @@ internal sealed class ProgressReportingStream : Stream
base.Dispose(disposing);
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask DisposeAsync()
{
if (!_leaveOpen)

View File

@@ -81,7 +81,7 @@ internal class ReadOnlySubStream : SharpCompressStream, IStreamStack
return value;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override int Read(Span<byte> buffer)
{
var sliceLen = BytesLeftToRead < buffer.Length ? BytesLeftToRead : buffer.Length;
@@ -117,7 +117,7 @@ internal class ReadOnlySubStream : SharpCompressStream, IStreamStack
return read;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default

View File

@@ -384,7 +384,7 @@ public class SharpCompressStream : Stream, IStreamStack
await Stream.FlushAsync(cancellationToken).ConfigureAwait(false);
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,

View File

@@ -98,6 +98,30 @@ public class SourceStream : Stream, IStreamStack
private Stream Current => _streams[_stream];
/// <summary>
/// Creates an independent stream for the specified volume index.
/// This allows multiple threads to read from different positions concurrently.
/// Only works when IsFileMode is true.
/// </summary>
/// <param name="volumeIndex">The volume index to create a stream for</param>
/// <returns>A new independent FileStream, or null if not in file mode or the volume doesn't exist</returns>
public Stream? CreateIndependentStream(int volumeIndex)
{
if (!IsFileMode)
{
return null;
}
// Ensure the volume is loaded
if (!LoadStream(volumeIndex))
{
return null;
}
// Create a new independent stream from the FileInfo
return _files[volumeIndex].OpenRead();
}
public bool LoadStream(int index) //ensure all parts to id are loaded
{
while (_streams.Count <= index)
@@ -289,7 +313,7 @@ public class SourceStream : Stream, IStreamStack
return total - count;
}
#if !NETFRAMEWORK && !NETSTANDARD2_0
#if !LEGACY_DOTNET
public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,

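CreateIndependentStream relies on the fact that several FileStreams opened with FileShare.Read on the same file each keep their own position, so different threads can read different regions of an archive at the same time. A standalone illustration of that mechanism (the path is a placeholder, not SharpCompress code):

using System.IO;
using System.Threading.Tasks;

static class IndependentStreamSketch
{
    // Two streams over the same file, each with FileShare.Read and its own position,
    // read concurrently without affecting each other.
    public static async Task<(int first, int second)> ReadTwoRegionsAsync(string path)
    {
        static int ReadAt(string file, long offset, byte[] buffer)
        {
            using var fs = new FileStream(file, FileMode.Open, FileAccess.Read, FileShare.Read);
            fs.Position = offset;
            return fs.Read(buffer, 0, buffer.Length);
        }

        var a = Task.Run(() => ReadAt(path, 0, new byte[4096]));
        var b = Task.Run(() => ReadAt(path, 4096, new byte[4096]));
        await Task.WhenAll(a, b);
        return (a.Result, b.Result);
    }
}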
View File

@@ -21,7 +21,7 @@ internal static class NotNullExtensions
return source.AsEnumerable();
}
#if NETFRAMEWORK || NETSTANDARD
#if LEGACY_DOTNET
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static T NotNull<T>(this T? obj, string? message = null)
where T : class

View File

@@ -1,4 +1,4 @@
#if NETFRAMEWORK || NETSTANDARD2_0
#if LEGACY_DOTNET
namespace SharpCompress;

View File

@@ -277,7 +277,7 @@ public abstract class AbstractReader<TEntry, TVolume> : IReader, IAsyncReader
}
}
//don't know the size so we have to try to decompress to skip
#if NETFRAMEWORK || NETSTANDARD2_0
#if LEGACY_DOTNET
using var s = await OpenEntryStreamAsync(cancellationToken).ConfigureAwait(false);
await s.SkipEntryAsync(cancellationToken).ConfigureAwait(false);
#else
@@ -344,7 +344,7 @@ public abstract class AbstractReader<TEntry, TVolume> : IReader, IAsyncReader
internal async ValueTask WriteAsync(Stream writeStream, CancellationToken cancellationToken)
{
#if NETFRAMEWORK || NETSTANDARD2_0
#if LEGACY_DOTNET
using Stream s = await OpenEntryStreamAsync(cancellationToken).ConfigureAwait(false);
var sourceStream = WrapWithProgress(s, Entry);
await sourceStream.CopyToAsync(writeStream, 81920, cancellationToken).ConfigureAwait(false);

View File

@@ -28,4 +28,12 @@ public class ReaderOptions : OptionsBase
/// When set, progress updates will be reported as entries are extracted.
/// </summary>
public IProgress<ProgressReport>? Progress { get; set; }
/// <summary>
/// Enable multi-threaded extraction support when the archive is opened from a FileInfo or file path.
/// When enabled, multiple threads can extract different entries concurrently by creating
/// independent file streams. This is only effective for archives opened from files, not streams.
/// Default is false for backward compatibility.
/// </summary>
public bool EnableMultiThreadedExtraction { get; set; }
}

View File

@@ -28,6 +28,9 @@
<EmbedUntrackedSources>true</EmbedUntrackedSources>
<AllowedOutputExtensionsInPackageBuildOutputFolder>$(AllowedOutputExtensionsInPackageBuildOutputFolder);.pdb</AllowedOutputExtensionsInPackageBuildOutputFolder>
</PropertyGroup>
<PropertyGroup Condition=" '$(TargetFramework)' == 'net48' Or '$(TargetFramework)' == 'netstandard2.0' ">
<DefineConstants>$(DefineConstants);LEGACY_DOTNET</DefineConstants>
</PropertyGroup>
<PropertyGroup Condition=" '$(TargetFramework)' == 'net8.0' Or '$(TargetFramework)' == 'net10.0' ">
<IsTrimmable>true</IsTrimmable>
</PropertyGroup>
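With this PropertyGroup, LEGACY_DOTNET is defined only for the net48 and netstandard2.0 targets, so the single symbol stands in for the former NETFRAMEWORK/NETSTANDARD2_0 checks. A minimal source-side sketch of how the branches line up (the helper is hypothetical):

using System;
using System.IO;

static class LegacyDotnetSketch
{
    // Hypothetical helper showing the consolidated branches: the modern path compiles
    // for net8.0/net10.0, the LEGACY_DOTNET path for net48 and netstandard2.0.
    public static int ReadChunk(Stream stream)
    {
#if LEGACY_DOTNET
        var buffer = new byte[8192];
        return stream.Read(buffer, 0, buffer.Length);
#else
        Span<byte> buffer = stackalloc byte[8192];
        return stream.Read(buffer);
#endif
    }
}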

View File

@@ -189,7 +189,7 @@ internal static class Utility
}
}
#if NET60_OR_GREATER
#if NET8_0_OR_GREATER
public bool ReadFully(byte[] buffer)
{
try

View File

@@ -216,9 +216,9 @@
"net10.0": {
"Microsoft.NET.ILLink.Tasks": {
"type": "Direct",
"requested": "[10.0.0, )",
"resolved": "10.0.0",
"contentHash": "kICGrGYEzCNI3wPzfEXcwNHgTvlvVn9yJDhSdRK+oZQy4jvYH529u7O0xf5ocQKzOMjfS07+3z9PKRIjrFMJDA=="
"requested": "[10.0.1, )",
"resolved": "10.0.1",
"contentHash": "ISahzLHsHY7vrwqr2p1YWZ+gsxoBRtH7gWRDK8fDUst9pp2He0GiesaqEfeX0V8QMCJM3eNEHGGpnIcPjFo2NQ=="
},
"Microsoft.NETFramework.ReferenceAssemblies": {
"type": "Direct",

View File

@@ -0,0 +1,115 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using SharpCompress.Archives.Tar;
using SharpCompress.Common;
using Xunit;
namespace SharpCompress.Test.Tar;
public class TarMultiThreadTests : TestBase
{
[Fact]
public void Tar_Archive_Concurrent_Extraction_From_FileInfo()
{
// Test concurrent extraction of multiple entries from a Tar archive opened from FileInfo
var testArchive = Path.Combine(TEST_ARCHIVES_PATH, "Tar.tar");
var fileInfo = new FileInfo(testArchive);
var options = new SharpCompress.Readers.ReaderOptions
{
EnableMultiThreadedExtraction = true,
};
using var archive = TarArchive.OpenArchive(fileInfo, options);
// Verify multi-threading is supported
Assert.True(archive.SupportsMultiThreadedExtraction);
var entries = archive.Entries.Where(e => !e.IsDirectory).Take(5).ToList();
// Extract multiple entries concurrently
var tasks = new List<Task>();
var outputFiles = new List<string>();
foreach (var entry in entries)
{
var outputFile = Path.Combine(SCRATCH_FILES_PATH, entry.Key!);
outputFiles.Add(outputFile);
tasks.Add(
Task.Run(() =>
{
var dir = Path.GetDirectoryName(outputFile);
if (dir != null)
{
Directory.CreateDirectory(dir);
}
using var entryStream = entry.OpenEntryStream();
using var fileStream = File.Create(outputFile);
entryStream.CopyTo(fileStream);
})
);
}
Task.WaitAll(tasks.ToArray());
// Verify all files were extracted
Assert.Equal(entries.Count, outputFiles.Count);
foreach (var outputFile in outputFiles)
{
Assert.True(File.Exists(outputFile), $"File {outputFile} should exist");
}
}
[Fact]
public async Task Tar_Archive_Concurrent_Extraction_From_FileInfo_Async()
{
// Test concurrent async extraction of multiple entries from a Tar archive opened from FileInfo
var testArchive = Path.Combine(TEST_ARCHIVES_PATH, "Tar.tar");
var fileInfo = new FileInfo(testArchive);
var options = new SharpCompress.Readers.ReaderOptions
{
EnableMultiThreadedExtraction = true,
};
using var archive = TarArchive.OpenArchive(fileInfo, options);
var entries = archive.Entries.Where(e => !e.IsDirectory).Take(5).ToList();
// Extract multiple entries concurrently
var tasks = new List<Task>();
var outputFiles = new List<string>();
foreach (var entry in entries)
{
var outputFile = Path.Combine(SCRATCH_FILES_PATH, entry.Key!);
outputFiles.Add(outputFile);
tasks.Add(
Task.Run(async () =>
{
var dir = Path.GetDirectoryName(outputFile);
if (dir != null)
{
Directory.CreateDirectory(dir);
}
using var entryStream = await entry.OpenEntryStreamAsync();
using var fileStream = File.Create(outputFile);
await entryStream.CopyToAsync(fileStream);
})
);
}
await Task.WhenAll(tasks);
// Verify all files were extracted
Assert.Equal(entries.Count, outputFiles.Count);
foreach (var outputFile in outputFiles)
{
Assert.True(File.Exists(outputFile), $"File {outputFile} should exist");
}
}
}

View File

@@ -0,0 +1,192 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using SharpCompress.Archives.Zip;
using SharpCompress.Common;
using Xunit;
namespace SharpCompress.Test.Zip;
public class ZipMultiThreadTests : TestBase
{
[Fact]
public void Zip_Archive_Without_MultiThreading_Enabled()
{
// Test that extraction still works when multi-threading is NOT enabled
var testArchive = Path.Combine(TEST_ARCHIVES_PATH, "Zip.none.zip");
var fileInfo = new FileInfo(testArchive);
// Default options - multi-threading disabled
using var archive = ZipArchive.OpenArchive(fileInfo);
// Verify multi-threading is NOT supported
Assert.False(archive.SupportsMultiThreadedExtraction);
var entry = archive.Entries.First(e => !e.IsDirectory);
var outputFile = Path.Combine(SCRATCH_FILES_PATH, entry.Key!);
var dir = Path.GetDirectoryName(outputFile);
if (dir != null)
{
Directory.CreateDirectory(dir);
}
using var entryStream = entry.OpenEntryStream();
using var fileStream = File.Create(outputFile);
entryStream.CopyTo(fileStream);
Assert.True(File.Exists(outputFile));
}
[Fact]
public void Zip_Archive_Concurrent_Extraction_From_FileInfo()
{
// Test concurrent extraction of multiple entries from a Zip archive opened from FileInfo
var testArchive = Path.Combine(TEST_ARCHIVES_PATH, "Zip.none.zip");
var fileInfo = new FileInfo(testArchive);
var options = new SharpCompress.Readers.ReaderOptions
{
EnableMultiThreadedExtraction = true,
};
using var archive = ZipArchive.OpenArchive(fileInfo, options);
// Verify multi-threading is supported
Assert.True(archive.SupportsMultiThreadedExtraction);
var entries = archive.Entries.Where(e => !e.IsDirectory).Take(5).ToList();
// Extract multiple entries concurrently
var tasks = new List<Task>();
var outputFiles = new List<string>();
foreach (var entry in entries)
{
var outputFile = Path.Combine(SCRATCH_FILES_PATH, entry.Key!);
outputFiles.Add(outputFile);
tasks.Add(
Task.Run(() =>
{
var dir = Path.GetDirectoryName(outputFile);
if (dir != null)
{
Directory.CreateDirectory(dir);
}
using var entryStream = entry.OpenEntryStream();
using var fileStream = File.Create(outputFile);
entryStream.CopyTo(fileStream);
})
);
}
Task.WaitAll(tasks.ToArray());
// Verify all files were extracted
Assert.Equal(entries.Count, outputFiles.Count);
foreach (var outputFile in outputFiles)
{
Assert.True(File.Exists(outputFile), $"File {outputFile} should exist");
}
}
[Fact]
public async Task Zip_Archive_Concurrent_Extraction_From_FileInfo_Async()
{
// Test concurrent async extraction of multiple entries from a Zip archive opened from FileInfo
var testArchive = Path.Combine(TEST_ARCHIVES_PATH, "Zip.none.zip");
var fileInfo = new FileInfo(testArchive);
var options = new SharpCompress.Readers.ReaderOptions
{
EnableMultiThreadedExtraction = true,
};
using var archive = ZipArchive.OpenArchive(fileInfo, options);
var entries = archive.Entries.Where(e => !e.IsDirectory).Take(5).ToList();
// Extract multiple entries concurrently
var tasks = new List<Task>();
var outputFiles = new List<string>();
foreach (var entry in entries)
{
var outputFile = Path.Combine(SCRATCH_FILES_PATH, entry.Key!);
outputFiles.Add(outputFile);
tasks.Add(
Task.Run(async () =>
{
var dir = Path.GetDirectoryName(outputFile);
if (dir != null)
{
Directory.CreateDirectory(dir);
}
using var entryStream = await entry.OpenEntryStreamAsync();
using var fileStream = File.Create(outputFile);
await entryStream.CopyToAsync(fileStream);
})
);
}
await Task.WhenAll(tasks);
// Verify all files were extracted
Assert.Equal(entries.Count, outputFiles.Count);
foreach (var outputFile in outputFiles)
{
Assert.True(File.Exists(outputFile), $"File {outputFile} should exist");
}
}
[Fact]
public void Zip_Archive_Concurrent_Extraction_From_Path()
{
// Test concurrent extraction when opening from path (should use FileInfo internally)
var testArchive = Path.Combine(TEST_ARCHIVES_PATH, "Zip.none.zip");
var options = new SharpCompress.Readers.ReaderOptions
{
EnableMultiThreadedExtraction = true,
};
using var archive = ZipArchive.OpenArchive(testArchive, options);
var entries = archive.Entries.Where(e => !e.IsDirectory).Take(5).ToList();
// Extract multiple entries concurrently
var tasks = new List<Task>();
var outputFiles = new List<string>();
foreach (var entry in entries)
{
var outputFile = Path.Combine(SCRATCH_FILES_PATH, entry.Key!);
outputFiles.Add(outputFile);
tasks.Add(
Task.Run(() =>
{
var dir = Path.GetDirectoryName(outputFile);
if (dir != null)
{
Directory.CreateDirectory(dir);
}
using var entryStream = entry.OpenEntryStream();
using var fileStream = File.Create(outputFile);
entryStream.CopyTo(fileStream);
})
);
}
Task.WaitAll(tasks.ToArray());
// Verify all files were extracted
Assert.Equal(entries.Count, outputFiles.Count);
foreach (var outputFile in outputFiles)
{
Assert.True(File.Exists(outputFile), $"File {outputFile} should exist");
}
}
}