Merge remote-tracking branch 'origin/master' into adam/async-creation

# Conflicts:
#	src/SharpCompress/Common/EntryStream.cs
#	src/SharpCompress/IO/BufferedSubStream.cs
#	src/SharpCompress/packages.lock.json
Adam Hathcock
2026-01-26 14:16:14 +00:00
10 changed files with 393 additions and 32 deletions

src/SharpCompress/Common/EntryStream.cs

@@ -70,25 +70,11 @@ public partial class EntryStream : Stream, IStreamStack
         {
             if (ss.BaseStream() is SharpCompress.Compressors.Deflate.DeflateStream deflateStream)
             {
-                try
-                {
-                    deflateStream.Flush(); //Deflate over reads. Knock it back
-                }
-                catch (NotSupportedException)
-                {
-                    // Ignore: underlying stream does not support required operations for Flush
-                }
+                deflateStream.Flush(); //Deflate over reads. Knock it back
             }
             else if (ss.BaseStream() is SharpCompress.Compressors.LZMA.LzmaStream lzmaStream)
             {
-                try
-                {
-                    lzmaStream.Flush(); //Lzma over reads. Knock it back
-                }
-                catch (NotSupportedException)
-                {
-                    // Ignore: underlying stream does not support required operations for Flush
-                }
+                lzmaStream.Flush(); //Lzma over reads. Knock it back
             }
         }
 #if DEBUG_STREAMS
@@ -98,6 +84,39 @@ public partial class EntryStream : Stream, IStreamStack
         _stream.Dispose();
     }
+#if !LEGACY_DOTNET
+    public override async ValueTask DisposeAsync()
+    {
+        if (_isDisposed)
+        {
+            return;
+        }
+        _isDisposed = true;
+        if (!(_completed || _reader.Cancelled))
+        {
+            await SkipEntryAsync().ConfigureAwait(false);
+        }
+        //Need a safe standard approach to this - it's okay for compression to overreads. Handling needs to be standardised
+        if (_stream is IStreamStack ss)
+        {
+            if (ss.BaseStream() is SharpCompress.Compressors.Deflate.DeflateStream deflateStream)
+            {
+                await deflateStream.FlushAsync().ConfigureAwait(false);
+            }
+            else if (ss.BaseStream() is SharpCompress.Compressors.LZMA.LzmaStream lzmaStream)
+            {
+                await lzmaStream.FlushAsync().ConfigureAwait(false);
+            }
+        }
+#if DEBUG_STREAMS
+        this.DebugDispose(typeof(EntryStream));
+#endif
+        await base.DisposeAsync().ConfigureAwait(false);
+        await _stream.DisposeAsync().ConfigureAwait(false);
+    }
+#endif
     public override bool CanRead => true;
     public override bool CanSeek => false;
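The DisposeAsync added above follows the usual shape for an async-disposable Stream wrapper: return early if already disposed, finish any pending work (here, skipping the rest of the entry), then asynchronously dispose the base class and the wrapped stream with ConfigureAwait(false). A minimal sketch of that shape, using an illustrative WrapperStream type rather than SharpCompress's own classes:

```csharp
using System;
using System.IO;
using System.Threading.Tasks;

// Illustrative wrapper showing the async-dispose shape used by EntryStream above.
public sealed class WrapperStream : Stream
{
    private readonly Stream _inner;
    private bool _isDisposed;

    public WrapperStream(Stream inner) => _inner = inner;

    public override async ValueTask DisposeAsync()
    {
        if (_isDisposed)
        {
            return; // disposal must be idempotent
        }
        _isDisposed = true;

        // Dispose base-class state first, then the wrapped stream,
        // using ConfigureAwait(false) as library code should.
        await base.DisposeAsync().ConfigureAwait(false);
        await _inner.DisposeAsync().ConfigureAwait(false);
    }

    protected override void Dispose(bool disposing)
    {
        // Synchronous path for callers that use Dispose()/using.
        if (!_isDisposed && disposing)
        {
            _isDisposed = true;
            _inner.Dispose();
        }
        base.Dispose(disposing);
    }

    // Minimal Stream plumbing so the sketch compiles.
    public override bool CanRead => _inner.CanRead;
    public override bool CanSeek => false;
    public override bool CanWrite => false;
    public override long Length => _inner.Length;
    public override long Position
    {
        get => _inner.Position;
        set => throw new NotSupportedException();
    }
    public override void Flush() { }
    public override int Read(byte[] buffer, int offset, int count) => _inner.Read(buffer, offset, count);
    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
    public override void SetLength(long value) => throw new NotSupportedException();
    public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
}
```

Callers pick up the async path with `await using var stream = new WrapperStream(inner);`, which routes disposal through DisposeAsync instead of Dispose.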

ZlibBaseStream.cs

@@ -604,7 +604,13 @@ internal class ZlibBaseStream : Stream, IStreamStack
     public override void Flush()
     {
-        _stream.Flush();
+        // Only flush the underlying stream when in write mode
+        // Flushing input streams during read operations is not meaningful
+        // and can cause issues with forward-only/non-seekable streams
+        if (_streamMode == StreamMode.Writer)
+        {
+            _stream.Flush();
+        }
         //rewind the buffer
         ((IStreamStack)this).Rewind(z.AvailableBytesIn); //unused
         z.AvailableBytesIn = 0;
@@ -612,7 +618,13 @@ internal class ZlibBaseStream : Stream, IStreamStack
     public override async Task FlushAsync(CancellationToken cancellationToken)
     {
-        await _stream.FlushAsync(cancellationToken).ConfigureAwait(false);
+        // Only flush the underlying stream when in write mode
+        // Flushing input streams during read operations is not meaningful
+        // and can cause issues with forward-only/non-seekable streams
+        if (_streamMode == StreamMode.Writer)
+        {
+            await _stream.FlushAsync(cancellationToken).ConfigureAwait(false);
+        }
         //rewind the buffer
         ((IStreamStack)this).Rewind(z.AvailableBytesIn); //unused
         z.AvailableBytesIn = 0;
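The same guard appears in both the synchronous and asynchronous paths: the underlying stream is only flushed when the codec is writing (compressing). When reading, the wrapped source may be forward-only or non-seekable, and flushing it is at best a no-op, which appears to be what the try/catch removed from EntryStream above was papering over. A minimal sketch of the guard as a standalone helper (the helper name and the bool parameter are illustrative, not SharpCompress API):

```csharp
using System.IO;
using System.Threading;
using System.Threading.Tasks;

internal static class FlushGuard
{
    // Forward Flush only when the stream is being used for output.
    // Input streams that are being decompressed should not be flushed.
    public static void FlushIfWriting(Stream inner, bool isWriter)
    {
        if (isWriter)
        {
            inner.Flush();
        }
    }

    // Async counterpart: skip the call entirely in read mode.
    public static Task FlushIfWritingAsync(Stream inner, bool isWriter, CancellationToken cancellationToken) =>
        isWriter ? inner.FlushAsync(cancellationToken) : Task.CompletedTask;
}
```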

src/SharpCompress/IO/BufferedSubStream.cs

@@ -29,17 +29,25 @@ internal partial class BufferedSubStream : SharpCompressStream, IStreamStack
 #if DEBUG_STREAMS
         this.DebugDispose(typeof(BufferedSubStream));
 #endif
-        if (disposing)
+        if (_isDisposed)
+        {
+            return;
+        }
+        _isDisposed = true;
+        if (disposing && _cache is not null)
         {
             ArrayPool<byte>.Shared.Return(_cache);
+            _cache = null;
         }
         base.Dispose(disposing);
     }
     private int _cacheOffset;
     private int _cacheLength;
-    private readonly byte[] _cache = ArrayPool<byte>.Shared.Rent(32 << 10);
+    private byte[]? _cache = ArrayPool<byte>.Shared.Rent(81920);
     private long origin;
+    private bool _isDisposed;
     private long BytesLeftToRead { get; set; }
@@ -61,19 +69,58 @@ internal partial class BufferedSubStream : SharpCompressStream, IStreamStack
     private void RefillCache()
     {
-        var count = (int)Math.Min(BytesLeftToRead, _cache.Length);
+        if (_isDisposed)
+        {
+            throw new ObjectDisposedException(nameof(BufferedSubStream));
+        }
+        var count = (int)Math.Min(BytesLeftToRead, _cache!.Length);
         _cacheOffset = 0;
         if (count == 0)
         {
             _cacheLength = 0;
             return;
         }
-        Stream.Position = origin;
+        // Only seek if we're not already at the correct position
+        // This avoids expensive seek operations when reading sequentially
+        if (Stream.CanSeek && Stream.Position != origin)
+        {
+            Stream.Position = origin;
+        }
         _cacheLength = Stream.Read(_cache, 0, count);
         origin += _cacheLength;
         BytesLeftToRead -= _cacheLength;
     }
+    private async ValueTask RefillCacheAsync(CancellationToken cancellationToken)
+    {
+        if (_isDisposed)
+        {
+            throw new ObjectDisposedException(nameof(BufferedSubStream));
+        }
+        var count = (int)Math.Min(BytesLeftToRead, _cache!.Length);
+        _cacheOffset = 0;
+        if (count == 0)
+        {
+            _cacheLength = 0;
+            return;
+        }
+        // Only seek if we're not already at the correct position
+        // This avoids expensive seek operations when reading sequentially
+        if (Stream.CanSeek && Stream.Position != origin)
+        {
+            Stream.Position = origin;
+        }
+        _cacheLength = await Stream
+            .ReadAsync(_cache, 0, count, cancellationToken)
+            .ConfigureAwait(false);
+        origin += _cacheLength;
+        BytesLeftToRead -= _cacheLength;
+    }
     public override int Read(byte[] buffer, int offset, int count)
     {
         if (count > Length)
@@ -89,7 +136,7 @@ internal partial class BufferedSubStream : SharpCompressStream, IStreamStack
             }
             count = Math.Min(count, _cacheLength - _cacheOffset);
-            Buffer.BlockCopy(_cache, _cacheOffset, buffer, offset, count);
+            Buffer.BlockCopy(_cache!, _cacheOffset, buffer, offset, count);
             _cacheOffset += count;
         }
@@ -107,9 +154,64 @@ internal partial class BufferedSubStream : SharpCompressStream, IStreamStack
             }
         }
-        return _cache[_cacheOffset++];
+        return _cache![_cacheOffset++];
     }
+    public override async Task<int> ReadAsync(
+        byte[] buffer,
+        int offset,
+        int count,
+        CancellationToken cancellationToken
+    )
+    {
+        if (count > Length)
+        {
+            count = (int)Length;
+        }
+        if (count > 0)
+        {
+            if (_cacheOffset == _cacheLength)
+            {
+                await RefillCacheAsync(cancellationToken).ConfigureAwait(false);
+            }
+            count = Math.Min(count, _cacheLength - _cacheOffset);
+            Buffer.BlockCopy(_cache!, _cacheOffset, buffer, offset, count);
+            _cacheOffset += count;
+        }
+        return count;
+    }
+#if !LEGACY_DOTNET
+    public override async ValueTask<int> ReadAsync(
+        Memory<byte> buffer,
+        CancellationToken cancellationToken = default
+    )
+    {
+        var count = buffer.Length;
+        if (count > Length)
+        {
+            count = (int)Length;
+        }
+        if (count > 0)
+        {
+            if (_cacheOffset == _cacheLength)
+            {
+                await RefillCacheAsync(cancellationToken).ConfigureAwait(false);
+            }
+            count = Math.Min(count, _cacheLength - _cacheOffset);
+            _cache!.AsSpan(_cacheOffset, count).CopyTo(buffer.Span);
+            _cacheOffset += count;
+        }
+        return count;
+    }
+#endif
     public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
     public override void SetLength(long value) => throw new NotSupportedException();
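Beyond the new async read paths, these changes tighten the pooled-cache lifecycle: the 81920-byte buffer (the same default buffer size Stream.CopyTo uses) is rented from ArrayPool&lt;byte&gt;.Shared, returned exactly once on dispose, nulled so a double return is impossible, and every later refill throws ObjectDisposedException instead of touching a returned array. Independently, RefillCache/RefillCacheAsync now skip `Stream.Position = origin` when the source is already positioned there, avoiding a seek per refill during sequential reads. A compact sketch of the rent/return pattern in isolation (PooledCache is an illustrative name, not a SharpCompress type):

```csharp
using System;
using System.Buffers;

// Illustrative sketch of the rent/return lifecycle used by BufferedSubStream above.
internal sealed class PooledCache : IDisposable
{
    // 81920 bytes matches the rented cache size in the diff.
    private byte[]? _cache = ArrayPool<byte>.Shared.Rent(81920);
    private bool _isDisposed;

    public Span<byte> Slice(int length)
    {
        if (_isDisposed)
        {
            // Fail fast rather than reading from an array already returned to the pool.
            throw new ObjectDisposedException(nameof(PooledCache));
        }
        return _cache!.AsSpan(0, length);
    }

    public void Dispose()
    {
        if (_isDisposed)
        {
            return; // idempotent: the array must be returned exactly once
        }
        _isDisposed = true;
        if (_cache is not null)
        {
            ArrayPool<byte>.Shared.Return(_cache);
            _cache = null; // guards against double return and use-after-return
        }
    }
}
```

BufferedSubStream additionally checks `disposing` so the array is only returned when Dispose() is called explicitly, not from a finalizer path.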

SharpCompressStream.cs

@@ -265,7 +265,6 @@ public partial class SharpCompressStream : Stream, IStreamStack
             ValidateBufferState();
         }
         long orig = _internalPosition;
         long targetPos;
         // Calculate the absolute target position based on origin
         switch (origin)

src/SharpCompress/packages.lock.json

@@ -216,9 +216,9 @@
     "net10.0": {
       "Microsoft.NET.ILLink.Tasks": {
         "type": "Direct",
-        "requested": "[10.0.0, )",
-        "resolved": "10.0.0",
-        "contentHash": "kICGrGYEzCNI3wPzfEXcwNHgTvlvVn9yJDhSdRK+oZQy4jvYH529u7O0xf5ocQKzOMjfS07+3z9PKRIjrFMJDA=="
+        "requested": "[10.0.2, )",
+        "resolved": "10.0.2",
+        "contentHash": "sXdDtMf2qcnbygw9OdE535c2lxSxrZP8gO4UhDJ0xiJbl1wIqXS1OTcTDFTIJPOFd6Mhcm8gPEthqWGUxBsTqw=="
       },
       "Microsoft.NETFramework.ReferenceAssemblies": {
         "type": "Direct",
@@ -264,9 +264,9 @@
     "net8.0": {
       "Microsoft.NET.ILLink.Tasks": {
         "type": "Direct",
-        "requested": "[8.0.22, )",
-        "resolved": "8.0.22",
-        "contentHash": "MhcMithKEiyyNkD2ZfbDZPmcOdi0GheGfg8saEIIEfD/fol3iHmcV8TsZkD4ZYz5gdUuoX4YtlVySUU7Sxl9SQ=="
+        "requested": "[8.0.23, )",
+        "resolved": "8.0.23",
+        "contentHash": "GqHiB1HbbODWPbY/lc5xLQH8siEEhNA0ptpJCC6X6adtAYNEzu5ZlqV3YHA3Gh7fuEwgA8XqVwMtH2KNtuQM1Q=="
       },
       "Microsoft.NETFramework.ReferenceAssemblies": {
         "type": "Direct",