mirror of https://github.com/adamhathcock/sharpcompress.git (synced 2026-02-04 05:25:00 +00:00)

Merge remote-tracking branch 'origin/master' into adam/async-creation
# Conflicts:
#	src/SharpCompress/Common/EntryStream.cs
#	src/SharpCompress/IO/BufferedSubStream.cs
#	src/SharpCompress/packages.lock.json
@@ -70,25 +70,11 @@ public partial class EntryStream : Stream, IStreamStack
        {
            if (ss.BaseStream() is SharpCompress.Compressors.Deflate.DeflateStream deflateStream)
            {
                try
                {
                    deflateStream.Flush(); //Deflate over reads. Knock it back
                }
                catch (NotSupportedException)
                {
                    // Ignore: underlying stream does not support required operations for Flush
                }
                deflateStream.Flush(); //Deflate over reads. Knock it back
            }
            else if (ss.BaseStream() is SharpCompress.Compressors.LZMA.LzmaStream lzmaStream)
            {
                try
                {
                    lzmaStream.Flush(); //Lzma over reads. Knock it back
                }
                catch (NotSupportedException)
                {
                    // Ignore: underlying stream does not support required operations for Flush
                }
                lzmaStream.Flush(); //Lzma over reads. Knock it back
            }
        }
#if DEBUG_STREAMS
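Note (reviewer sketch, not part of this diff): the dispose-time Flush exists because the Deflate/LZMA decompressors read ahead of the entry boundary, and Flush triggers the buffer rewind shown in the ZlibBaseStream hunk below, so the shared archive stream ends up positioned at the next entry header. A minimal consumer-side sketch of the path that relies on this, assuming a hypothetical archive path and the public ReaderFactory/OpenEntryStream API:

using System.IO;
using SharpCompress.Readers;

// Partially read an entry, then dispose it; EntryStream disposal must "knock back"
// whatever the decompressor over-read so MoveToNextEntry still finds the next header.
using Stream archive = File.OpenRead("archive.zip"); // hypothetical archive path
using var reader = ReaderFactory.OpenReader(archive);
while (reader.MoveToNextEntry())
{
    if (reader.Entry.IsDirectory)
    {
        continue;
    }
    using var entry = reader.OpenEntryStream();
    var firstByte = entry.ReadByte(); // partial read only; Dispose handles the rest
}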
@@ -98,6 +84,39 @@ public partial class EntryStream : Stream, IStreamStack
        _stream.Dispose();
    }

#if !LEGACY_DOTNET
    public override async ValueTask DisposeAsync()
    {
        if (_isDisposed)
        {
            return;
        }
        _isDisposed = true;
        if (!(_completed || _reader.Cancelled))
        {
            await SkipEntryAsync().ConfigureAwait(false);
        }

        //Need a safe standard approach to this - it's okay for compression to overreads. Handling needs to be standardised
        if (_stream is IStreamStack ss)
        {
            if (ss.BaseStream() is SharpCompress.Compressors.Deflate.DeflateStream deflateStream)
            {
                await deflateStream.FlushAsync().ConfigureAwait(false);
            }
            else if (ss.BaseStream() is SharpCompress.Compressors.LZMA.LzmaStream lzmaStream)
            {
                await lzmaStream.FlushAsync().ConfigureAwait(false);
            }
        }
#if DEBUG_STREAMS
        this.DebugDispose(typeof(EntryStream));
#endif
        await base.DisposeAsync().ConfigureAwait(false);
        await _stream.DisposeAsync().ConfigureAwait(false);
    }
#endif

    public override bool CanRead => true;

    public override bool CanSeek => false;

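Note (reviewer sketch, not part of this diff): the new DisposeAsync mirrors the synchronous path so entry iteration can stay fully asynchronous. A minimal sketch of that consumption pattern, using the same calls as the async regression tests added below and assuming OpenAsyncReader accepts any readable Stream, as those tests suggest; the archive path is hypothetical:

using System.IO;
using SharpCompress.Readers;

using Stream archive = File.OpenRead("archive.zip"); // hypothetical archive path
await using var reader = ReaderFactory.OpenAsyncReader(archive);

var count = 0;
while (await reader.MoveToNextEntryAsync())
{
    if (!reader.Entry.IsDirectory)
    {
        count++; // entry streams and the reader now tear down via the async dispose path above
    }
}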
@@ -604,7 +604,13 @@ internal class ZlibBaseStream : Stream, IStreamStack

    public override void Flush()
    {
        _stream.Flush();
        // Only flush the underlying stream when in write mode
        // Flushing input streams during read operations is not meaningful
        // and can cause issues with forward-only/non-seekable streams
        if (_streamMode == StreamMode.Writer)
        {
            _stream.Flush();
        }
        //rewind the buffer
        ((IStreamStack)this).Rewind(z.AvailableBytesIn); //unused
        z.AvailableBytesIn = 0;
@@ -612,7 +618,13 @@ internal class ZlibBaseStream : Stream, IStreamStack

    public override async Task FlushAsync(CancellationToken cancellationToken)
    {
        await _stream.FlushAsync(cancellationToken).ConfigureAwait(false);
        // Only flush the underlying stream when in write mode
        // Flushing input streams during read operations is not meaningful
        // and can cause issues with forward-only/non-seekable streams
        if (_streamMode == StreamMode.Writer)
        {
            await _stream.FlushAsync(cancellationToken).ConfigureAwait(false);
        }
        //rewind the buffer
        ((IStreamStack)this).Rewind(z.AvailableBytesIn); //unused
        z.AvailableBytesIn = 0;

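Note (reviewer sketch, not part of this diff): the StreamMode.Writer guard matters because in read mode _stream is the caller's input, which may be forward-only and reject Flush outright. The new ThrowOnFlushStream mock added later in this diff models exactly that input; a minimal sketch of the failure the guard avoids, with a hypothetical archive path:

using System;
using System.IO;
using SharpCompress.Test.Mocks;

// A forward-only input that rejects Flush(), like a pipe or network stream.
using Stream input = new ThrowOnFlushStream(File.OpenRead("archive.zip")); // hypothetical path
try
{
    input.Flush();
}
catch (NotSupportedException)
{
    // Before this change the read-mode flush reached the input unconditionally; per the
    // regression tests below, the resulting exception made iteration stop after one entry.
    // With the StreamMode.Writer guard above, read-mode disposal no longer hits this.
}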
@@ -29,17 +29,25 @@ internal partial class BufferedSubStream : SharpCompressStream, IStreamStack
#if DEBUG_STREAMS
        this.DebugDispose(typeof(BufferedSubStream));
#endif
        if (disposing)
        if (_isDisposed)
        {
            return;
        }
        _isDisposed = true;

        if (disposing && _cache is not null)
        {
            ArrayPool<byte>.Shared.Return(_cache);
            _cache = null;
        }
        base.Dispose(disposing);
    }

    private int _cacheOffset;
    private int _cacheLength;
    private readonly byte[] _cache = ArrayPool<byte>.Shared.Rent(32 << 10);
    private byte[]? _cache = ArrayPool<byte>.Shared.Rent(81920);
    private long origin;
    private bool _isDisposed;

    private long BytesLeftToRead { get; set; }

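Note (reviewer sketch, not part of this diff): the _isDisposed flag plus nulling _cache makes Dispose idempotent, so one rented buffer is never returned to ArrayPool twice. The general pattern in isolation, with hypothetical names:

using System;
using System.Buffers;

sealed class PooledBuffer : IDisposable
{
    private byte[]? _buffer = ArrayPool<byte>.Shared.Rent(81920);
    private bool _isDisposed;

    public void Dispose()
    {
        if (_isDisposed)
        {
            return; // a second Dispose is a no-op, so the array is returned exactly once
        }
        _isDisposed = true;

        if (_buffer is not null)
        {
            ArrayPool<byte>.Shared.Return(_buffer);
            _buffer = null;
        }
    }
}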
@@ -61,19 +69,58 @@ internal partial class BufferedSubStream : SharpCompressStream, IStreamStack

    private void RefillCache()
    {
        var count = (int)Math.Min(BytesLeftToRead, _cache.Length);
        if (_isDisposed)
        {
            throw new ObjectDisposedException(nameof(BufferedSubStream));
        }

        var count = (int)Math.Min(BytesLeftToRead, _cache!.Length);
        _cacheOffset = 0;
        if (count == 0)
        {
            _cacheLength = 0;
            return;
        }
        Stream.Position = origin;

        // Only seek if we're not already at the correct position
        // This avoids expensive seek operations when reading sequentially
        if (Stream.CanSeek && Stream.Position != origin)
        {
            Stream.Position = origin;
        }

        _cacheLength = Stream.Read(_cache, 0, count);
        origin += _cacheLength;
        BytesLeftToRead -= _cacheLength;
    }

    private async ValueTask RefillCacheAsync(CancellationToken cancellationToken)
    {
        if (_isDisposed)
        {
            throw new ObjectDisposedException(nameof(BufferedSubStream));
        }

        var count = (int)Math.Min(BytesLeftToRead, _cache!.Length);
        _cacheOffset = 0;
        if (count == 0)
        {
            _cacheLength = 0;
            return;
        }
        // Only seek if we're not already at the correct position
        // This avoids expensive seek operations when reading sequentially
        if (Stream.CanSeek && Stream.Position != origin)
        {
            Stream.Position = origin;
        }
        _cacheLength = await Stream
            .ReadAsync(_cache, 0, count, cancellationToken)
            .ConfigureAwait(false);
        origin += _cacheLength;
        BytesLeftToRead -= _cacheLength;
    }

    public override int Read(byte[] buffer, int offset, int count)
    {
        if (count > Length)
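Note (reviewer sketch, not part of this diff): the CanSeek/position check skips redundant seeks during sequential reads and avoids touching Position on sources that do not support seeking at all. The pattern in isolation, with hypothetical names:

using System.IO;

static int ReadAt(Stream source, long desiredOffset, byte[] buffer, int count)
{
    // Only reposition when the source supports it and is not already there;
    // the CanSeek check short-circuits before Position is ever read on a
    // non-seekable source, which would otherwise throw NotSupportedException.
    if (source.CanSeek && source.Position != desiredOffset)
    {
        source.Position = desiredOffset;
    }
    return source.Read(buffer, 0, count);
}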
@@ -89,7 +136,7 @@ internal partial class BufferedSubStream : SharpCompressStream, IStreamStack
            }

            count = Math.Min(count, _cacheLength - _cacheOffset);
            Buffer.BlockCopy(_cache, _cacheOffset, buffer, offset, count);
            Buffer.BlockCopy(_cache!, _cacheOffset, buffer, offset, count);
            _cacheOffset += count;
        }

@@ -107,9 +154,64 @@ internal partial class BufferedSubStream : SharpCompressStream, IStreamStack
            }
        }

        return _cache[_cacheOffset++];
        return _cache![_cacheOffset++];
    }

    public override async Task<int> ReadAsync(
        byte[] buffer,
        int offset,
        int count,
        CancellationToken cancellationToken
    )
    {
        if (count > Length)
        {
            count = (int)Length;
        }

        if (count > 0)
        {
            if (_cacheOffset == _cacheLength)
            {
                await RefillCacheAsync(cancellationToken).ConfigureAwait(false);
            }

            count = Math.Min(count, _cacheLength - _cacheOffset);
            Buffer.BlockCopy(_cache!, _cacheOffset, buffer, offset, count);
            _cacheOffset += count;
        }

        return count;
    }

#if !LEGACY_DOTNET
    public override async ValueTask<int> ReadAsync(
        Memory<byte> buffer,
        CancellationToken cancellationToken = default
    )
    {
        var count = buffer.Length;
        if (count > Length)
        {
            count = (int)Length;
        }

        if (count > 0)
        {
            if (_cacheOffset == _cacheLength)
            {
                await RefillCacheAsync(cancellationToken).ConfigureAwait(false);
            }

            count = Math.Min(count, _cacheLength - _cacheOffset);
            _cache!.AsSpan(_cacheOffset, count).CopyTo(buffer.Span);
            _cacheOffset += count;
        }

        return count;
    }
#endif

    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();

    public override void SetLength(long value) => throw new NotSupportedException();

@@ -265,7 +265,6 @@ public partial class SharpCompressStream : Stream, IStreamStack
            ValidateBufferState();
        }

        long orig = _internalPosition;
        long targetPos;
        // Calculate the absolute target position based on origin
        switch (origin)

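Note (reviewer sketch, not part of this diff): the comment and switch above compute an absolute target position from the SeekOrigin argument. A compact sketch of that calculation, with hypothetical currentPosition and length values standing in for the stream's internal state:

using System;
using System.IO;

// Hypothetical stand-ins for the stream's internal position and length.
long currentPosition = 0, length = 1024, offset = 16;
SeekOrigin origin = SeekOrigin.Current;

long targetPos = origin switch
{
    SeekOrigin.Begin => offset,
    SeekOrigin.Current => currentPosition + offset,
    SeekOrigin.End => length + offset,
    _ => throw new ArgumentOutOfRangeException(nameof(origin)),
};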
@@ -216,9 +216,9 @@
    "net10.0": {
      "Microsoft.NET.ILLink.Tasks": {
        "type": "Direct",
        "requested": "[10.0.0, )",
        "resolved": "10.0.0",
        "contentHash": "kICGrGYEzCNI3wPzfEXcwNHgTvlvVn9yJDhSdRK+oZQy4jvYH529u7O0xf5ocQKzOMjfS07+3z9PKRIjrFMJDA=="
        "requested": "[10.0.2, )",
        "resolved": "10.0.2",
        "contentHash": "sXdDtMf2qcnbygw9OdE535c2lxSxrZP8gO4UhDJ0xiJbl1wIqXS1OTcTDFTIJPOFd6Mhcm8gPEthqWGUxBsTqw=="
      },
      "Microsoft.NETFramework.ReferenceAssemblies": {
        "type": "Direct",
@@ -264,9 +264,9 @@
    "net8.0": {
      "Microsoft.NET.ILLink.Tasks": {
        "type": "Direct",
        "requested": "[8.0.22, )",
        "resolved": "8.0.22",
        "contentHash": "MhcMithKEiyyNkD2ZfbDZPmcOdi0GheGfg8saEIIEfD/fol3iHmcV8TsZkD4ZYz5gdUuoX4YtlVySUU7Sxl9SQ=="
        "requested": "[8.0.23, )",
        "resolved": "8.0.23",
        "contentHash": "GqHiB1HbbODWPbY/lc5xLQH8siEEhNA0ptpJCC6X6adtAYNEzu5ZlqV3YHA3Gh7fuEwgA8XqVwMtH2KNtuQM1Q=="
      },
      "Microsoft.NETFramework.ReferenceAssemblies": {
        "type": "Direct",

73	tests/SharpCompress.Test/Mocks/ThrowOnFlushStream.cs	Normal file
@@ -0,0 +1,73 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

namespace SharpCompress.Test.Mocks;

/// <summary>
/// A stream wrapper that throws NotSupportedException on Flush() calls.
/// This is used to test that archive iteration handles streams that don't support flushing.
/// </summary>
public class ThrowOnFlushStream : Stream
{
    private readonly Stream inner;

    public ThrowOnFlushStream(Stream inner)
    {
        this.inner = inner;
    }

    public override bool CanRead => inner.CanRead;

    public override bool CanSeek => false;

    public override bool CanWrite => false;

    public override long Length => throw new NotSupportedException();

    public override long Position
    {
        get => throw new NotSupportedException();
        set => throw new NotSupportedException();
    }

    public override void Flush() => throw new NotSupportedException("Flush not supported");

    public override Task FlushAsync(CancellationToken cancellationToken) =>
        throw new NotSupportedException("FlushAsync not supported");

    public override int Read(byte[] buffer, int offset, int count) =>
        inner.Read(buffer, offset, count);

    public override Task<int> ReadAsync(
        byte[] buffer,
        int offset,
        int count,
        CancellationToken cancellationToken
    ) => inner.ReadAsync(buffer, offset, count, cancellationToken);

#if !NETFRAMEWORK && !NETSTANDARD2_0
    public override ValueTask<int> ReadAsync(
        Memory<byte> buffer,
        CancellationToken cancellationToken = default
    ) => inner.ReadAsync(buffer, cancellationToken);
#endif

    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();

    public override void SetLength(long value) => throw new NotSupportedException();

    public override void Write(byte[] buffer, int offset, int count) =>
        throw new NotSupportedException();

    protected override void Dispose(bool disposing)
    {
        if (disposing)
        {
            inner.Dispose();
        }

        base.Dispose(disposing);
    }
}
@@ -5,6 +5,7 @@ using System.Linq;
using System.Text;
using SharpCompress.Compressors.LZMA;
using SharpCompress.IO;
using SharpCompress.Test.Mocks;
using Xunit;

namespace SharpCompress.Test.Streams;
@@ -64,7 +65,14 @@ public class SharpCompressStreamTests
        {
            createData(ms);

            using (SharpCompressStream scs = new SharpCompressStream(ms, true, false, 0x10000))
            using (
                SharpCompressStream scs = new SharpCompressStream(
                    new ForwardOnlyStream(ms),
                    true,
                    false,
                    0x10000
                )
            )
            {
                IStreamStack stack = (IStreamStack)scs;

@@ -89,4 +97,25 @@ public class SharpCompressStreamTests
            }
        }
    }

    [Fact]
    public void BufferedSubStream_DoubleDispose_DoesNotCorruptArrayPool()
    {
        // This test verifies that calling Dispose multiple times on BufferedSubStream
        // doesn't return the same array to the pool twice, which would cause pool corruption
        byte[] data = new byte[0x10000];
        using (MemoryStream ms = new MemoryStream(data))
        {
            var stream = new BufferedSubStream(ms, 0, data.Length);

            // First disposal
            stream.Dispose();

            // Second disposal should not throw or corrupt the pool
            stream.Dispose();
        }

        // If we got here without an exception, the test passed
        Assert.True(true);
    }
}

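Note (reviewer sketch, not part of this diff): the "pool corruption" this test mentions is the shared pool handing one array to two renters after a double Return, since ArrayPool<byte>.Shared does not detect duplicate returns. A minimal sketch of that failure mode:

using System;
using System.Buffers;

var pool = ArrayPool<byte>.Shared;
byte[] rented = pool.Rent(1024);
pool.Return(rented);
pool.Return(rented); // the bug being guarded against: Return does not detect duplicates

byte[] a = pool.Rent(1024);
byte[] b = pool.Rent(1024);
// a and b may now be the same array, so two unrelated callers could overwrite each other.
Console.WriteLine(ReferenceEquals(a, b));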
@@ -337,4 +337,52 @@ public class ZipReaderAsyncTests : ReaderTests
            }
        }
    }

    [Fact]
    public async ValueTask Archive_Iteration_DoesNotBreak_WhenFlushThrows_Deflate_Async()
    {
        // Regression test: since 0.41.0, archive iteration would silently break
        // when the input stream throws NotSupportedException in Flush().
        // Only the first entry would be returned, then iteration would stop without exception.
        var path = Path.Combine(TEST_ARCHIVES_PATH, "Zip.deflate.dd.zip");
        using var fileStream = File.OpenRead(path);
        using Stream stream = new ThrowOnFlushStream(fileStream);
        await using var reader = ReaderFactory.OpenAsyncReader(new AsyncOnlyStream(stream));

        var count = 0;
        while (await reader.MoveToNextEntryAsync())
        {
            if (!reader.Entry.IsDirectory)
            {
                count++;
            }
        }

        // Should iterate through all entries, not just the first one
        Assert.True(count > 1, $"Expected more than 1 entry, but got {count}");
    }

    [Fact]
    public async ValueTask Archive_Iteration_DoesNotBreak_WhenFlushThrows_LZMA_Async()
    {
        // Regression test: since 0.41.0, archive iteration would silently break
        // when the input stream throws NotSupportedException in Flush().
        // Only the first entry would be returned, then iteration would stop without exception.
        var path = Path.Combine(TEST_ARCHIVES_PATH, "Zip.lzma.dd.zip");
        using var fileStream = File.OpenRead(path);
        using Stream stream = new ThrowOnFlushStream(fileStream);
        await using var reader = ReaderFactory.OpenAsyncReader(new AsyncOnlyStream(stream));

        var count = 0;
        while (await reader.MoveToNextEntryAsync())
        {
            if (!reader.Entry.IsDirectory)
            {
                count++;
            }
        }

        // Should iterate through all entries, not just the first one
        Assert.True(count > 1, $"Expected more than 1 entry, but got {count}");
    }
}

@@ -490,4 +490,52 @@ public class ZipReaderTests : ReaderTests
            }
        }
    }

    [Fact]
    public void Archive_Iteration_DoesNotBreak_WhenFlushThrows_Deflate()
    {
        // Regression test: since 0.41.0, archive iteration would silently break
        // when the input stream throws NotSupportedException in Flush().
        // Only the first entry would be returned, then iteration would stop without exception.
        var path = Path.Combine(TEST_ARCHIVES_PATH, "Zip.deflate.dd.zip");
        using var fileStream = File.OpenRead(path);
        using Stream stream = new ThrowOnFlushStream(fileStream);
        using var reader = ReaderFactory.OpenReader(stream);

        var count = 0;
        while (reader.MoveToNextEntry())
        {
            if (!reader.Entry.IsDirectory)
            {
                count++;
            }
        }

        // Should iterate through all entries, not just the first one
        Assert.True(count > 1, $"Expected more than 1 entry, but got {count}");
    }

    [Fact]
    public void Archive_Iteration_DoesNotBreak_WhenFlushThrows_LZMA()
    {
        // Regression test: since 0.41.0, archive iteration would silently break
        // when the input stream throws NotSupportedException in Flush().
        // Only the first entry would be returned, then iteration would stop without exception.
        var path = Path.Combine(TEST_ARCHIVES_PATH, "Zip.lzma.dd.zip");
        using var fileStream = File.OpenRead(path);
        using Stream stream = new ThrowOnFlushStream(fileStream);
        using var reader = ReaderFactory.OpenReader(stream);

        var count = 0;
        while (reader.MoveToNextEntry())
        {
            if (!reader.Entry.IsDirectory)
            {
                count++;
            }
        }

        // Should iterate through all entries, not just the first one
        Assert.True(count > 1, $"Expected more than 1 entry, but got {count}");
    }
}

@@ -319,6 +319,30 @@
        }
      }
    },
    ".NETFramework,Version=v4.8/win-x86": {
      "Microsoft.Win32.Registry": {
        "type": "Transitive",
        "resolved": "5.0.0",
        "contentHash": "dDoKi0PnDz31yAyETfRntsLArTlVAVzUzCIvvEDsDsucrl33Dl8pIJG06ePTJTI3tGpeyHS9Cq7Foc/s4EeKcg==",
        "dependencies": {
          "System.Security.AccessControl": "5.0.0",
          "System.Security.Principal.Windows": "5.0.0"
        }
      },
      "System.Security.AccessControl": {
        "type": "Transitive",
        "resolved": "5.0.0",
        "contentHash": "dagJ1mHZO3Ani8GH0PHpPEe/oYO+rVdbQjvjJkBRNQkX4t0r1iaeGn8+/ybkSLEan3/slM0t59SVdHzuHf2jmw==",
        "dependencies": {
          "System.Security.Principal.Windows": "5.0.0"
        }
      },
      "System.Security.Principal.Windows": {
        "type": "Transitive",
        "resolved": "5.0.0",
        "contentHash": "t0MGLukB5WAVU9bO3MGzvlGnyJPgUlcwerXn1kzBRjwLKixT96XV0Uza41W49gVd8zEMFu9vQEFlv0IOrytICA=="
      }
    },
    "net10.0": {
      "AwesomeAssertions": {
        "type": "Direct",

@@ -531,6 +555,13 @@
        "resolved": "10.0.0",
        "contentHash": "vFuwSLj9QJBbNR0NeNO4YVASUbokxs+i/xbuu8B+Fs4FAZg5QaFa6eGrMaRqTzzNI5tAb97T7BhSxtLckFyiRA=="
      }
    },
    "net10.0/win-x86": {
      "Microsoft.Win32.Registry": {
        "type": "Transitive",
        "resolved": "5.0.0",
        "contentHash": "dDoKi0PnDz31yAyETfRntsLArTlVAVzUzCIvvEDsDsucrl33Dl8pIJG06ePTJTI3tGpeyHS9Cq7Foc/s4EeKcg=="
      }
    }
  }
}