Mirror of https://github.com/adamhathcock/sharpcompress.git, synced 2026-02-04 13:34:59 +00:00
Compare commits
12 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 1f8bd6d7e3 | |
| | f893c1272c | |
| | e701f5277e | |
| | f85fd1f6a4 | |
| | 8f7ea420b3 | |
| | d8c8dabb52 | |
| | 9092ecf331 | |
| | 2fd9fe96ad | |
| | 02f68b793c | |
| | 57b9133a0f | |
| | 815f5e09e8 | |
| | 5bdf01ee59 | |
@@ -4,7 +4,6 @@ using System.IO;
using System.Linq;
using SharpCompress.Common;
using SharpCompress.Common.Tar;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;
using SharpCompress.Readers;
using SharpCompress.Readers.Tar;
@@ -13,6 +12,38 @@ using SharpCompress.Writers.Tar;

namespace SharpCompress.Archives.Tar
{
public static class PaxHeaders
{
public const string paxGNUSparseNumBlocks = "GNU.sparse.numblocks";
public const string paxGNUSparseOffset = "GNU.sparse.offset";
public const string paxGNUSparseNumBytes = "GNU.sparse.numbytes";
public const string paxGNUSparseMap = "GNU.sparse.map";
public const string paxGNUSparseName = "GNU.sparse.name";
public const string paxGNUSparseMajor = "GNU.sparse.major";
public const string paxGNUSparseMinor = "GNU.sparse.minor";
public const string paxGNUSparseSize = "GNU.sparse.size";
public const string paxGNUSparseRealSize = "GNU.sparse.realsize";
}

// Keywords for the PAX Extended Header
public static class PaxKeywords
{
public const string paxAtime = "atime";
public const string paxCharset = "charset";
public const string paxComment = "comment";
public const string paxCtime = "ctime"; // please note that ctime is not a valid pax header.
public const string paxGid = "gid";
public const string paxGname = "gname";
public const string paxLinkpath = "linkpath";
public const string paxMtime = "mtime";
public const string paxPath = "path";
public const string paxSize = "size";
public const string paxUid = "uid";
public const string paxUname = "uname";
public const string paxXattr = "SCHILY.xattr.";
public const string paxNone = "";
}
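
// Illustrative note: these keys appear inside the data block of a PAX extended header
// entry as length-prefixed "size key=value\n" records, the format parsePAXRecord below
// expects. Example records (values made up for illustration):
//
//   30 mtime=1493138824.755497457\n
//   21 path=dir/file.txt\n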

public class TarArchive : AbstractWritableArchive<TarArchiveEntry, TarVolume>
{
#if !NO_FILE
@@ -64,6 +95,7 @@ namespace SharpCompress.Archives.Tar
{
return false;
}

using (Stream stream = fileInfo.OpenRead())
{
return IsTarFile(stream);
@@ -82,6 +114,7 @@ namespace SharpCompress.Archives.Tar
catch
{
}

return false;
}

@@ -127,62 +160,269 @@ namespace SharpCompress.Archives.Tar
{
Stream stream = volumes.Single().Stream;
TarHeader previousHeader = null;
byte[] previousBytes = null;
foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream, ReaderOptions.ArchiveEncoding))
{
if (header != null)
{
if (header.EntryType == EntryType.LongName)
switch (header.EntryType)
{
previousHeader = header;
}
else
{
if (previousHeader != null)
case EntryType.GlobalExtendedHeader:
case EntryType.PosixExtendedHeader:
case EntryType.LongName:
{
var entry = new TarArchiveEntry(this, new TarFilePart(previousHeader, stream),
previousHeader = header;
var entry = new TarArchiveEntry(this,
new TarFilePart(previousHeader, stream),
CompressionType.None);

var oldStreamPos = stream.Position;

using (var entryStream = entry.OpenEntryStream())
{
using (var memoryStream = new MemoryStream())
{
entryStream.TransferTo(memoryStream);
memoryStream.Position = 0;
var bytes = memoryStream.ToArray();

header.Name = ReaderOptions.ArchiveEncoding.Decode(bytes).TrimNulls();
previousBytes = memoryStream.ToArray();
}
}

stream.Position = oldStreamPos;

previousHeader = null;
continue;
}
yield return new TarArchiveEntry(this, new TarFilePart(header, stream), CompressionType.None);
}

if (previousHeader != null && previousHeader.EntryType == EntryType.LongName)
{
header.Name = ReaderOptions.ArchiveEncoding.Decode(previousBytes).TrimNulls();

previousHeader = null;
previousBytes = null;
}

if (previousHeader != null && previousHeader.EntryType == EntryType.PosixExtendedHeader)
{
mergePAX(header, parsePAX(previousBytes));

previousHeader = null;
previousBytes = null;
}

yield return new TarArchiveEntry(this, new TarFilePart(header, stream), CompressionType.None);
}
}
}

// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned
private Dictionary<string, string> parsePAX(byte[] previousBytes)
{
byte[] s = previousBytes;

// For GNU PAX sparse format 0.0 support.
// This function transforms the sparse format 0.0 headers into format 0.1
// headers since 0.0 headers were not PAX compliant.
var sparseMap = new List<string>();

var extHdrs = new Dictionary<string, string>();
while (s.Length > 0)
{
var t = parsePAXRecord(s);
string key = ReaderOptions.ArchiveEncoding.Decode(t.Item1);
string value = ReaderOptions.ArchiveEncoding.Decode(t.Item2);
byte[] residual = t.Item3;
string x = ReaderOptions.ArchiveEncoding.Decode(t.Item3);
Console.WriteLine(x);
s = residual;

switch (key) {
case PaxHeaders.paxGNUSparseOffset:
case PaxHeaders.paxGNUSparseNumBytes:
// Validate sparse header order and value.
if ((sparseMap.Count%2 == 0 && key != PaxHeaders.paxGNUSparseOffset) ||
(sparseMap.Count%2 == 1 && key != PaxHeaders.paxGNUSparseNumBytes) ||
value.Contains(","))
{
extHdrs.Clear();
return extHdrs;
}

sparseMap.Add(value);
break;
default:
// According to PAX specification, a value is stored only if it is
// non-empty. Otherwise, the key is deleted.
if (value.Length > 0)
{
extHdrs[key] = value;
} else
{
extHdrs.Remove(key);
}

break;
}
}
if (sparseMap.Count > 0)
{
extHdrs[PaxHeaders.paxGNUSparseMap] = string.Join(",", sparseMap);
}

return extHdrs;
}

// parsePAXRecord parses the input PAX record string into a key-value pair.
// If parsing is successful, it will slice off the currently read record and
// return the remainder as r.
//
// A PAX record is of the following form:
// "%d %s=%s\n" % (size, key, value)
private Tuple<byte[], byte[], byte[]> parsePAXRecord(byte[] s) {
// The size field ends at the first space.
var sp = Array.IndexOf(s, (byte)' ');
if (sp == -1)
{
return Tuple.Create(Array.Empty<byte>(), Array.Empty<byte>(), s);
}

// Parse the first token as a decimal integer.
var x = s.Take(sp).ToArray();
var n = Convert.ToInt64(ReaderOptions.ArchiveEncoding.Decode(x), 10); // Intentionally parse as native int
if (n < 5 || s.Length < n) {
return Tuple.Create(Array.Empty<byte>(), Array.Empty<byte>(), s);
}

// Extract everything between the space and the final newline.
var rec = s.Skip(sp + 1).Take((int)n - sp - 2).ToArray();
var nl = s.Skip((int)n - 1).Take(1).Single();
var rem = s.Skip((int)n).ToArray();
if (nl != '\n') {
return Tuple.Create(Array.Empty<byte>(), Array.Empty<byte>(), s);
}

// The first equals separates the key from the value.
var eq = Array.IndexOf(rec, (byte)'=');
if (eq == -1) {
return Tuple.Create(Array.Empty<byte>(), Array.Empty<byte>(), s);
}
return Tuple.Create(rec.Take(eq).ToArray(), rec.Skip(eq + 1).ToArray(), rem);
}
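
// Worked example (illustrative): for the record "30 mtime=1493138824.755497457\n",
// sp == 2 (index of the space) and n == 30 (the full record length, counting the size
// digits, the space and the trailing '\n'); rec covers the 26 bytes
// "mtime=1493138824.755497457", nl is the '\n' at offset 29, and rem is everything
// from offset 30 onward. The '=' at offset 5 of rec splits it into key "mtime" and
// value "1493138824.755497457".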

// mergePAX merges well known headers according to PAX standard.
// In general headers with the same name as those found
// in the header struct overwrite those found in the header
// struct with higher precision or longer values. Esp. useful
// for name and linkname fields.
private void mergePAX(TarHeader hdr, Dictionary<string, string> headers)
{
foreach (var kv in headers)
{
switch (kv.Key)
{
case PaxKeywords.paxPath:
hdr.Name = kv.Value;
break;
//case PaxKeywords.paxLinkpath:
//hdr.Linkname = v
//case PaxKeywords.paxUname:
//hdr.Uname = v
//case PaxKeywords.paxGname:
//hdr.Gname = v
//case PaxKeywords.paxUid:
//id64, err = strconv.ParseInt(v, 10, 64)
//hdr.Uid = int(id64) // Integer overflow possible
//case PaxKeywords.paxGid:
//id64, err = strconv.ParseInt(v, 10, 64)
//hdr.Gid = int(id64) // Integer overflow possible
//case PaxKeywords.paxAtime:
//hdr.AccessTime, err = parsePAXTime(v)
case PaxKeywords.paxMtime:
hdr.LastModifiedTime = parsePAXTime(kv.Value).DateTime;
break;
//case PaxKeywords.paxCtime:
//hdr.ChangeTime, err = parsePAXTime(v)
case PaxKeywords.paxSize:
hdr.Size = long.Parse(kv.Value);
break;
/*default:
if (kv.Key.StartsWith(PaxKeywords.paxXattr)) {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
}
hdr.Xattrs[k[len(paxXattr):]] = v
}*/
}
}
}

// parsePAXTime takes a string of the form %d.%d as described in the PAX
// specification. Note that this implementation allows for negative timestamps,
// which is allowed for by the PAX specification, but not always portable.
private static DateTimeOffset parsePAXTime(string s)
{
//const int maxNanoSecondDigits = 9;

// Split string into seconds and sub-seconds parts.
var ss = s;
var sn = "";
var pos = s.IndexOf('.');
if (pos >= 0)
{
ss = s.Substring(0, pos);
sn = s.Substring(pos + 1);
}

// Parse the seconds.
var secs = long.Parse(ss);
// if (sn.Length == 0)
//{
return DateTimeOffset.FromUnixTimeSeconds(secs);
/*}

// Parse the nanoseconds.
if (sn.Trim("0123456789".ToCharArray()) != "") {
return DateTimeOffset.MinValue;
}
while (sn.Length < maxNanoSecondDigits)
{
sn += "0"; // Right pad
}

if (sn.Length > maxNanoSecondDigits) {
sn = sn.Substring(0, maxNanoSecondDigits); // Right truncate
}

var nsecs = long.Parse(sn); // Must succeed
if (ss.Length > 0 && ss[0] == '-')
{
return DateTimeOffset.FromUnixTimeSeconds(secs); // Negative correction
}
return time.Unix(secs, int64(nsecs)), nil*/
}
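
// Example (illustrative): parsePAXTime("1493138824.755497457") splits on '.' into
// ss == "1493138824" and sn == "755497457"; because the sub-second handling above is
// commented out, the result is simply DateTimeOffset.FromUnixTimeSeconds(1493138824),
// i.e. the fractional part is currently discarded.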

public static TarArchive Create()
{
return new TarArchive();
}

protected override TarArchiveEntry CreateEntryInternal(string filePath, Stream source,
long size, DateTime? modified, bool closeStream)
protected override TarArchiveEntry CreateEntryInternal(string filePath,
Stream source,
long size,
DateTime? modified,
bool closeStream)
{
return new TarWritableArchiveEntry(this, source, CompressionType.Unknown, filePath, size, modified,
return new TarWritableArchiveEntry(this,
source,
CompressionType.Unknown,
filePath,
size,
modified,
closeStream);
}

protected override void SaveTo(Stream stream, WriterOptions options,
protected override void SaveTo(Stream stream,
WriterOptions options,
IEnumerable<TarArchiveEntry> oldEntries,
IEnumerable<TarArchiveEntry> newEntries)
{
using (var writer = new TarWriter(stream, options))
using (var writer = new TarWriter(stream, new TarWriterOptions(options)))
{
foreach (var entry in oldEntries.Concat(newEntries)
.Where(x => !x.IsDirectory))

src/SharpCompress/Buffers/ArrayPool.cs (Normal file, 119 lines)
@@ -0,0 +1,119 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#if NETCORE
using System.Runtime.CompilerServices;
using System.Threading;

namespace SharpCompress.Buffers
{
/// <summary>
/// Provides a resource pool that enables reusing instances of type <see cref="T:T[]"/>.
/// </summary>
/// <remarks>
/// <para>
/// Renting and returning buffers with an <see cref="ArrayPool{T}"/> can increase performance
/// in situations where arrays are created and destroyed frequently, resulting in significant
/// memory pressure on the garbage collector.
/// </para>
/// <para>
/// This class is thread-safe. All members may be used by multiple threads concurrently.
/// </para>
/// </remarks>
internal abstract class ArrayPool<T>
{
/// <summary>The lazily-initialized shared pool instance.</summary>
private static ArrayPool<T> s_sharedInstance = null;

/// <summary>
/// Retrieves a shared <see cref="ArrayPool{T}"/> instance.
/// </summary>
/// <remarks>
/// The shared pool provides a default implementation of <see cref="ArrayPool{T}"/>
/// that's intended for general applicability. It maintains arrays of multiple sizes, and
/// may hand back a larger array than was actually requested, but will never hand back a smaller
/// array than was requested. Renting a buffer from it with <see cref="Rent"/> will result in an
/// existing buffer being taken from the pool if an appropriate buffer is available or in a new
/// buffer being allocated if one is not available.
/// </remarks>
public static ArrayPool<T> Shared
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
get { return Volatile.Read(ref s_sharedInstance) ?? EnsureSharedCreated(); }
}

/// <summary>Ensures that <see cref="s_sharedInstance"/> has been initialized to a pool and returns it.</summary>
[MethodImpl(MethodImplOptions.NoInlining)]
private static ArrayPool<T> EnsureSharedCreated()
{
Interlocked.CompareExchange(ref s_sharedInstance, Create(), null);
return s_sharedInstance;
}

/// <summary>
/// Creates a new <see cref="ArrayPool{T}"/> instance using default configuration options.
/// </summary>
/// <returns>A new <see cref="ArrayPool{T}"/> instance.</returns>
public static ArrayPool<T> Create()
{
return new DefaultArrayPool<T>();
}

/// <summary>
/// Creates a new <see cref="ArrayPool{T}"/> instance using custom configuration options.
/// </summary>
/// <param name="maxArrayLength">The maximum length of array instances that may be stored in the pool.</param>
/// <param name="maxArraysPerBucket">
/// The maximum number of array instances that may be stored in each bucket in the pool. The pool
/// groups arrays of similar lengths into buckets for faster access.
/// </param>
/// <returns>A new <see cref="ArrayPool{T}"/> instance with the specified configuration options.</returns>
/// <remarks>
/// The created pool will group arrays into buckets, with no more than <paramref name="maxArraysPerBucket"/>
/// in each bucket and with those arrays not exceeding <paramref name="maxArrayLength"/> in length.
/// </remarks>
public static ArrayPool<T> Create(int maxArrayLength, int maxArraysPerBucket)
{
return new DefaultArrayPool<T>(maxArrayLength, maxArraysPerBucket);
}

/// <summary>
/// Retrieves a buffer that is at least the requested length.
/// </summary>
/// <param name="minimumLength">The minimum length of the array needed.</param>
/// <returns>
/// An <see cref="T:T[]"/> that is at least <paramref name="minimumLength"/> in length.
/// </returns>
/// <remarks>
/// This buffer is loaned to the caller and should be returned to the same pool via
/// <see cref="Return"/> so that it may be reused in subsequent usage of <see cref="Rent"/>.
/// It is not a fatal error to not return a rented buffer, but failure to do so may lead to
/// decreased application performance, as the pool may need to create a new buffer to replace
/// the one lost.
/// </remarks>
public abstract T[] Rent(int minimumLength);

/// <summary>
/// Returns to the pool an array that was previously obtained via <see cref="Rent"/> on the same
/// <see cref="ArrayPool{T}"/> instance.
/// </summary>
/// <param name="array">
/// The buffer previously obtained from <see cref="Rent"/> to return to the pool.
/// </param>
/// <param name="clearArray">
/// If <c>true</c> and if the pool will store the buffer to enable subsequent reuse, <see cref="Return"/>
/// will clear <paramref name="array"/> of its contents so that a subsequent consumer via <see cref="Rent"/>
/// will not see the previous consumer's content. If <c>false</c> or if the pool will release the buffer,
/// the array's contents are left unchanged.
/// </param>
/// <remarks>
/// Once a buffer has been returned to the pool, the caller gives up all ownership of the buffer
/// and must not use it. The reference returned from a given call to <see cref="Rent"/> must only be
/// returned via <see cref="Return"/> once. The default <see cref="ArrayPool{T}"/>
/// may hold onto the returned buffer in order to rent it again, or it may release the returned buffer
/// if it's determined that the pool already has enough buffers stored.
/// </remarks>
public abstract void Return(T[] array, bool clearArray = false);
}
}
#endif
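
The pool above is consumed with a rent/try/finally/return pattern. A minimal usage sketch (illustrative; source and destination are placeholder streams, and 81920 matches the transfer buffer size rented by Utility.cs in this change):

byte[] buffer = ArrayPool<byte>.Shared.Rent(81920); // may hand back a larger array than requested
try
{
    int read = source.Read(buffer, 0, buffer.Length);
    destination.Write(buffer, 0, read);
}
finally
{
    ArrayPool<byte>.Shared.Return(buffer); // give the buffer back so it can be rented again
}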

src/SharpCompress/Buffers/DefaultArrayPool.cs (Normal file, 144 lines)
@@ -0,0 +1,144 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#if NETCORE
using System;

namespace SharpCompress.Buffers
{
internal sealed partial class DefaultArrayPool<T> : ArrayPool<T>
{
/// <summary>The default maximum length of each array in the pool (2^20).</summary>
private const int DefaultMaxArrayLength = 1024 * 1024;
/// <summary>The default maximum number of arrays per bucket that are available for rent.</summary>
private const int DefaultMaxNumberOfArraysPerBucket = 50;
/// <summary>Lazily-allocated empty array used when arrays of length 0 are requested.</summary>
private static T[] s_emptyArray; // we support contracts earlier than those with Array.Empty<T>()

private readonly Bucket[] _buckets;

internal DefaultArrayPool() : this(DefaultMaxArrayLength, DefaultMaxNumberOfArraysPerBucket)
{
}

internal DefaultArrayPool(int maxArrayLength, int maxArraysPerBucket)
{
if (maxArrayLength <= 0)
{
throw new ArgumentOutOfRangeException(nameof(maxArrayLength));
}
if (maxArraysPerBucket <= 0)
{
throw new ArgumentOutOfRangeException(nameof(maxArraysPerBucket));
}

// Our bucketing algorithm has a min length of 2^4 and a max length of 2^30.
// Constrain the actual max used to those values.
const int MinimumArrayLength = 0x10, MaximumArrayLength = 0x40000000;
if (maxArrayLength > MaximumArrayLength)
{
maxArrayLength = MaximumArrayLength;
}
else if (maxArrayLength < MinimumArrayLength)
{
maxArrayLength = MinimumArrayLength;
}

// Create the buckets.
int poolId = Id;
int maxBuckets = Utilities.SelectBucketIndex(maxArrayLength);
var buckets = new Bucket[maxBuckets + 1];
for (int i = 0; i < buckets.Length; i++)
{
buckets[i] = new Bucket(Utilities.GetMaxSizeForBucket(i), maxArraysPerBucket, poolId);
}
_buckets = buckets;
}

/// <summary>Gets an ID for the pool to use with events.</summary>
private int Id => GetHashCode();

public override T[] Rent(int minimumLength)
{
// Arrays can't be smaller than zero. We allow requesting zero-length arrays (even though
// pooling such an array isn't valuable) as it's a valid length array, and we want the pool
// to be usable in general instead of using `new`, even for computed lengths.
if (minimumLength < 0)
{
throw new ArgumentOutOfRangeException(nameof(minimumLength));
}
else if (minimumLength == 0)
{
// No need for events with the empty array. Our pool is effectively infinite
// and we'll never allocate for rents and never store for returns.
return s_emptyArray ?? (s_emptyArray = new T[0]);
}

T[] buffer = null;

int index = Utilities.SelectBucketIndex(minimumLength);
if (index < _buckets.Length)
{
// Search for an array starting at the 'index' bucket. If the bucket is empty, bump up to the
// next higher bucket and try that one, but only try at most a few buckets.
const int MaxBucketsToTry = 2;
int i = index;
do
{
// Attempt to rent from the bucket. If we get a buffer from it, return it.
buffer = _buckets[i].Rent();
if (buffer != null)
{
return buffer;
}
}
while (++i < _buckets.Length && i != index + MaxBucketsToTry);

// The pool was exhausted for this buffer size. Allocate a new buffer with a size corresponding
// to the appropriate bucket.
buffer = new T[_buckets[index]._bufferLength];
}
else
{
// The request was for a size too large for the pool. Allocate an array of exactly the requested length.
// When it's returned to the pool, we'll simply throw it away.
buffer = new T[minimumLength];
}

return buffer;
}

public override void Return(T[] array, bool clearArray = false)
{
if (array == null)
{
throw new ArgumentNullException(nameof(array));
}
else if (array.Length == 0)
{
// Ignore empty arrays. When a zero-length array is rented, we return a singleton
// rather than actually taking a buffer out of the lowest bucket.
return;
}

// Determine with what bucket this array length is associated
int bucket = Utilities.SelectBucketIndex(array.Length);

// If we can tell that the buffer was allocated, drop it. Otherwise, check if we have space in the pool
if (bucket < _buckets.Length)
{
// Clear the array if the user requests
if (clearArray)
{
Array.Clear(array, 0, array.Length);
}

// Return the buffer to its bucket. In the future, we might consider having Return return false
// instead of dropping a bucket, in which case we could try to return to a lower-sized bucket,
// just as how in Rent we allow renting from a higher-sized bucket.
_buckets[bucket].Return(array);
}
}
}
}
#endif

src/SharpCompress/Buffers/DefaultArrayPoolBucket.cs (Normal file, 111 lines)
@@ -0,0 +1,111 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#if NETCORE
using System;
using System.Diagnostics;
using System.Threading;

namespace SharpCompress.Buffers
{
internal sealed partial class DefaultArrayPool<T> : ArrayPool<T>
{
/// <summary>Provides a thread-safe bucket containing buffers that can be Rent'd and Return'd.</summary>
private sealed class Bucket
{
internal readonly int _bufferLength;
private readonly T[][] _buffers;
private readonly int _poolId;

private SpinLock _lock; // do not make this readonly; it's a mutable struct
private int _index;

/// <summary>
/// Creates the pool with numberOfBuffers arrays where each buffer is of bufferLength length.
/// </summary>
internal Bucket(int bufferLength, int numberOfBuffers, int poolId)
{
_lock = new SpinLock(Debugger.IsAttached); // only enable thread tracking if debugger is attached; it adds non-trivial overheads to Enter/Exit
_buffers = new T[numberOfBuffers][];
_bufferLength = bufferLength;
_poolId = poolId;
}

/// <summary>Gets an ID for the bucket to use with events.</summary>
internal int Id => GetHashCode();

/// <summary>Takes an array from the bucket. If the bucket is empty, returns null.</summary>
internal T[] Rent()
{
T[][] buffers = _buffers;
T[] buffer = null;

// While holding the lock, grab whatever is at the next available index and
// update the index. We do as little work as possible while holding the spin
// lock to minimize contention with other threads. The try/finally is
// necessary to properly handle thread aborts on platforms which have them.
bool lockTaken = false, allocateBuffer = false;
try
{
_lock.Enter(ref lockTaken);

if (_index < buffers.Length)
{
buffer = buffers[_index];
buffers[_index++] = null;
allocateBuffer = buffer == null;
}
}
finally
{
if (lockTaken) _lock.Exit(false);
}

// While we were holding the lock, we grabbed whatever was at the next available index, if
// there was one. If we tried and if we got back null, that means we hadn't yet allocated
// for that slot, in which case we should do so now.
if (allocateBuffer)
{
buffer = new T[_bufferLength];
}

return buffer;
}

/// <summary>
/// Attempts to return the buffer to the bucket. If successful, the buffer will be stored
/// in the bucket and true will be returned; otherwise, the buffer won't be stored, and false
/// will be returned.
/// </summary>
internal void Return(T[] array)
{
// Check to see if the buffer is the correct size for this bucket
if (array.Length != _bufferLength)
{
throw new ArgumentException("Buffer not from pool", nameof(array));
}

// While holding the spin lock, if there's room available in the bucket,
// put the buffer into the next available slot. Otherwise, we just drop it.
// The try/finally is necessary to properly handle thread aborts on platforms
// which have them.
bool lockTaken = false;
try
{
_lock.Enter(ref lockTaken);

if (_index != 0)
{
_buffers[--_index] = array;
}
}
finally
{
if (lockTaken) _lock.Exit(false);
}
}
}
}
}
#endif

src/SharpCompress/Buffers/Utilities.cs (Normal file, 38 lines)
@@ -0,0 +1,38 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#if NETCORE
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace SharpCompress.Buffers
{
internal static class Utilities
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static int SelectBucketIndex(int bufferSize)
{
Debug.Assert(bufferSize > 0);

uint bitsRemaining = ((uint)bufferSize - 1) >> 4;

int poolIndex = 0;
if (bitsRemaining > 0xFFFF) { bitsRemaining >>= 16; poolIndex = 16; }
if (bitsRemaining > 0xFF) { bitsRemaining >>= 8; poolIndex += 8; }
if (bitsRemaining > 0xF) { bitsRemaining >>= 4; poolIndex += 4; }
if (bitsRemaining > 0x3) { bitsRemaining >>= 2; poolIndex += 2; }
if (bitsRemaining > 0x1) { bitsRemaining >>= 1; poolIndex += 1; }

return poolIndex + (int)bitsRemaining;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static int GetMaxSizeForBucket(int binIndex)
{
int maxSize = 16 << binIndex;
Debug.Assert(maxSize >= 0);
return maxSize;
}
}
}
#endif
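
A quick worked example of the bucketing math above, using the 81920-byte transfer buffer rented elsewhere in this change: (81920 - 1) >> 4 == 5119; the shift cascade reduces 5119 to 1 while accumulating 8 + 4, so SelectBucketIndex(81920) == 13 and GetMaxSizeForBucket(13) == 16 << 13 == 131072. Renting 81920 bytes is therefore served from the 128 KiB bucket; bucket 0 holds 16-byte arrays and each subsequent bucket doubles the length.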

@@ -1,11 +1,11 @@
using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Converters;
using System.Text;
using SharpCompress.Common.Tar;

namespace SharpCompress.Common.GZip
{

@@ -1,4 +1,4 @@
namespace SharpCompress.Common.Tar.Headers
namespace SharpCompress.Common.Tar
{
internal enum EntryType : byte
{
@@ -14,6 +14,7 @@
LongName = (byte)'L',
SparseFile = (byte)'S',
VolumeHeader = (byte)'V',
GlobalExtendedHeader = (byte)'g'
GlobalExtendedHeader = (byte)'g',
PosixExtendedHeader = (byte)'x'
}
}
@@ -1,7 +1,6 @@
using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;
using System.Text;


@@ -1,5 +1,4 @@
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;

namespace SharpCompress.Common.Tar

@@ -3,7 +3,7 @@ using System.IO;
using System.Text;
using SharpCompress.Converters;

namespace SharpCompress.Common.Tar.Headers
namespace SharpCompress.Common.Tar
{
internal class TarHeader
{
@@ -129,11 +129,6 @@ namespace SharpCompress.Common.Tar.Headers
Name = namePrefix + "/" + Name;
}
}
if (EntryType != EntryType.LongName
&& Name.Length == 0)
{
return false;
}
return true;
}

@@ -1,6 +1,6 @@
using System.Collections.Generic;
using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;
using System.Text;

@@ -25,28 +25,31 @@ namespace SharpCompress.Common.Tar
switch (mode)
{
case StreamingMode.Seekable:
{
header.DataStartPosition = reader.BaseStream.Position;
{
header.DataStartPosition = reader.BaseStream.Position;

//skip to nearest 512
reader.BaseStream.Position += PadTo512(header.Size);
}
//skip to nearest 512
reader.BaseStream.Position += PadTo512(header.Size);
}
break;
case StreamingMode.Streaming:
{
header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
}
{
header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
}
break;
default:
{
throw new InvalidFormatException("Invalid StreamingMode");
}
{
throw new InvalidFormatException("Invalid StreamingMode");
}
}

}
catch
catch(Exception e)
{
Console.WriteLine(e);
header = null;
}

yield return header;
}
}
@@ -58,6 +61,7 @@ namespace SharpCompress.Common.Tar
{
return size;
}

return 512 - zeros + size;
}
}

@@ -28,9 +28,9 @@ using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Converters;
using System.Text;
using SharpCompress.Common.Tar;

namespace SharpCompress.Compressors.Deflate
{

@@ -2,11 +2,11 @@
<PropertyGroup>
<AssemblyTitle>SharpCompress - Pure C# Decompression/Compression</AssemblyTitle>
<NeutralLanguage>en-US</NeutralLanguage>
<VersionPrefix>0.19.0</VersionPrefix>
<AssemblyVersion>0.19.0.0</AssemblyVersion>
<FileVersion>0.19.0.0</FileVersion>
<VersionPrefix>0.19.2</VersionPrefix>
<AssemblyVersion>0.19.2.0</AssemblyVersion>
<FileVersion>0.19.2.0</FileVersion>
<Authors>Adam Hathcock</Authors>
<TargetFrameworks Condition="'$(LibraryFrameworks)'==''">net45;net35;netstandard1.0;netstandard1.3;netstandard2.0</TargetFrameworks>
<TargetFrameworks Condition="'$(LibraryFrameworks)'==''">netstandard2.0</TargetFrameworks>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<AssemblyName>SharpCompress</AssemblyName>
@@ -30,13 +30,4 @@
<PropertyGroup Condition=" '$(TargetFramework)' == 'netstandard2.0' ">
<DefineConstants>$(DefineConstants);NETCORE</DefineConstants>
</PropertyGroup>
<ItemGroup Condition=" '$(TargetFramework)' == 'netstandard1.3' ">
<PackageReference Include="System.Buffers" Version="4.3.0" />
</ItemGroup>
<ItemGroup Condition=" '$(TargetFramework)' == 'netstandard2.0' ">
<PackageReference Include="System.Buffers" Version="4.4.0" />
</ItemGroup>
<ItemGroup Condition=" '$(TargetFramework)' == 'net45' ">
<PackageReference Include="System.Buffers" Version="4.4.0" />
</ItemGroup>
</Project>
@@ -1,10 +1,10 @@
using System;
#if NETCORE || NET45
using System.Buffers;
#endif
using System.Collections.Generic;
using System.IO;
using System.Linq;
#if NETCORE
using SharpCompress.Buffers;
#endif
using SharpCompress.Readers;

namespace SharpCompress
@@ -141,6 +141,12 @@ namespace SharpCompress

public static void Skip(this Stream source, long advanceAmount)
{
if (source.CanSeek)
{
source.Position += advanceAmount;
return;
}

byte[] buffer = GetTransferByteArray();
try
{
@@ -168,7 +174,7 @@ namespace SharpCompress
}
finally
{
#if NETCORE || NET45
#if NETCORE
ArrayPool<byte>.Shared.Return(buffer);
#endif
}
@@ -186,7 +192,7 @@ namespace SharpCompress
}
finally
{
#if NETCORE || NET45
#if NETCORE
ArrayPool<byte>.Shared.Return(buffer);
#endif
}
@@ -267,7 +273,7 @@ namespace SharpCompress
}
finally
{
#if NETCORE || NET45
#if NETCORE
ArrayPool<byte>.Shared.Return(array);
#endif
}
@@ -292,7 +298,7 @@ namespace SharpCompress
}
finally
{
#if NETCORE || NET45
#if NETCORE
ArrayPool<byte>.Shared.Return(array);
#endif
}
@@ -305,7 +311,7 @@ namespace SharpCompress

private static byte[] GetTransferByteArray()
{
#if NETCORE || NET45
#if NETCORE
return ArrayPool<byte>.Shared.Rent(81920);
#else
return new byte[81920];

@@ -1,7 +1,7 @@
using System;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Common.Tar;
using SharpCompress.Compressors;
using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
@@ -11,9 +11,13 @@ namespace SharpCompress.Writers.Tar
{
public class TarWriter : AbstractWriter
{
public TarWriter(Stream destination, WriterOptions options)
private bool finalizeArchiveOnClose;

public TarWriter(Stream destination, TarWriterOptions options)
: base(ArchiveType.Tar, options)
{
finalizeArchiveOnClose = options.FinalizeArchiveOnClose;

if (!destination.CanWrite)
{
throw new ArgumentException("Tars require writable streams.");
@@ -97,8 +101,10 @@ namespace SharpCompress.Writers.Tar
{
if (isDisposing)
{
PadTo512(0, true);
PadTo512(0, true);
if (finalizeArchiveOnClose) {
PadTo512(0, true);
PadTo512(0, true);
}
switch (OutputStream)
{
case BZip2Stream b:

src/SharpCompress/Writers/Tar/TarWriterOptions.cs (Executable file, 23 lines)
@@ -0,0 +1,23 @@
using SharpCompress.Archives;
using SharpCompress.Common;

namespace SharpCompress.Writers.Tar
{
public class TarWriterOptions : WriterOptions
{
/// <summary>
/// Indicates if archive should be finalized (by 2 empty blocks) on close.
/// </summary>
public bool FinalizeArchiveOnClose { get; }

public TarWriterOptions(CompressionType compressionType, bool finalizeArchiveOnClose)
: base(compressionType)
{
FinalizeArchiveOnClose = finalizeArchiveOnClose;
}

internal TarWriterOptions(WriterOptions options) : this(options.CompressionType, true)
{
}
}
}
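
A minimal usage sketch for the new options type (file names here are placeholders): passing finalizeArchiveOnClose: false suppresses the two trailing 512-byte zero blocks that normally terminate a tar archive.

using (var stream = File.OpenWrite("archive.tar"))
using (var content = File.OpenRead("entry.bin"))
using (var writer = new TarWriter(stream, new TarWriterOptions(CompressionType.None, finalizeArchiveOnClose: false)))
{
    writer.Write("entry.bin", content, null);
}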

@@ -27,7 +27,7 @@ namespace SharpCompress.Writers
}
case ArchiveType.Tar:
{
return new TarWriter(stream, writerOptions);
return new TarWriter(stream, new TarWriterOptions(writerOptions));
}
default:
{

@@ -1,6 +1,6 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>netcoreapp1.1;netcoreapp2.0</TargetFrameworks>
<TargetFrameworks>netcoreapp2.0</TargetFrameworks>
<AssemblyName>SharpCompress.Test</AssemblyName>
<AssemblyOriginatorKeyFile>../../SharpCompress.snk</AssemblyOriginatorKeyFile>
<SignAssembly>true</SignAssembly>

@@ -1,4 +1,6 @@
using SharpCompress.Common;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Writers.Tar;
using Xunit;

namespace SharpCompress.Test.Tar
@@ -34,5 +36,22 @@ namespace SharpCompress.Test.Tar
{
Assert.Throws<InvalidFormatException>(() => Write(CompressionType.Rar, "Zip.ppmd.noEmptyDirs.zip", "Zip.ppmd.noEmptyDirs.zip"));
}

[Theory]
[InlineData(true)]
[InlineData(false)]
public void Tar_Finalize_Archive(bool finalizeArchive)
{
using (MemoryStream stream = new MemoryStream())
using (Stream content = File.OpenRead(Path.Combine(ORIGINAL_FILES_PATH, "jpg", "test.jpg"))) {
using (TarWriter writer = new TarWriter(stream, new TarWriterOptions(CompressionType.None, finalizeArchive))) {
writer.Write("doesn't matter", content, null);
}

var paddedContentWithHeader = content.Length / 512 * 512 + 512 + 512;
var expectedStreamLength = finalizeArchive ? paddedContentWithHeader + 512 * 2 : paddedContentWithHeader;
Assert.Equal(expectedStreamLength, stream.Length);
}
}
}
}
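
For the test above, the expected length falls out of tar's 512-byte blocking: content.Length / 512 * 512 + 512 rounds the (non-block-aligned) test.jpg up to the next 512-byte boundary, the extra + 512 is the entry's header block, and when finalizeArchive is true the writer appends two further 512-byte zero blocks (the + 512 * 2), matching the FinalizeArchiveOnClose behaviour described in TarWriterOptions.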
Binary file not shown.