mirror of https://github.com/adamhathcock/sharpcompress.git
synced 2026-02-05 21:23:57 +00:00
Compare commits
34 Commits
native_zli...tar_redux
| Author | SHA1 | Date |
|---|---|---|
| | 03618a704b | |
| | 925842bc4b | |
| | cead62704e | |
| | cce97548a2 | |
| | 264aa6d366 | |
| | a361d41e68 | |
| | 38766dac99 | |
| | c30bc65281 | |
| | 296ebd942a | |
| | afa19f7ad8 | |
| | a193b2d3b1 | |
| | be4a65e572 | |
| | 6832918e71 | |
| | fd9a3ffbcc | |
| | 41added690 | |
| | 18641d4f9b | |
| | 4d0c5099d4 | |
| | 9d9d491245 | |
| | 7b81d18071 | |
| | 7d0acbc988 | |
| | 313c044c41 | |
| | f6f8adf97e | |
| | bc97d325ca | |
| | 0f2d325f20 | |
| | 63d5503e12 | |
| | e53f2cac4a | |
| | 3b73464233 | |
| | 575f10f766 | |
| | 8d3fc3533b | |
| | 60370b8539 | |
| | f6db114865 | |
| | 1c6c344b6b | |
| | d0302898e0 | |
| | ba12019bc7 | |
10  .travis.yml  Normal file
@@ -0,0 +1,10 @@
dist: trusty
language: csharp
solution: SharpCompress.sln
matrix:
  include:
    - dotnet: 1.0.4
      mono: none
      env: DOTNETCORE=1
script:
  - ./build.sh
21  README.md
@@ -1,11 +1,15 @@
# SharpCompress

SharpCompress is a compression library for .NET/Mono/Silverlight/WP7 that can unrar, un7zip, unzip, untar unbzip2 and ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip are implemented.
SharpCompress is a compression library in pure C# for .NET 3.5, 4.5, .NET Standard 1.0, 1.3 that can unrar, un7zip, unzip, untar unbzip2 and ungzip with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip are implemented.

The major feature is support for non-seekable streams so large files can be processed on the fly (i.e. download stream).

AppVeyor Build -
[](https://ci.appveyor.com/project/adamhathcock/sharpcompress/branch/master)

Travis CI Build -
[](https://travis-ci.org/adamhathcock/sharpcompress)

## Need Help?
Post Issues on Github!

@@ -27,10 +31,23 @@ I'm always looking for help or ideas. Please submit code or email with ideas. Un
* 7Zip writing
* Zip64 (Need writing and extend Reading)
* Multi-volume Zip support.
* RAR5 support

## Version Log

### Version 0.16.1

* Fix [Preserve compression method when getting a compressed stream](https://github.com/adamhathcock/sharpcompress/pull/235)
* Fix [RAR entry key normalization fix](https://github.com/adamhathcock/sharpcompress/issues/201)

### Version 0.16.0

* Breaking - [Progress Event Tracking rethink](https://github.com/adamhathcock/sharpcompress/pull/226)
* Update to VS2017 - [VS2017](https://github.com/adamhathcock/sharpcompress/pull/231) - Framework targets have been changed.
* New - [Add Zip64 writing](https://github.com/adamhathcock/sharpcompress/pull/211)
* [Fix invalid/mismatching Zip version flags.](https://github.com/adamhathcock/sharpcompress/issues/164) - This allows nuget/System.IO.Packaging to read zip files generated by SharpCompress
* [Fix 7Zip directory hiding](https://github.com/adamhathcock/sharpcompress/pull/215/files)
* [Verify RAR CRC headers](https://github.com/adamhathcock/sharpcompress/pull/220)

### Version 0.15.2

* [Fix invalid headers](https://github.com/adamhathcock/sharpcompress/pull/210) - fixes an issue creating large-ish zip archives that was introduced with zip64 reading.
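The README above highlights forward-only reading on non-seekable streams. As a concrete illustration, here is a minimal sketch using the library's reader API (ReaderFactory, MoveToNextEntry and OpenEntryStream from the SharpCompress.Readers namespace); the archive path is hypothetical and exact signatures at this version should be treated as assumptions rather than canonical usage.

```csharp
using System.IO;
using SharpCompress.Readers;

class ForwardOnlyExample
{
    static void Main()
    {
        // Forward-only reading: suitable for non-seekable sources such as a download stream.
        using (Stream stream = File.OpenRead("example.tar.gz"))   // hypothetical archive path
        using (var reader = ReaderFactory.Open(stream))
        {
            while (reader.MoveToNextEntry())                      // entries must be consumed in order
            {
                if (reader.Entry.IsDirectory)
                {
                    continue;
                }
                using (var entryStream = reader.OpenEntryStream())
                using (var target = File.Create(Path.GetFileName(reader.Entry.Key)))
                {
                    entryStream.CopyTo(target);
                }
            }
        }
    }
}
```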
14  appveyor.yml
@@ -11,18 +11,10 @@ branches:
nuget:
  disable_publish_on_pr: true

before_build:
  - cmd: dotnet restore
build_script:
  - ps: .\build.ps1

build:
  parallel: true
  verbosity: minimal

after_build:
  - dotnet pack "src\SharpCompress\SharpCompress.csproj" -c Release

test_script:
  - dotnet test --no-build .\tests\SharpCompress.Test\SharpCompress.Test.csproj
test: off

artifacts:
  - path: src\SharpCompress\bin\Release\*.nupkg
93  build.cake  Normal file
@@ -0,0 +1,93 @@
var target = Argument("target", "Default");
var tag = Argument("tag", "cake");

Task("Restore")
    .Does(() =>
{
    DotNetCoreRestore(".");
});

Task("Build")
    .IsDependentOn("Restore")
    .Does(() =>
{
    if (IsRunningOnWindows())
    {
        MSBuild("./sharpcompress.sln", c =>
        {
            c.SetConfiguration("Release")
                .SetVerbosity(Verbosity.Minimal)
                .UseToolVersion(MSBuildToolVersion.VS2017);
        });
    }
    else
    {
        var settings = new DotNetCoreBuildSettings
        {
            Framework = "netstandard1.0",
            Configuration = "Release"
        };

        DotNetCoreBuild("./src/SharpCompress/SharpCompress.csproj", settings);

        settings.Framework = "netcoreapp1.1";
        DotNetCoreBuild("./tests/SharpCompress.Test/SharpCompress.Test.csproj", settings);
    }
});

Task("Test")
    .IsDependentOn("Build")
    .Does(() =>
{
    if (!bool.Parse(EnvironmentVariable("APPVEYOR") ?? "false")
        && !bool.Parse(EnvironmentVariable("TRAVIS") ?? "false"))
    {
        var files = GetFiles("tests/**/*.csproj");
        foreach(var file in files)
        {
            var settings = new DotNetCoreTestSettings
            {
                Configuration = "Release"
            };

            DotNetCoreTest(file.ToString(), settings);
        }
    }
    else
    {
        Information("Skipping tests as this is AppVeyor or Travis CI");
    }
});

Task("Pack")
    .IsDependentOn("Build")
    .Does(() =>
{
    if (IsRunningOnWindows())
    {
        MSBuild("src/SharpCompress/SharpCompress.csproj", c => c
            .SetConfiguration("Release")
            .SetVerbosity(Verbosity.Minimal)
            .UseToolVersion(MSBuildToolVersion.VS2017)
            .WithProperty("NoBuild", "true")
            .WithTarget("Pack"));
    }
    else
    {
        Information("Skipping Pack as this is not Windows");
    }
});

Task("Default")
    .IsDependentOn("Restore")
    .IsDependentOn("Build")
    .IsDependentOn("Test")
    .IsDependentOn("Pack");

Task("RunTests")
    .IsDependentOn("Restore")
    .IsDependentOn("Build")
    .IsDependentOn("Test");


RunTarget(target);
228  build.ps1  Normal file
@@ -0,0 +1,228 @@
##########################################################################
# This is the Cake bootstrapper script for PowerShell.
# This file was downloaded from https://github.com/cake-build/resources
# Feel free to change this file to fit your needs.
##########################################################################

<#

.SYNOPSIS
This is a Powershell script to bootstrap a Cake build.

.DESCRIPTION
This Powershell script will download NuGet if missing, restore NuGet tools (including Cake)
and execute your Cake build script with the parameters you provide.

.PARAMETER Script
The build script to execute.
.PARAMETER Target
The build script target to run.
.PARAMETER Configuration
The build configuration to use.
.PARAMETER Verbosity
Specifies the amount of information to be displayed.
.PARAMETER Experimental
Tells Cake to use the latest Roslyn release.
.PARAMETER WhatIf
Performs a dry run of the build script.
No tasks will be executed.
.PARAMETER Mono
Tells Cake to use the Mono scripting engine.
.PARAMETER SkipToolPackageRestore
Skips restoring of packages.
.PARAMETER ScriptArgs
Remaining arguments are added here.

.LINK
http://cakebuild.net

#>

[CmdletBinding()]
Param(
    [string]$Script = "build.cake",
    [string]$Target = "Default",
    [ValidateSet("Release", "Debug")]
    [string]$Configuration = "Release",
    [ValidateSet("Quiet", "Minimal", "Normal", "Verbose", "Diagnostic")]
    [string]$Verbosity = "Verbose",
    [switch]$Experimental,
    [Alias("DryRun","Noop")]
    [switch]$WhatIf,
    [switch]$Mono,
    [switch]$SkipToolPackageRestore,
    [Parameter(Position=0,Mandatory=$false,ValueFromRemainingArguments=$true)]
    [string[]]$ScriptArgs
)

[Reflection.Assembly]::LoadWithPartialName("System.Security") | Out-Null
function MD5HashFile([string] $filePath)
{
    if ([string]::IsNullOrEmpty($filePath) -or !(Test-Path $filePath -PathType Leaf))
    {
        return $null
    }

    [System.IO.Stream] $file = $null;
    [System.Security.Cryptography.MD5] $md5 = $null;
    try
    {
        $md5 = [System.Security.Cryptography.MD5]::Create()
        $file = [System.IO.File]::OpenRead($filePath)
        return [System.BitConverter]::ToString($md5.ComputeHash($file))
    }
    finally
    {
        if ($file -ne $null)
        {
            $file.Dispose()
        }
    }
}

Write-Host "Preparing to run build script..."

if(!$PSScriptRoot){
    $PSScriptRoot = Split-Path $MyInvocation.MyCommand.Path -Parent
}

$TOOLS_DIR = Join-Path $PSScriptRoot "tools"
$ADDINS_DIR = Join-Path $TOOLS_DIR "addins"
$MODULES_DIR = Join-Path $TOOLS_DIR "modules"
$NUGET_EXE = Join-Path $TOOLS_DIR "nuget.exe"
$CAKE_EXE = Join-Path $TOOLS_DIR "Cake/Cake.exe"
$NUGET_URL = "https://dist.nuget.org/win-x86-commandline/latest/nuget.exe"
$PACKAGES_CONFIG = Join-Path $TOOLS_DIR "packages.config"
$PACKAGES_CONFIG_MD5 = Join-Path $TOOLS_DIR "packages.config.md5sum"
$ADDINS_PACKAGES_CONFIG = Join-Path $ADDINS_DIR "packages.config"
$MODULES_PACKAGES_CONFIG = Join-Path $MODULES_DIR "packages.config"

# Should we use mono?
$UseMono = "";
if($Mono.IsPresent) {
    Write-Verbose -Message "Using the Mono based scripting engine."
    $UseMono = "-mono"
}

# Should we use the new Roslyn?
$UseExperimental = "";
if($Experimental.IsPresent -and !($Mono.IsPresent)) {
    Write-Verbose -Message "Using experimental version of Roslyn."
    $UseExperimental = "-experimental"
}

# Is this a dry run?
$UseDryRun = "";
if($WhatIf.IsPresent) {
    $UseDryRun = "-dryrun"
}

# Make sure tools folder exists
if ((Test-Path $PSScriptRoot) -and !(Test-Path $TOOLS_DIR)) {
    Write-Verbose -Message "Creating tools directory..."
    New-Item -Path $TOOLS_DIR -Type directory | out-null
}

# Make sure that packages.config exist.
if (!(Test-Path $PACKAGES_CONFIG)) {
    Write-Verbose -Message "Downloading packages.config..."
    try { (New-Object System.Net.WebClient).DownloadFile("http://cakebuild.net/download/bootstrapper/packages", $PACKAGES_CONFIG) } catch {
        Throw "Could not download packages.config."
    }
}

# Try find NuGet.exe in path if not exists
if (!(Test-Path $NUGET_EXE)) {
    Write-Verbose -Message "Trying to find nuget.exe in PATH..."
    $existingPaths = $Env:Path -Split ';' | Where-Object { (![string]::IsNullOrEmpty($_)) -and (Test-Path $_ -PathType Container) }
    $NUGET_EXE_IN_PATH = Get-ChildItem -Path $existingPaths -Filter "nuget.exe" | Select -First 1
    if ($NUGET_EXE_IN_PATH -ne $null -and (Test-Path $NUGET_EXE_IN_PATH.FullName)) {
        Write-Verbose -Message "Found in PATH at $($NUGET_EXE_IN_PATH.FullName)."
        $NUGET_EXE = $NUGET_EXE_IN_PATH.FullName
    }
}

# Try download NuGet.exe if not exists
if (!(Test-Path $NUGET_EXE)) {
    Write-Verbose -Message "Downloading NuGet.exe..."
    try {
        (New-Object System.Net.WebClient).DownloadFile($NUGET_URL, $NUGET_EXE)
    } catch {
        Throw "Could not download NuGet.exe."
    }
}

# Save nuget.exe path to environment to be available to child processed
$ENV:NUGET_EXE = $NUGET_EXE

# Restore tools from NuGet?
if(-Not $SkipToolPackageRestore.IsPresent) {
    Push-Location
    Set-Location $TOOLS_DIR

    # Check for changes in packages.config and remove installed tools if true.
    [string] $md5Hash = MD5HashFile($PACKAGES_CONFIG)
    if((!(Test-Path $PACKAGES_CONFIG_MD5)) -Or
      ($md5Hash -ne (Get-Content $PACKAGES_CONFIG_MD5 ))) {
        Write-Verbose -Message "Missing or changed package.config hash..."
        Remove-Item * -Recurse -Exclude packages.config,nuget.exe
    }

    Write-Verbose -Message "Restoring tools from NuGet..."
    $NuGetOutput = Invoke-Expression "&`"$NUGET_EXE`" install -ExcludeVersion -OutputDirectory `"$TOOLS_DIR`""

    if ($LASTEXITCODE -ne 0) {
        Throw "An error occured while restoring NuGet tools."
    }
    else
    {
        $md5Hash | Out-File $PACKAGES_CONFIG_MD5 -Encoding "ASCII"
    }
    Write-Verbose -Message ($NuGetOutput | out-string)

    Pop-Location
}

# Restore addins from NuGet
if (Test-Path $ADDINS_PACKAGES_CONFIG) {
    Push-Location
    Set-Location $ADDINS_DIR

    Write-Verbose -Message "Restoring addins from NuGet..."
    $NuGetOutput = Invoke-Expression "&`"$NUGET_EXE`" install -ExcludeVersion -OutputDirectory `"$ADDINS_DIR`""

    if ($LASTEXITCODE -ne 0) {
        Throw "An error occured while restoring NuGet addins."
    }

    Write-Verbose -Message ($NuGetOutput | out-string)

    Pop-Location
}

# Restore modules from NuGet
if (Test-Path $MODULES_PACKAGES_CONFIG) {
    Push-Location
    Set-Location $MODULES_DIR

    Write-Verbose -Message "Restoring modules from NuGet..."
    $NuGetOutput = Invoke-Expression "&`"$NUGET_EXE`" install -ExcludeVersion -OutputDirectory `"$MODULES_DIR`""

    if ($LASTEXITCODE -ne 0) {
        Throw "An error occured while restoring NuGet modules."
    }

    Write-Verbose -Message ($NuGetOutput | out-string)

    Pop-Location
}

# Make sure that Cake has been installed.
if (!(Test-Path $CAKE_EXE)) {
    Throw "Could not find Cake.exe at $CAKE_EXE"
}

# Start Cake
Write-Host "Running build script..."
Invoke-Expression "& `"$CAKE_EXE`" `"$Script`" -target=`"$Target`" -configuration=`"$Configuration`" -verbosity=`"$Verbosity`" $UseMono $UseDryRun $UseExperimental $ScriptArgs"
exit $LASTEXITCODE
42  build.sh  Executable file
@@ -0,0 +1,42 @@
#!/usr/bin/env bash
##########################################################################
# This is the Cake bootstrapper script for Linux and OS X.
# This file was downloaded from https://github.com/cake-build/resources
# Feel free to change this file to fit your needs.
##########################################################################

# Define directories.
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
TOOLS_DIR=$SCRIPT_DIR/tools
CAKE_VERSION=0.19.1
CAKE_DLL=$TOOLS_DIR/Cake.CoreCLR.$CAKE_VERSION/Cake.dll

# Make sure the tools folder exist.
if [ ! -d "$TOOLS_DIR" ]; then
    mkdir "$TOOLS_DIR"
fi

###########################################################################
# INSTALL CAKE
###########################################################################

if [ ! -f "$CAKE_DLL" ]; then
    curl -Lsfo Cake.CoreCLR.zip "https://www.nuget.org/api/v2/package/Cake.CoreCLR/$CAKE_VERSION" && unzip -q Cake.CoreCLR.zip -d "$TOOLS_DIR/Cake.CoreCLR.$CAKE_VERSION" && rm -f Cake.CoreCLR.zip
    if [ $? -ne 0 ]; then
        echo "An error occured while installing Cake."
        exit 1
    fi
fi

# Make sure that Cake has been installed.
if [ ! -f "$CAKE_DLL" ]; then
    echo "Could not find Cake.exe at '$CAKE_DLL'."
    exit 1
fi

###########################################################################
# RUN BUILD SCRIPT
###########################################################################

# Start Cake
exec dotnet "$CAKE_DLL" "$@"
@@ -4,7 +4,6 @@ using System.IO;
using System.Linq;
using SharpCompress.Common;
using SharpCompress.Common.Tar;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;
using SharpCompress.Readers;
using SharpCompress.Readers.Tar;
@@ -74,9 +73,9 @@ namespace SharpCompress.Archives.Tar
{
try
{
TarHeader tar = new TarHeader();
tar.Read(new BinaryReader(stream));
return tar.Name.Length > 0 && Enum.IsDefined(typeof(EntryType), tar.EntryType);
var input = new TarInputStream(stream);
var header = input.GetNextEntry();
return header.Name.Length > 0;
}
catch
{
@@ -131,7 +130,7 @@ namespace SharpCompress.Archives.Tar
{
if (header != null)
{
if (header.EntryType == EntryType.LongName)
if (header.TypeFlag == TarHeader.LF_GNU_LONGNAME)
{
previousHeader = header;
}

@@ -9,6 +9,6 @@ namespace SharpCompress.Common
Item = entry;
}

public T Item { get; private set; }
public T Item { get; }
}
}
@@ -1,7 +1,6 @@
using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Converters;

@@ -165,25 +165,13 @@ namespace SharpCompress.Common.Rar.Headers
#if NO_FILE
return path.Replace('\\', '/');
#else
switch (os)
if (Path.DirectorySeparatorChar == '/')
{
case HostOS.MacOS:
case HostOS.Unix:
{
if (Path.DirectorySeparatorChar == '\\')
{
return path.Replace('/', '\\');
}
}
break;
default:
{
if (Path.DirectorySeparatorChar == '/')
{
return path.Replace('\\', '/');
}
}
break;
return path.Replace('\\', '/');
}
else if (Path.DirectorySeparatorChar == '\\')
{
return path.Replace('/', '\\');
}
return path;
#endif

@@ -14,9 +14,9 @@ namespace SharpCompress.Common.Rar
FileHeader = fh;
}

internal MarkHeader MarkHeader { get; private set; }
internal MarkHeader MarkHeader { get; }

internal FileHeader FileHeader { get; private set; }
internal FileHeader FileHeader { get; }

internal override Stream GetRawStream()
{

@@ -11,7 +11,7 @@ namespace SharpCompress.Common
ReaderProgress = readerProgress;
}

public T Item { get; private set; }
public ReaderProgress ReaderProgress { get; private set; }
public T Item { get; }
public ReaderProgress ReaderProgress { get; }
}
}
@@ -1,19 +0,0 @@
namespace SharpCompress.Common.Tar.Headers
{
    internal enum EntryType : byte
    {
        File = 0,
        OldFile = (byte)'0',
        HardLink = (byte)'1',
        SymLink = (byte)'2',
        CharDevice = (byte)'3',
        BlockDevice = (byte)'4',
        Directory = (byte)'5',
        Fifo = (byte)'6',
        LongLink = (byte)'K',
        LongName = (byte)'L',
        SparseFile = (byte)'S',
        VolumeHeader = (byte)'V',
        GlobalExtendedHeader = (byte)'g'
    }
}
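The deleted enum values above are the single-byte type flags stored at offset 156 of a tar header. Elsewhere in this compare the same byte is read as TarHeader.TypeFlag and checked against constants such as TarHeader.LF_DIR and TarHeader.LF_GNU_LONGNAME; the small sketch below (hypothetical helper names, not part of the library) spells out the equivalence for the two flags that actually appear in this diff.

```csharp
class TypeFlagNotes
{
    // EntryType.Directory == (byte)'5'  corresponds to  TarHeader.LF_DIR
    // EntryType.LongName  == (byte)'L'  corresponds to  TarHeader.LF_GNU_LONGNAME
    static bool IsDirectory(byte typeFlag) => typeFlag == (byte)'5';
    static bool IsGnuLongName(byte typeFlag) => typeFlag == (byte)'L';

    static void Main()
    {
        System.Console.WriteLine(IsDirectory((byte)'5'));   // True
        System.Console.WriteLine(IsGnuLongName((byte)'L')); // True
    }
}
```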
@@ -1,269 +0,0 @@
using System;
using System.IO;
using System.Text;
using SharpCompress.Converters;

namespace SharpCompress.Common.Tar.Headers
{
    internal class TarHeader
    {
        internal static readonly DateTime Epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);

        internal string Name { get; set; }

        //internal int Mode { get; set; }
        //internal int UserId { get; set; }
        //internal string UserName { get; set; }
        //internal int GroupId { get; set; }
        //internal string GroupName { get; set; }
        internal long Size { get; set; }
        internal DateTime LastModifiedTime { get; set; }
        internal EntryType EntryType { get; set; }
        internal Stream PackedStream { get; set; }

        internal const int BlockSize = 512;

        internal void Write(Stream output)
        {
            byte[] buffer = new byte[BlockSize];

            WriteOctalBytes(511, buffer, 100, 8); // file mode
            WriteOctalBytes(0, buffer, 108, 8); // owner ID
            WriteOctalBytes(0, buffer, 116, 8); // group ID

            //Encoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
            if (Name.Length > 100)
            {
                // Set mock filename and filetype to indicate the next block is the actual name of the file
                WriteStringBytes("././@LongLink", buffer, 0, 100);
                buffer[156] = (byte)EntryType.LongName;
                WriteOctalBytes(Name.Length + 1, buffer, 124, 12);
            }
            else
            {
                WriteStringBytes(Name, buffer, 0, 100);
                WriteOctalBytes(Size, buffer, 124, 12);
                var time = (long)(LastModifiedTime.ToUniversalTime() - Epoch).TotalSeconds;
                WriteOctalBytes(time, buffer, 136, 12);
                buffer[156] = (byte)EntryType;

                if (Size >= 0x1FFFFFFFF)
                {
                    byte[] bytes = DataConverter.BigEndian.GetBytes(Size);
                    var bytes12 = new byte[12];
                    bytes.CopyTo(bytes12, 12 - bytes.Length);
                    bytes12[0] |= 0x80;
                    bytes12.CopyTo(buffer, 124);
                }
            }

            int crc = RecalculateChecksum(buffer);
            WriteOctalBytes(crc, buffer, 148, 8);

            output.Write(buffer, 0, buffer.Length);

            if (Name.Length > 100)
            {
                WriteLongFilenameHeader(output);
                Name = Name.Substring(0, 100);
                Write(output);
            }
        }

        private void WriteLongFilenameHeader(Stream output)
        {
            byte[] nameBytes = ArchiveEncoding.Default.GetBytes(Name);
            output.Write(nameBytes, 0, nameBytes.Length);

            // pad to multiple of BlockSize bytes, and make sure a terminating null is added
            int numPaddingBytes = BlockSize - (nameBytes.Length % BlockSize);
            if (numPaddingBytes == 0)
            {
                numPaddingBytes = BlockSize;
            }
            output.Write(new byte[numPaddingBytes], 0, numPaddingBytes);
        }

        internal bool Read(BinaryReader reader)
        {
            var buffer = ReadBlock(reader);
            if (buffer.Length == 0)
            {
                return false;
            }

            if (ReadEntryType(buffer) == EntryType.LongName)
            {
                Name = ReadLongName(reader, buffer);
                buffer = ReadBlock(reader);
            }
            else
            {
                Name = ArchiveEncoding.Default.GetString(buffer, 0, 100).TrimNulls();
            }

            EntryType = ReadEntryType(buffer);
            Size = ReadSize(buffer);

            //Mode = ReadASCIIInt32Base8(buffer, 100, 7);
            //UserId = ReadASCIIInt32Base8(buffer, 108, 7);
            //GroupId = ReadASCIIInt32Base8(buffer, 116, 7);
            long unixTimeStamp = ReadASCIIInt64Base8(buffer, 136, 11);
            LastModifiedTime = Epoch.AddSeconds(unixTimeStamp).ToLocalTime();

            Magic = ArchiveEncoding.Default.GetString(buffer, 257, 6).TrimNulls();

            if (!string.IsNullOrEmpty(Magic)
                && "ustar".Equals(Magic))
            {
                string namePrefix = ArchiveEncoding.Default.GetString(buffer, 345, 157);
                namePrefix = namePrefix.TrimNulls();
                if (!string.IsNullOrEmpty(namePrefix))
                {
                    Name = namePrefix + "/" + Name;
                }
            }
            if (EntryType != EntryType.LongName
                && Name.Length == 0)
            {
                return false;
            }
            return true;
        }

        private string ReadLongName(BinaryReader reader, byte[] buffer)
        {
            var size = ReadSize(buffer);
            var nameLength = (int)size;
            var nameBytes = reader.ReadBytes(nameLength);
            var remainingBytesToRead = BlockSize - (nameLength % BlockSize);

            // Read the rest of the block and discard the data
            if (remainingBytesToRead < BlockSize)
            {
                reader.ReadBytes(remainingBytesToRead);
            }
            return ArchiveEncoding.Default.GetString(nameBytes, 0, nameBytes.Length).TrimNulls();
        }

        private static EntryType ReadEntryType(byte[] buffer)
        {
            return (EntryType)buffer[156];
        }

        private long ReadSize(byte[] buffer)
        {
            if ((buffer[124] & 0x80) == 0x80) // if size in binary
            {
                return DataConverter.BigEndian.GetInt64(buffer, 0x80);
            }
            return ReadASCIIInt64Base8(buffer, 124, 11);
        }

        private static byte[] ReadBlock(BinaryReader reader)
        {
            byte[] buffer = reader.ReadBytes(BlockSize);

            if (buffer.Length != 0 && buffer.Length < BlockSize)
            {
                throw new InvalidOperationException("Buffer is invalid size");
            }
            return buffer;
        }

        private static void WriteStringBytes(string name, byte[] buffer, int offset, int length)
        {
            int i;

            for (i = 0; i < length - 1 && i < name.Length; ++i)
            {
                buffer[offset + i] = (byte)name[i];
            }

            for (; i < length; ++i)
            {
                buffer[offset + i] = 0;
            }
        }

        private static void WriteOctalBytes(long value, byte[] buffer, int offset, int length)
        {
            string val = Convert.ToString(value, 8);
            int shift = length - val.Length - 1;
            for (int i = 0; i < shift; i++)
            {
                buffer[offset + i] = (byte)' ';
            }
            for (int i = 0; i < val.Length; i++)
            {
                buffer[offset + i + shift] = (byte)val[i];
            }
        }

        private static int ReadASCIIInt32Base8(byte[] buffer, int offset, int count)
        {
            string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
            if (string.IsNullOrEmpty(s))
            {
                return 0;
            }
            return Convert.ToInt32(s, 8);
        }

        private static long ReadASCIIInt64Base8(byte[] buffer, int offset, int count)
        {
            string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
            if (string.IsNullOrEmpty(s))
            {
                return 0;
            }
            return Convert.ToInt64(s, 8);
        }

        private static long ReadASCIIInt64(byte[] buffer, int offset, int count)
        {
            string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
            if (string.IsNullOrEmpty(s))
            {
                return 0;
            }
            return Convert.ToInt64(s);
        }

        internal static int RecalculateChecksum(byte[] buf)
        {
            // Set default value for checksum. That is 8 spaces.
            Encoding.UTF8.GetBytes("        ").CopyTo(buf, 148);

            // Calculate checksum
            int headerChecksum = 0;
            foreach (byte b in buf)
            {
                headerChecksum += b;
            }
            return headerChecksum;
        }

        internal static int RecalculateAltChecksum(byte[] buf)
        {
            Encoding.UTF8.GetBytes("        ").CopyTo(buf, 148);
            int headerChecksum = 0;
            foreach (byte b in buf)
            {
                if ((b & 0x80) == 0x80)
                {
                    headerChecksum -= b ^ 0x80;
                }
                else
                {
                    headerChecksum += b;
                }
            }
            return headerChecksum;
        }

        public long? DataStartPosition { get; set; }

        public string Magic { get; set; }
    }
}
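The deleted header code above stores numeric fields as space-padded ASCII octal, leaving the last byte of the field as NUL. The short self-contained demo below reuses the deleted WriteOctalBytes logic verbatim to show what the 12-byte size field looks like for a 1024-byte entry (1024 decimal is 2000 octal).

```csharp
using System;
using System.Text;

class OctalFieldDemo
{
    // Copied from the removed TarHeader.WriteOctalBytes: leading spaces, octal digits,
    // and the final byte of the field left as 0.
    static void WriteOctalBytes(long value, byte[] buffer, int offset, int length)
    {
        string val = Convert.ToString(value, 8);
        int shift = length - val.Length - 1;
        for (int i = 0; i < shift; i++)
        {
            buffer[offset + i] = (byte)' ';
        }
        for (int i = 0; i < val.Length; i++)
        {
            buffer[offset + i + shift] = (byte)val[i];
        }
    }

    static void Main()
    {
        var sizeField = new byte[12];
        WriteOctalBytes(1024, sizeField, 0, 12);
        // Prints seven spaces, then "2000", followed by a trailing NUL byte.
        Console.WriteLine(Encoding.ASCII.GetString(sizeField));
    }
}
```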
541  src/SharpCompress/Common/Tar/TarBuffer.cs  Normal file
@@ -0,0 +1,541 @@
using System;
using System.IO;

namespace SharpCompress.Common.Tar
{
    /// <summary>
    /// The TarBuffer class implements the tar archive concept
    /// of a buffered input stream. This concept goes back to the
    /// days of blocked tape drives and special io devices. In the
    /// C# universe, the only real function that this class
    /// performs is to ensure that files have the correct "record"
    /// size, or other tars will complain.
    /// <p>
    /// You should never have a need to access this class directly.
    /// TarBuffers are created by Tar IO Streams.
    /// </p>
    /// </summary>
    public class TarBuffer
    {

        /* A quote from GNU tar man file on blocking and records
        A `tar' archive file contains a series of blocks. Each block
        contains `BLOCKSIZE' bytes. Although this format may be thought of as
        being on magnetic tape, other media are often used.

        Each file archived is represented by a header block which describes
        the file, followed by zero or more blocks which give the contents of
        the file. At the end of the archive file there may be a block filled
        with binary zeros as an end-of-file marker. A reasonable system should
        write a block of zeros at the end, but must not assume that such a
        block exists when reading an archive.

        The blocks may be "blocked" for physical I/O operations. Each
        record of N blocks is written with a single 'write ()'
        operation. On magnetic tapes, the result of such a write is a single
        record. When writing an archive, the last record of blocks should be
        written at the full size, with blocks after the zero block containing
        all zeros. When reading an archive, a reasonable system should
        properly handle an archive whose last record is shorter than the rest,
        or which contains garbage records after a zero block.
        */

        #region Constants
        /// <summary>
        /// The size of a block in a tar archive in bytes.
        /// </summary>
        /// <remarks>This is 512 bytes.</remarks>
        public const int BlockSize = 512;

        /// <summary>
        /// The number of blocks in a default record.
        /// </summary>
        /// <remarks>
        /// The default value is 20 blocks per record.
        /// </remarks>
        public const int DefaultBlockFactor = 20;

        /// <summary>
        /// The size in bytes of a default record.
        /// </summary>
        /// <remarks>
        /// The default size is 10KB.
        /// </remarks>
        public const int DefaultRecordSize = BlockSize * DefaultBlockFactor;
        #endregion

        /// <summary>
        /// Get the record size for this buffer
        /// </summary>
        /// <value>The record size in bytes.
        /// This is equal to the <see cref="BlockFactor"/> multiplied by the <see cref="BlockSize"/></value>
        public int RecordSize => recordSize;

        /// <summary>
        /// Get the TAR Buffer's record size.
        /// </summary>
        /// <returns>The record size in bytes.
        /// This is equal to the <see cref="BlockFactor"/> multiplied by the <see cref="BlockSize"/></returns>
        [Obsolete("Use RecordSize property instead")]
        public int GetRecordSize()
        {
            return recordSize;
        }

        /// <summary>
        /// Get the Blocking factor for the buffer
        /// </summary>
        /// <value>This is the number of blocks in each record.</value>
        public int BlockFactor => blockFactor;

        /// <summary>
        /// Get the TAR Buffer's block factor
        /// </summary>
        /// <returns>The block factor; the number of blocks per record.</returns>
        [Obsolete("Use BlockFactor property instead")]
        public int GetBlockFactor()
        {
            return blockFactor;
        }

        /// <summary>
        /// Construct a default TarBuffer
        /// </summary>
        protected TarBuffer()
        {
        }

        /// <summary>
        /// Create TarBuffer for reading with default BlockFactor
        /// </summary>
        /// <param name="inputStream">Stream to buffer</param>
        /// <returns>A new <see cref="TarBuffer"/> suitable for input.</returns>
        public static TarBuffer CreateInputTarBuffer(Stream inputStream)
        {
            if (inputStream == null) {
                throw new ArgumentNullException(nameof(inputStream));
            }

            return CreateInputTarBuffer(inputStream, DefaultBlockFactor);
        }

        /// <summary>
        /// Construct TarBuffer for reading inputStream setting BlockFactor
        /// </summary>
        /// <param name="inputStream">Stream to buffer</param>
        /// <param name="blockFactor">Blocking factor to apply</param>
        /// <returns>A new <see cref="TarBuffer"/> suitable for input.</returns>
        public static TarBuffer CreateInputTarBuffer(Stream inputStream, int blockFactor)
        {
            if (inputStream == null) {
                throw new ArgumentNullException(nameof(inputStream));
            }

            if (blockFactor <= 0) {
                throw new ArgumentOutOfRangeException(nameof(blockFactor), "Factor cannot be negative");
            }

            var tarBuffer = new TarBuffer
            {
                inputStream = inputStream,
                outputStream = null
            };
            tarBuffer.Initialize(blockFactor);

            return tarBuffer;
        }

        /// <summary>
        /// Construct TarBuffer for writing with default BlockFactor
        /// </summary>
        /// <param name="outputStream">output stream for buffer</param>
        /// <returns>A new <see cref="TarBuffer"/> suitable for output.</returns>
        public static TarBuffer CreateOutputTarBuffer(Stream outputStream)
        {
            if (outputStream == null) {
                throw new ArgumentNullException(nameof(outputStream));
            }

            return CreateOutputTarBuffer(outputStream, DefaultBlockFactor);
        }

        /// <summary>
        /// Construct TarBuffer for writing Tar output to streams.
        /// </summary>
        /// <param name="outputStream">Output stream to write to.</param>
        /// <param name="blockFactor">Blocking factor to apply</param>
        /// <returns>A new <see cref="TarBuffer"/> suitable for output.</returns>
        public static TarBuffer CreateOutputTarBuffer(Stream outputStream, int blockFactor)
        {
            if (outputStream == null) {
                throw new ArgumentNullException(nameof(outputStream));
            }

            if (blockFactor <= 0) {
                throw new ArgumentOutOfRangeException(nameof(blockFactor), "Factor cannot be negative");
            }

            var tarBuffer = new TarBuffer();
            tarBuffer.inputStream = null;
            tarBuffer.outputStream = outputStream;
            tarBuffer.Initialize(blockFactor);

            return tarBuffer;
        }

        /// <summary>
        /// Initialization common to all constructors.
        /// </summary>
        void Initialize(int archiveBlockFactor)
        {
            blockFactor = archiveBlockFactor;
            recordSize = archiveBlockFactor * BlockSize;
            recordBuffer = new byte[RecordSize];

            if (inputStream != null) {
                currentRecordIndex = -1;
                currentBlockIndex = BlockFactor;
            } else {
                currentRecordIndex = 0;
                currentBlockIndex = 0;
            }
        }

        /// <summary>
        /// Determine if an archive block indicates End of Archive. End of
        /// archive is indicated by a block that consists entirely of null bytes.
        /// All remaining blocks for the record should also be null's
        /// However some older tars only do a couple of null blocks (Old GNU tar for one)
        /// and also partial records
        /// </summary>
        /// <param name = "block">The data block to check.</param>
        /// <returns>Returns true if the block is an EOF block; false otherwise.</returns>
        [Obsolete("Use IsEndOfArchiveBlock instead")]
        public bool IsEOFBlock(byte[] block)
        {
            if (block == null) {
                throw new ArgumentNullException(nameof(block));
            }

            if (block.Length != BlockSize) {
                throw new ArgumentException("block length is invalid");
            }

            for (int i = 0; i < BlockSize; ++i) {
                if (block[i] != 0) {
                    return false;
                }
            }

            return true;
        }


        /// <summary>
        /// Determine if an archive block indicates the End of an Archive has been reached.
        /// End of archive is indicated by a block that consists entirely of null bytes.
        /// All remaining blocks for the record should also be null's
        /// However some older tars only do a couple of null blocks (Old GNU tar for one)
        /// and also partial records
        /// </summary>
        /// <param name = "block">The data block to check.</param>
        /// <returns>Returns true if the block is an EOF block; false otherwise.</returns>
        public static bool IsEndOfArchiveBlock(byte[] block)
        {
            if (block == null) {
                throw new ArgumentNullException(nameof(block));
            }

            if (block.Length != BlockSize) {
                throw new ArgumentException("block length is invalid");
            }

            for (int i = 0; i < BlockSize; ++i) {
                if (block[i] != 0) {
                    return false;
                }
            }

            return true;
        }

        /// <summary>
        /// Skip over a block on the input stream.
        /// </summary>
        public void SkipBlock()
        {
            if (inputStream == null) {
                throw new TarException("no input stream defined");
            }

            if (currentBlockIndex >= BlockFactor) {
                if (!ReadRecord()) {
                    throw new TarException("Failed to read a record");
                }
            }

            currentBlockIndex++;
        }

        /// <summary>
        /// Read a block from the input stream.
        /// </summary>
        /// <returns>
        /// The block of data read.
        /// </returns>
        public byte[] ReadBlock()
        {
            if (inputStream == null) {
                throw new TarException("TarBuffer.ReadBlock - no input stream defined");
            }

            if (currentBlockIndex >= BlockFactor) {
                if (!ReadRecord()) {
                    throw new TarException("Failed to read a record");
                }
            }

            byte[] result = new byte[BlockSize];

            Array.Copy(recordBuffer, (currentBlockIndex * BlockSize), result, 0, BlockSize);
            currentBlockIndex++;
            return result;
        }

        /// <summary>
        /// Read a record from data stream.
        /// </summary>
        /// <returns>
        /// false if End-Of-File, else true.
        /// </returns>
        bool ReadRecord()
        {
            if (inputStream == null) {
                throw new TarException("no input stream stream defined");
            }

            currentBlockIndex = 0;

            int offset = 0;
            int bytesNeeded = RecordSize;

            while (bytesNeeded > 0) {
                long numBytes = inputStream.Read(recordBuffer, offset, bytesNeeded);

                //
                // NOTE
                // We have found EOF, and the record is not full!
                //
                // This is a broken archive. It does not follow the standard
                // blocking algorithm. However, because we are generous, and
                // it requires little effort, we will simply ignore the error
                // and continue as if the entire record were read. This does
                // not appear to break anything upstream. We used to return
                // false in this case.
                //
                // Thanks to 'Yohann.Roussel@alcatel.fr' for this fix.
                //
                if (numBytes <= 0) {
                    break;
                }

                offset += (int)numBytes;
                bytesNeeded -= (int)numBytes;
            }

            currentRecordIndex++;
            return true;
        }

        /// <summary>
        /// Get the current block number, within the current record, zero based.
        /// </summary>
        /// <remarks>Block numbers are zero based values</remarks>
        /// <seealso cref="RecordSize"/>
        public int CurrentBlock => currentBlockIndex;

        /// <summary>
        /// Get/set flag indicating ownership of the underlying stream.
        /// When the flag is true <see cref="Close"></see> will close the underlying stream also.
        /// </summary>
        public bool IsStreamOwner {
            get => isStreamOwner_;
            set => isStreamOwner_ = value;
        }

        /// <summary>
        /// Get the current block number, within the current record, zero based.
        /// </summary>
        /// <returns>
        /// The current zero based block number.
        /// </returns>
        /// <remarks>
        /// The absolute block number = (<see cref="GetCurrentRecordNum">record number</see> * <see cref="BlockFactor">block factor</see>) + <see cref="GetCurrentBlockNum">block number</see>.
        /// </remarks>
        [Obsolete("Use CurrentBlock property instead")]
        public int GetCurrentBlockNum()
        {
            return currentBlockIndex;
        }

        /// <summary>
        /// Get the current record number.
        /// </summary>
        /// <returns>
        /// The current zero based record number.
        /// </returns>
        public int CurrentRecord => currentRecordIndex;

        /// <summary>
        /// Get the current record number.
        /// </summary>
        /// <returns>
        /// The current zero based record number.
        /// </returns>
        [Obsolete("Use CurrentRecord property instead")]
        public int GetCurrentRecordNum()
        {
            return currentRecordIndex;
        }

        /// <summary>
        /// Write a block of data to the archive.
        /// </summary>
        /// <param name="block">
        /// The data to write to the archive.
        /// </param>
        public void WriteBlock(byte[] block)
        {
            if (block == null) {
                throw new ArgumentNullException(nameof(block));
            }

            if (outputStream == null) {
                throw new TarException("TarBuffer.WriteBlock - no output stream defined");
            }

            if (block.Length != BlockSize) {
                string errorText = string.Format("TarBuffer.WriteBlock - block to write has length '{0}' which is not the block size of '{1}'",
                    block.Length, BlockSize);
                throw new TarException(errorText);
            }

            if (currentBlockIndex >= BlockFactor) {
                WriteRecord();
            }

            Array.Copy(block, 0, recordBuffer, (currentBlockIndex * BlockSize), BlockSize);
            currentBlockIndex++;
        }

        /// <summary>
        /// Write an archive record to the archive, where the record may be
        /// inside of a larger array buffer. The buffer must be "offset plus
        /// record size" long.
        /// </summary>
        /// <param name="buffer">
        /// The buffer containing the record data to write.
        /// </param>
        /// <param name="offset">
        /// The offset of the record data within buffer.
        /// </param>
        public void WriteBlock(byte[] buffer, int offset)
        {
            if (buffer == null) {
                throw new ArgumentNullException(nameof(buffer));
            }

            if (outputStream == null) {
                throw new TarException("TarBuffer.WriteBlock - no output stream stream defined");
            }

            if ((offset < 0) || (offset >= buffer.Length)) {
                throw new ArgumentOutOfRangeException(nameof(offset));
            }

            if ((offset + BlockSize) > buffer.Length) {
                string errorText = string.Format("TarBuffer.WriteBlock - record has length '{0}' with offset '{1}' which is less than the record size of '{2}'",
                    buffer.Length, offset, recordSize);
                throw new TarException(errorText);
            }

            if (currentBlockIndex >= BlockFactor) {
                WriteRecord();
            }

            Array.Copy(buffer, offset, recordBuffer, (currentBlockIndex * BlockSize), BlockSize);

            currentBlockIndex++;
        }

        /// <summary>
        /// Write a TarBuffer record to the archive.
        /// </summary>
        void WriteRecord()
        {
            if (outputStream == null) {
                throw new TarException("TarBuffer.WriteRecord no output stream defined");
            }

            outputStream.Write(recordBuffer, 0, RecordSize);
            outputStream.Flush();

            currentBlockIndex = 0;
            currentRecordIndex++;
        }

        /// <summary>
        /// WriteFinalRecord writes the current record buffer to output any unwritten data is present.
        /// </summary>
        /// <remarks>Any trailing bytes are set to zero which is by definition correct behaviour
        /// for the end of a tar stream.</remarks>
        void WriteFinalRecord()
        {
            if (outputStream == null) {
                throw new TarException("TarBuffer.WriteFinalRecord no output stream defined");
            }

            if (currentBlockIndex > 0) {
                int dataBytes = currentBlockIndex * BlockSize;
                Array.Clear(recordBuffer, dataBytes, RecordSize - dataBytes);
                WriteRecord();
            }

            outputStream.Flush();
        }

        /// <summary>
        /// Close the TarBuffer. If this is an output buffer, also flush the
        /// current block before closing.
        /// </summary>
        public void Close()
        {
            if (outputStream != null) {
                WriteFinalRecord();

                if (isStreamOwner_) {
                    outputStream.Dispose();
                }
                outputStream = null;
            } else if (inputStream != null) {
                if (isStreamOwner_) {
                    inputStream.Dispose();
                }
                inputStream = null;
            }
        }

        #region Instance Fields
        Stream inputStream;
        Stream outputStream;

        byte[] recordBuffer;
        int currentBlockIndex;
        int currentRecordIndex;

        int recordSize = DefaultRecordSize;
        int blockFactor = DefaultBlockFactor;
        bool isStreamOwner_ = true;
        #endregion
    }
}
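TarBuffer's own comments warn that callers normally go through the Tar IO streams, but a small sketch of its public surface makes the block/record model above concrete: blocks are 512 bytes, records default to 20 blocks (10240 bytes), and the archive ends with an all-zero block. The snippet only uses members added in this file (CreateInputTarBuffer, ReadBlock, IsEndOfArchiveBlock, IsStreamOwner, Close); the archive path is hypothetical and the loop assumes the archive ends with the standard zero blocks.

```csharp
using System;
using System.IO;
using SharpCompress.Common.Tar;

class BlockCount
{
    static void Main()
    {
        using (var stream = File.OpenRead("example.tar")) // hypothetical archive path
        {
            var buffer = TarBuffer.CreateInputTarBuffer(stream, TarBuffer.DefaultBlockFactor);
            int blocks = 0;
            // ReadBlock always returns a 512-byte block; an all-zero block marks end of archive.
            while (!TarBuffer.IsEndOfArchiveBlock(buffer.ReadBlock()))
            {
                blocks++;
            }
            Console.WriteLine($"{blocks} header/data blocks before the end-of-archive marker");
            buffer.IsStreamOwner = false; // the using block disposes the FileStream
            buffer.Close();
        }
    }
}
```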
@@ -1,7 +1,6 @@
using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;

namespace SharpCompress.Common.Tar
@@ -16,6 +15,8 @@ namespace SharpCompress.Common.Tar
CompressionType = type;
}

internal TarHeader Header => filePart.Header;

public override CompressionType CompressionType { get; }

public override long Crc => 0;
@@ -26,7 +27,7 @@ namespace SharpCompress.Common.Tar

public override long Size => filePart.Header.Size;

public override DateTime? LastModifiedTime => filePart.Header.LastModifiedTime;
public override DateTime? LastModifiedTime => filePart.Header.ModTime;

public override DateTime? CreatedTime => null;

@@ -36,7 +37,7 @@ namespace SharpCompress.Common.Tar

public override bool IsEncrypted => false;

public override bool IsDirectory => filePart.Header.EntryType == EntryType.Directory;
public override bool IsDirectory => filePart.Header.TypeFlag == TarHeader.LF_DIR;

public override bool IsSplit => false;

@@ -45,17 +46,18 @@ namespace SharpCompress.Common.Tar
internal static IEnumerable<TarEntry> GetEntries(StreamingMode mode, Stream stream,
CompressionType compressionType)
{
foreach (TarHeader h in TarHeaderFactory.ReadHeader(mode, stream))
using (var tarStream = new TarInputStream(stream))
{
if (h != null)
TarHeader header = null;
while ((header = tarStream.GetNextEntry()) != null)
{
if (mode == StreamingMode.Seekable)
{
yield return new TarEntry(new TarFilePart(h, stream), compressionType);
yield return new TarEntry(new TarFilePart(header, stream), compressionType);
}
else
{
yield return new TarEntry(new TarFilePart(h, null), compressionType);
yield return new TarEntry(new TarFilePart(header, null), compressionType);
}
}
}

19  src/SharpCompress/Common/Tar/TarException.cs  Normal file
@@ -0,0 +1,19 @@
using System;

namespace SharpCompress.Common.Tar
{
    /// <summary>
    /// TarException represents exceptions specific to Tar classes and code.
    /// </summary>
    public class TarException : ArchiveException
    {
        /// <summary>
        /// Initialise a new instance of <see cref="TarException" /> with its message string.
        /// </summary>
        /// <param name="message">A <see cref="string"/> that describes the error.</param>
        public TarException(string message)
            : base(message)
        {
        }
    }
}
@@ -1,5 +1,4 @@
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;

namespace SharpCompress.Common.Tar

1060  src/SharpCompress/Common/Tar/TarHeader.cs  Normal file
File diff suppressed because it is too large
@@ -1,62 +0,0 @@
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;

namespace SharpCompress.Common.Tar
{
    internal static class TarHeaderFactory
    {
        internal static IEnumerable<TarHeader> ReadHeader(StreamingMode mode, Stream stream)
        {
            while (true)
            {
                TarHeader header = null;
                try
                {
                    BinaryReader reader = new BinaryReader(stream);
                    header = new TarHeader();
                    if (!header.Read(reader))
                    {
                        yield break;
                    }
                    switch (mode)
                    {
                        case StreamingMode.Seekable:
                        {
                            header.DataStartPosition = reader.BaseStream.Position;

                            //skip to nearest 512
                            reader.BaseStream.Position += PadTo512(header.Size);
                        }
                        break;
                        case StreamingMode.Streaming:
                        {
                            header.PackedStream = new TarReadOnlySubStream(stream, header.Size);
                        }
                        break;
                        default:
                        {
                            throw new InvalidFormatException("Invalid StreamingMode");
                        }
                    }
                }
                catch
                {
                    header = null;
                }
                yield return header;
            }
        }

        private static long PadTo512(long size)
        {
            int zeros = (int)(size % 512);
            if (zeros == 0)
            {
                return size;
            }
            return 512 - zeros + size;
        }
    }
}
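The deleted factory's PadTo512 helper rounds an entry's size up to the next 512-byte block boundary so a seekable reader can jump straight past the file data. A couple of worked values, using the same arithmetic as the removed code:

```csharp
class PadTo512Demo
{
    // Same arithmetic as the removed TarHeaderFactory.PadTo512.
    static long PadTo512(long size)
    {
        int zeros = (int)(size % 512);
        return zeros == 0 ? size : 512 - zeros + size;
    }

    static void Main()
    {
        System.Console.WriteLine(PadTo512(700));  // 1024 (700 + 324 bytes of padding)
        System.Console.WriteLine(PadTo512(1024)); // 1024 (already block aligned)
        System.Console.WriteLine(PadTo512(0));    // 0
    }
}
```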
547  src/SharpCompress/Common/Tar/TarInputStream.cs  Normal file
@@ -0,0 +1,547 @@
using System;
using System.IO;
using System.Text;

namespace SharpCompress.Common.Tar
{
    /// <summary>
    /// The TarInputStream reads a UNIX tar archive as an InputStream.
    /// methods are provided to position at each successive entry in
    /// the archive, and the read each entry as a normal input stream
    /// using read().
    /// </summary>
    public class TarInputStream : Stream
    {
        #region Constructors

        /// <summary>
        /// Construct a TarInputStream with default block factor
        /// </summary>
        /// <param name="inputStream">stream to source data from</param>
        public TarInputStream(Stream inputStream)
            : this(inputStream, TarBuffer.DefaultBlockFactor)
        {
        }

        /// <summary>
        /// Construct a TarInputStream with user specified block factor
        /// </summary>
        /// <param name="inputStream">stream to source data from</param>
        /// <param name="blockFactor">block factor to apply to archive</param>
        public TarInputStream(Stream inputStream, int blockFactor)
        {
            this.inputStream = inputStream;
            tarBuffer = TarBuffer.CreateInputTarBuffer(inputStream, blockFactor);
        }

        #endregion

        /// <summary>
        /// Get/set flag indicating ownership of the underlying stream.
        /// When the flag is true <see cref="Close"></see> will close the underlying stream also.
        /// </summary>
        public bool IsStreamOwner { get => tarBuffer.IsStreamOwner; set => tarBuffer.IsStreamOwner = value; }

        #region Stream Overrides

        /// <summary>
        /// Gets a value indicating whether the current stream supports reading
        /// </summary>
        public override bool CanRead => inputStream.CanRead;

        /// <summary>
        /// Gets a value indicating whether the current stream supports seeking
        /// This property always returns false.
        /// </summary>
        public override bool CanSeek => false;

        /// <summary>
        /// Gets a value indicating if the stream supports writing.
        /// This property always returns false.
        /// </summary>
        public override bool CanWrite => false;

        /// <summary>
        /// The length in bytes of the stream
        /// </summary>
        public override long Length => inputStream.Length;

        /// <summary>
        /// Gets or sets the position within the stream.
        /// Setting the Position is not supported and throws a NotSupportedExceptionNotSupportedException
        /// </summary>
        /// <exception cref="NotSupportedException">Any attempt to set position</exception>
        public override long Position { get => inputStream.Position; set => throw new NotSupportedException("TarInputStream Seek not supported"); }

        /// <summary>
        /// Flushes the baseInputStream
        /// </summary>
        public override void Flush()
        {
            inputStream.Flush();
        }

        /// <summary>
        /// Set the streams position. This operation is not supported and will throw a NotSupportedException
        /// </summary>
        /// <param name="offset">The offset relative to the origin to seek to.</param>
        /// <param name="origin">The <see cref="SeekOrigin"/> to start seeking from.</param>
        /// <returns>The new position in the stream.</returns>
        /// <exception cref="NotSupportedException">Any access</exception>
        public override long Seek(long offset, SeekOrigin origin)
        {
            throw new NotSupportedException("TarInputStream Seek not supported");
        }

        /// <summary>
        /// Sets the length of the stream
        /// This operation is not supported and will throw a NotSupportedException
        /// </summary>
        /// <param name="value">The new stream length.</param>
        /// <exception cref="NotSupportedException">Any access</exception>
        public override void SetLength(long value)
        {
            throw new NotSupportedException("TarInputStream SetLength not supported");
        }

        /// <summary>
        /// Writes a block of bytes to this stream using data from a buffer.
        /// This operation is not supported and will throw a NotSupportedException
        /// </summary>
        /// <param name="buffer">The buffer containing bytes to write.</param>
        /// <param name="offset">The offset in the buffer of the frist byte to write.</param>
        /// <param name="count">The number of bytes to write.</param>
        /// <exception cref="NotSupportedException">Any access</exception>
        public override void Write(byte[] buffer, int offset, int count)
        {
            throw new NotSupportedException("TarInputStream Write not supported");
        }

        /// <summary>
        /// Writes a byte to the current position in the file stream.
        /// This operation is not supported and will throw a NotSupportedException
        /// </summary>
        /// <param name="value">The byte value to write.</param>
        /// <exception cref="NotSupportedException">Any access</exception>
        public override void WriteByte(byte value)
        {
            throw new NotSupportedException("TarInputStream WriteByte not supported");
        }

        /// <summary>
        /// Reads a byte from the current tar archive entry.
        /// </summary>
        /// <returns>A byte cast to an int; -1 if the at the end of the stream.</returns>
        public override int ReadByte()
        {
            byte[] oneByteBuffer = new byte[1];
            int num = Read(oneByteBuffer, 0, 1);
            if (num <= 0)
            {
                // return -1 to indicate that no byte was read.
                return -1;
            }
            return oneByteBuffer[0];
        }

        /// <summary>
        /// Reads bytes from the current tar archive entry.
        ///
        /// This method is aware of the boundaries of the current
        /// entry in the archive and will deal with them appropriately
        /// </summary>
        /// <param name="buffer">
        /// The buffer into which to place bytes read.
        /// </param>
        /// <param name="offset">
        /// The offset at which to place bytes read.
        /// </param>
        /// <param name="count">
        /// The number of bytes to read.
        /// </param>
        /// <returns>
        /// The number of bytes read, or 0 at end of stream/EOF.
        /// </returns>
        public override int Read(byte[] buffer, int offset, int count)
        {
            if (buffer == null)
            {
                throw new ArgumentNullException(nameof(buffer));
            }

            int totalRead = 0;

            if (entryOffset >= entrySize)
            {
                return 0;
            }

            long numToRead = count;

            if ((numToRead + entryOffset) > entrySize)
            {
                numToRead = entrySize - entryOffset;
            }

            if (readBuffer != null)
            {
                int sz = (numToRead > readBuffer.Length) ? readBuffer.Length : (int)numToRead;

                Array.Copy(readBuffer, 0, buffer, offset, sz);

                if (sz >= readBuffer.Length)
                {
                    readBuffer = null;
                }
                else
                {
                    int newLen = readBuffer.Length - sz;
                    byte[] newBuf = new byte[newLen];
                    Array.Copy(readBuffer, sz, newBuf, 0, newLen);
                    readBuffer = newBuf;
                }

                totalRead += sz;
                numToRead -= sz;
                offset += sz;
            }

            while (numToRead > 0)
            {
                byte[] rec = tarBuffer.ReadBlock();
                if (rec == null)
                {
                    // Unexpected EOF!
                    throw new TarException("unexpected EOF with " + numToRead + " bytes unread");
                }

                var sz = (int)numToRead;
                int recLen = rec.Length;

                if (recLen > sz)
                {
                    Array.Copy(rec, 0, buffer, offset, sz);
                    readBuffer = new byte[recLen - sz];
                    Array.Copy(rec, sz, readBuffer, 0, recLen - sz);
                }
                else
                {
                    sz = recLen;
                    Array.Copy(rec, 0, buffer, offset, recLen);
                }

                totalRead += sz;
                numToRead -= sz;
                offset += sz;
            }

            entryOffset += totalRead;

            return totalRead;
        }

        /// <summary>
        /// Closes this stream. Calls the TarBuffer's close() method.
        /// The underlying stream is closed by the TarBuffer.
        /// </summary>
        protected override void Dispose(bool disposing)
        {
            if (disposing)
            {
                tarBuffer.Close();
            }
        }

        #endregion


        /// <summary>
        /// Get the record size being used by this stream's TarBuffer.
        /// </summary>
        public int RecordSize => tarBuffer.RecordSize;
|
||||
|
||||
/// <summary>
|
||||
/// Get the record size being used by this stream's TarBuffer.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// TarBuffer record size.
|
||||
/// </returns>
|
||||
[Obsolete("Use RecordSize property instead")]
|
||||
public int GetRecordSize()
|
||||
{
|
||||
return tarBuffer.RecordSize;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the available data that can be read from the current
|
||||
/// entry in the archive. This does not indicate how much data
|
||||
/// is left in the entire archive, only in the current entry.
|
||||
/// This value is determined from the entry's size header field
|
||||
/// and the amount of data already read from the current entry.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The number of available bytes for the current entry.
|
||||
/// </returns>
|
||||
public long Available => entrySize - entryOffset;
|
||||
|
||||
/// <summary>
|
||||
/// Skip bytes in the input buffer. This skips bytes in the
|
||||
/// current entry's data, not the entire archive, and will
|
||||
/// stop at the end of the current entry's data if the number
|
||||
/// to skip extends beyond that point.
|
||||
/// </summary>
|
||||
/// <param name="skipCount">
|
||||
/// The number of bytes to skip.
|
||||
/// </param>
|
||||
public void Skip(long skipCount)
|
||||
{
|
||||
// TODO: REVIEW efficiency of TarInputStream.Skip
|
||||
// This is horribly inefficient, but it ensures that we
|
||||
// properly skip over bytes via the TarBuffer...
|
||||
//
|
||||
byte[] skipBuf = new byte[8 * 1024];
|
||||
|
||||
for (long num = skipCount; num > 0;)
|
||||
{
|
||||
int toRead = num > skipBuf.Length ? skipBuf.Length : (int)num;
|
||||
int numRead = Read(skipBuf, 0, toRead);
|
||||
|
||||
if (numRead == -1)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
num -= numRead;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Return a value of true if marking is supported; false otherwise.
|
||||
/// </summary>
|
||||
/// <remarks>Currently marking is not supported, the return value is always false.</remarks>
|
||||
public bool IsMarkSupported => false;
|
||||
|
||||
/// <summary>
|
||||
/// Since we do not support marking just yet, we do nothing.
|
||||
/// </summary>
|
||||
/// <param name ="markLimit">
|
||||
/// The limit to mark.
|
||||
/// </param>
|
||||
public void Mark(int markLimit)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Since we do not support marking just yet, we do nothing.
|
||||
/// </summary>
|
||||
public void Reset()
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Get the next entry in this tar archive. This will skip
|
||||
/// over any remaining data in the current entry, if there
|
||||
/// is one, and place the input stream at the header of the
|
||||
/// next entry, and read the header and instantiate a new
|
||||
/// TarEntry from the header bytes and return that entry.
|
||||
/// If there are no more entries in the archive, null will
|
||||
/// be returned to indicate that the end of the archive has
|
||||
/// been reached.
|
||||
/// </summary>
|
||||
/// <returns>
|
||||
/// The next TarEntry in the archive, or null.
|
||||
/// </returns>
|
||||
public TarHeader GetNextEntry()
|
||||
{
|
||||
if (hasHitEOF)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
if (currentEntry != null)
|
||||
{
|
||||
SkipToNextEntry();
|
||||
}
|
||||
|
||||
byte[] headerBuf = tarBuffer.ReadBlock();
|
||||
|
||||
if (headerBuf == null)
|
||||
{
|
||||
hasHitEOF = true;
|
||||
}
|
||||
else
|
||||
hasHitEOF |= TarBuffer.IsEndOfArchiveBlock(headerBuf);
|
||||
|
||||
if (hasHitEOF)
|
||||
{
|
||||
currentEntry = null;
|
||||
}
|
||||
else
|
||||
{
|
||||
try
|
||||
{
|
||||
var header = new TarHeader();
|
||||
header.ParseBuffer(headerBuf);
|
||||
if (!header.IsChecksumValid)
|
||||
{
|
||||
throw new TarException("Header checksum is invalid");
|
||||
}
|
||||
this.entryOffset = 0;
|
||||
this.entrySize = header.Size;
|
||||
|
||||
StringBuilder longName = null;
|
||||
|
||||
if (header.TypeFlag == TarHeader.LF_GNU_LONGNAME)
|
||||
{
|
||||
byte[] nameBuffer = new byte[TarBuffer.BlockSize];
|
||||
long numToRead = this.entrySize;
|
||||
|
||||
longName = new StringBuilder();
|
||||
|
||||
while (numToRead > 0)
|
||||
{
|
||||
int numRead = this.Read(nameBuffer, 0, (numToRead > nameBuffer.Length ? nameBuffer.Length : (int)numToRead));
|
||||
|
||||
if (numRead == -1)
|
||||
{
|
||||
throw new TarException("Failed to read long name entry");
|
||||
}
|
||||
|
||||
longName.Append(TarHeader.ParseName(nameBuffer, 0, numRead).ToString());
|
||||
numToRead -= numRead;
|
||||
}
|
||||
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag == TarHeader.LF_GHDR)
|
||||
{
|
||||
// POSIX global extended header
|
||||
// Ignore things we dont understand completely for now
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag == TarHeader.LF_XHDR)
|
||||
{
|
||||
// POSIX extended header
|
||||
// Ignore things we dont understand completely for now
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag == TarHeader.LF_GNU_VOLHDR)
|
||||
{
|
||||
// TODO: could show volume name when verbose
|
||||
SkipToNextEntry();
|
||||
headerBuf = this.tarBuffer.ReadBlock();
|
||||
}
|
||||
else if (header.TypeFlag != TarHeader.LF_NORMAL &&
|
||||
header.TypeFlag != TarHeader.LF_OLDNORM &&
|
||||
header.TypeFlag != TarHeader.LF_LINK &&
|
||||
header.TypeFlag != TarHeader.LF_SYMLINK &&
|
||||
header.TypeFlag != TarHeader.LF_DIR)
|
||||
{
|
||||
// Ignore things we dont understand completely for now
|
||||
SkipToNextEntry();
|
||||
headerBuf = tarBuffer.ReadBlock();
|
||||
}
|
||||
currentEntry = new TarHeader();
|
||||
|
||||
if (longName != null)
|
||||
{
|
||||
currentEntry.Name = longName.ToString();
|
||||
}
|
||||
|
||||
// Magic was checked here for 'ustar' but there are multiple valid possibilities
|
||||
// so this is not done anymore.
|
||||
|
||||
entryOffset = 0;
|
||||
|
||||
// TODO: Review How do we resolve this discrepancy?!
|
||||
entrySize = this.currentEntry.Size;
|
||||
}
|
||||
catch (TarException ex)
|
||||
{
|
||||
entrySize = 0;
|
||||
entryOffset = 0;
|
||||
currentEntry = null;
|
||||
string errorText = $"Bad header in record {tarBuffer.CurrentRecord} block {tarBuffer.CurrentBlock} {ex.Message}";
|
||||
throw new TarException(errorText);
|
||||
}
|
||||
}
|
||||
return currentEntry;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Copies the contents of the current tar archive entry directly into
|
||||
/// an output stream.
|
||||
/// </summary>
|
||||
/// <param name="outputStream">
|
||||
/// The OutputStream into which to write the entry's data.
|
||||
/// </param>
|
||||
public void CopyEntryContents(Stream outputStream)
|
||||
{
|
||||
byte[] tempBuffer = new byte[32 * 1024];
|
||||
|
||||
while (true)
|
||||
{
|
||||
int numRead = Read(tempBuffer, 0, tempBuffer.Length);
|
||||
if (numRead <= 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
outputStream.Write(tempBuffer, 0, numRead);
|
||||
}
|
||||
}
|
||||
|
||||
private void SkipToNextEntry()
|
||||
{
|
||||
long numToSkip = entrySize - entryOffset;
|
||||
|
||||
if (numToSkip > 0)
|
||||
{
|
||||
Skip(numToSkip);
|
||||
}
|
||||
|
||||
readBuffer = null;
|
||||
}
|
||||
|
||||
#region Instance Fields
|
||||
|
||||
/// <summary>
|
||||
/// Flag set when last block has been read
|
||||
/// </summary>
|
||||
protected bool hasHitEOF;
|
||||
|
||||
/// <summary>
|
||||
/// Size of this entry as recorded in header
|
||||
/// </summary>
|
||||
protected long entrySize;
|
||||
|
||||
/// <summary>
|
||||
/// Number of bytes read for this entry so far
|
||||
/// </summary>
|
||||
protected long entryOffset;
|
||||
|
||||
/// <summary>
|
||||
/// Buffer used with calls to <code>Read()</code>
|
||||
/// </summary>
|
||||
protected byte[] readBuffer;
|
||||
|
||||
/// <summary>
|
||||
/// Working buffer
|
||||
/// </summary>
|
||||
protected TarBuffer tarBuffer;
|
||||
|
||||
/// <summary>
|
||||
/// Current entry being read
|
||||
/// </summary>
|
||||
private TarHeader currentEntry;
|
||||
|
||||
/// <summary>
|
||||
/// Stream used as the source of input data.
|
||||
/// </summary>
|
||||
private readonly Stream inputStream;
|
||||
|
||||
#endregion
|
||||
}
|
||||
}
|
||||
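A minimal usage sketch of the reader above (not part of this commit; it assumes TarInputStream and TarHeader are reachable from the calling code and that a TarInputStream has already been constructed over the source stream; directory and link entries are not special-cased):

// Drain every entry of a tar stream via GetNextEntry/CopyEntryContents.
internal static class TarReadSketch
{
    public static void ExtractAll(TarInputStream tar, string targetDirectory)
    {
        TarHeader entry;
        while ((entry = tar.GetNextEntry()) != null) // null signals end of archive
        {
            string path = System.IO.Path.Combine(targetDirectory, entry.Name);
            using (var output = System.IO.File.Create(path))
            {
                tar.CopyEntryContents(output); // stops at the current entry's boundary
            }
        }
    }
}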
417
src/SharpCompress/Common/Tar/TarOutputStream.cs
Normal file
@@ -0,0 +1,417 @@
using System;
using System.IO;

namespace SharpCompress.Common.Tar
{
/// <summary>
/// The TarOutputStream writes a UNIX tar archive as an OutputStream.
/// Methods are provided to put entries, and then write their contents
/// by writing to this stream using write().
/// </summary>
public class TarOutputStream : Stream
{
#region Constructors
/// <summary>
/// Construct TarOutputStream using default block factor
/// </summary>
/// <param name="outputStream">stream to write to</param>
public TarOutputStream(Stream outputStream)
: this(outputStream, TarBuffer.DefaultBlockFactor)
{
}

/// <summary>
/// Construct TarOutputStream with user specified block factor
/// </summary>
/// <param name="outputStream">stream to write to</param>
/// <param name="blockFactor">blocking factor</param>
public TarOutputStream(Stream outputStream, int blockFactor)
{
if (outputStream == null) {
throw new ArgumentNullException(nameof(outputStream));
}

this.outputStream = outputStream;
buffer = TarBuffer.CreateOutputTarBuffer(outputStream, blockFactor);

assemblyBuffer = new byte[TarBuffer.BlockSize];
blockBuffer = new byte[TarBuffer.BlockSize];
}
#endregion

/// <summary>
/// Get/set flag indicating ownership of the underlying stream.
/// When the flag is true <see cref="Close"></see> will close the underlying stream also.
/// </summary>
public bool IsStreamOwner {
get => buffer.IsStreamOwner;
set => buffer.IsStreamOwner = value;
}

/// <summary>
/// true if the stream supports reading; otherwise, false.
/// </summary>
public override bool CanRead => outputStream.CanRead;

/// <summary>
/// true if the stream supports seeking; otherwise, false.
/// </summary>
public override bool CanSeek => outputStream.CanSeek;

/// <summary>
/// true if the stream supports writing; otherwise, false.
/// </summary>
public override bool CanWrite => outputStream.CanWrite;

/// <summary>
/// length of stream in bytes
/// </summary>
public override long Length => outputStream.Length;

/// <summary>
/// gets or sets the position within the current stream.
/// </summary>
public override long Position {
get => outputStream.Position;
set => outputStream.Position = value;
}

/// <summary>
/// set the position within the current stream
/// </summary>
/// <param name="offset">The offset relative to the <paramref name="origin"/> to seek to</param>
/// <param name="origin">The <see cref="SeekOrigin"/> to seek from.</param>
/// <returns>The new position in the stream.</returns>
public override long Seek(long offset, SeekOrigin origin)
{
return outputStream.Seek(offset, origin);
}

/// <summary>
/// Set the length of the current stream
/// </summary>
/// <param name="value">The new stream length.</param>
public override void SetLength(long value)
{
outputStream.SetLength(value);
}

/// <summary>
/// Read a byte from the stream and advance the position within the stream
/// by one byte, or return -1 if at the end of the stream.
/// </summary>
/// <returns>The byte value or -1 if at end of stream</returns>
public override int ReadByte()
{
return outputStream.ReadByte();
}

/// <summary>
/// Read bytes from the current stream and advance the position within the
/// stream by the number of bytes read.
/// </summary>
/// <param name="buffer">The buffer to store read bytes in.</param>
/// <param name="offset">The index into the buffer at which to begin storing bytes.</param>
/// <param name="count">The desired number of bytes to read.</param>
/// <returns>The total number of bytes read, or zero if at the end of the stream.
/// The number of bytes may be less than the <paramref name="count">count</paramref>
/// requested if data is not available.</returns>
public override int Read(byte[] buffer, int offset, int count)
{
return outputStream.Read(buffer, offset, count);
}

/// <summary>
/// All buffered data is written to destination
/// </summary>
public override void Flush()
{
outputStream.Flush();
}

/// <summary>
/// Ends the TAR archive without closing the underlying OutputStream.
/// The result is that the EOF block of nulls is written.
/// </summary>
public void Finish()
{
if (IsEntryOpen) {
CloseEntry();
}
WriteEofBlock();
}

/// <summary>
/// Ends the TAR archive and closes the underlying OutputStream.
/// </summary>
/// <remarks>This means that Finish() is called followed by calling the
/// TarBuffer's Close().</remarks>
protected override void Dispose(bool disposing)
{
if (!isClosed) {
isClosed = true;
Finish();
buffer.Close();
}
}

/// <summary>
/// Get the record size being used by this stream's TarBuffer.
/// </summary>
public int RecordSize => buffer.RecordSize;

/// <summary>
/// Get the record size being used by this stream's TarBuffer.
/// </summary>
/// <returns>
/// The TarBuffer record size.
/// </returns>
[Obsolete("Use RecordSize property instead")]
public int GetRecordSize()
{
return buffer.RecordSize;
}

/// <summary>
/// Get a value indicating whether an entry is open, requiring more data to be written.
/// </summary>
bool IsEntryOpen => (currBytes < currSize);

/// <summary>
/// Put an entry on the output stream. This writes the entry's
/// header and positions the output stream for writing
/// the contents of the entry. Once this method is called, the
/// stream is ready for calls to write() to write the entry's
/// contents. Once the contents are written, closeEntry()
/// <B>MUST</B> be called to ensure that all buffered data
/// is completely written to the output stream.
/// </summary>
/// <param name="entry">
/// The TarEntry to be written to the archive.
/// </param>
public void PutNextEntry(TarEntry entry)
{
if (entry == null) {
throw new ArgumentNullException(nameof(entry));
}

if (entry.TarHeader.Name.Length > TarHeader.NAMELEN) {
var longHeader = new TarHeader();
longHeader.TypeFlag = TarHeader.LF_GNU_LONGNAME;
longHeader.Name = longHeader.Name + "././@LongLink";
longHeader.Mode = 420; // 644 by default
longHeader.UserId = entry.UserId;
longHeader.GroupId = entry.GroupId;
longHeader.GroupName = entry.GroupName;
longHeader.UserName = entry.UserName;
longHeader.LinkName = "";
longHeader.Size = entry.TarHeader.Name.Length + 1; // Plus one to avoid dropping last char

longHeader.WriteHeader(blockBuffer);
buffer.WriteBlock(blockBuffer); // Add special long filename header block

int nameCharIndex = 0;

while (nameCharIndex < entry.TarHeader.Name.Length + 1 /* we've allocated one for the null char, now we must make sure it gets written out */) {
Array.Clear(blockBuffer, 0, blockBuffer.Length);
TarHeader.GetAsciiBytes(entry.TarHeader.Name, nameCharIndex, this.blockBuffer, 0, TarBuffer.BlockSize); // This func handles OK the extra char out of string length
nameCharIndex += TarBuffer.BlockSize;
buffer.WriteBlock(blockBuffer);
}
}

entry.WriteEntryHeader(blockBuffer);
buffer.WriteBlock(blockBuffer);

currBytes = 0;

currSize = entry.IsDirectory ? 0 : entry.Size;
}

/// <summary>
/// Close an entry. This method MUST be called for all file
/// entries that contain data. The reason is that we must
/// buffer data written to the stream in order to satisfy
/// the buffer's block based writes. Thus, there may be
/// data fragments still being assembled that must be written
/// to the output stream before this entry is closed and the
/// next entry written.
/// </summary>
public void CloseEntry()
{
if (assemblyBufferLength > 0) {
Array.Clear(assemblyBuffer, assemblyBufferLength, assemblyBuffer.Length - assemblyBufferLength);

buffer.WriteBlock(assemblyBuffer);

currBytes += assemblyBufferLength;
assemblyBufferLength = 0;
}

if (currBytes < currSize) {
string errorText = string.Format(
"Entry closed at '{0}' before the '{1}' bytes specified in the header were written",
currBytes, currSize);
throw new TarException(errorText);
}
}

/// <summary>
/// Writes a byte to the current tar archive entry.
/// This method simply calls Write(byte[], int, int).
/// </summary>
/// <param name="value">
/// The byte to be written.
/// </param>
public override void WriteByte(byte value)
{
Write(new byte[] { value }, 0, 1);
}

/// <summary>
/// Writes bytes to the current tar archive entry. This method
/// is aware of the current entry and will throw an exception if
/// you attempt to write bytes past the length specified for the
/// current entry. The method is also (painfully) aware of the
/// record buffering required by TarBuffer, and manages buffers
/// that are not a multiple of recordsize in length, including
/// assembling records from small buffers.
/// </summary>
/// <param name="buffer">
/// The buffer to write to the archive.
/// </param>
/// <param name="offset">
/// The offset in the buffer from which to get bytes.
/// </param>
/// <param name="count">
/// The number of bytes to write.
/// </param>
public override void Write(byte[] buffer, int offset, int count)
{
if (buffer == null) {
throw new ArgumentNullException(nameof(buffer));
}

if (offset < 0) {
throw new ArgumentOutOfRangeException(nameof(offset), "Cannot be negative");
}

if (buffer.Length - offset < count) {
throw new ArgumentException("offset and count combination is invalid");
}

if (count < 0) {
throw new ArgumentOutOfRangeException(nameof(count), "Cannot be negative");
}

if ((currBytes + count) > currSize) {
string errorText = string.Format("request to write '{0}' bytes exceeds size in header of '{1}' bytes",
count, this.currSize);
throw new ArgumentOutOfRangeException(nameof(count), errorText);
}

//
// We have to deal with assembly!!!
// The programmer can be writing little 32 byte chunks for all
// we know, and we must assemble complete blocks for writing.
// TODO REVIEW Maybe this should be in TarBuffer? Could that help to
// eliminate some of the buffer copying.
//
if (assemblyBufferLength > 0) {
if ((assemblyBufferLength + count) >= blockBuffer.Length) {
int aLen = blockBuffer.Length - assemblyBufferLength;

Array.Copy(assemblyBuffer, 0, blockBuffer, 0, assemblyBufferLength);
Array.Copy(buffer, offset, blockBuffer, assemblyBufferLength, aLen);

this.buffer.WriteBlock(blockBuffer);

currBytes += blockBuffer.Length;

offset += aLen;
count -= aLen;

assemblyBufferLength = 0;
} else {
Array.Copy(buffer, offset, assemblyBuffer, assemblyBufferLength, count);
offset += count;
assemblyBufferLength += count;
count -= count;
}
}

//
// When we get here we have EITHER:
// o An empty "assembly" buffer.
// o No bytes to write (count == 0)
//
while (count > 0) {
if (count < blockBuffer.Length) {
Array.Copy(buffer, offset, assemblyBuffer, assemblyBufferLength, count);
assemblyBufferLength += count;
break;
}

this.buffer.WriteBlock(buffer, offset);

int bufferLength = blockBuffer.Length;
currBytes += bufferLength;
count -= bufferLength;
offset += bufferLength;
}
}

/// <summary>
/// Write an EOF (end of archive) block to the tar archive.
/// An EOF block consists of all zeros.
/// </summary>
void WriteEofBlock()
{
Array.Clear(blockBuffer, 0, blockBuffer.Length);
buffer.WriteBlock(blockBuffer);
}

#region Instance Fields
/// <summary>
/// bytes written for this entry so far
/// </summary>
long currBytes;

/// <summary>
/// current 'Assembly' buffer length
/// </summary>
int assemblyBufferLength;

/// <summary>
/// Flag indicating whether this instance has been closed or not.
/// </summary>
bool isClosed;

/// <summary>
/// Size for the current entry
/// </summary>
protected long currSize;

/// <summary>
/// single block working buffer
/// </summary>
protected byte[] blockBuffer;

/// <summary>
/// 'Assembly' buffer used to assemble data before writing
/// </summary>
protected byte[] assemblyBuffer;

/// <summary>
/// TarBuffer used to provide correct blocking factor
/// </summary>
protected TarBuffer buffer;

/// <summary>
/// the destination stream for the archive contents
/// </summary>
protected Stream outputStream;
#endregion
}
}
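A minimal sketch of the PutNextEntry / Write / CloseEntry sequence described in the comments above (not part of this commit; it assumes a TarEntry has already been built elsewhere and that its Size matches the data being written):

internal static class TarWriteSketch
{
    public static void WriteEntry(System.IO.Stream destination, TarEntry entry, byte[] content)
    {
        using (var tar = new TarOutputStream(destination))
        {
            tar.PutNextEntry(entry);               // writes the header block(s)
            tar.Write(content, 0, content.Length); // must not exceed the size recorded in the header
            tar.CloseEntry();                      // pads out the final partially-filled block
        }                                          // Dispose calls Finish(), which writes the EOF block of zeros
    }
}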
@@ -1,90 +0,0 @@
using System;
using System.IO;

namespace SharpCompress.Common.Tar
{
internal class TarReadOnlySubStream : Stream
{
private bool isDisposed;
private long amountRead;

public TarReadOnlySubStream(Stream stream, long bytesToRead)
{
Stream = stream;
BytesLeftToRead = bytesToRead;
}

protected override void Dispose(bool disposing)
{
if (isDisposed)
{
return;
}
isDisposed = true;
if (disposing)
{
long skipBytes = amountRead % 512;
if (skipBytes == 0)
{
return;
}
skipBytes = 512 - skipBytes;
if (skipBytes == 0)
{
return;
}
var buffer = new byte[skipBytes];
Stream.ReadFully(buffer);
}
}

private long BytesLeftToRead { get; set; }

public Stream Stream { get; }

public override bool CanRead => true;

public override bool CanSeek => false;

public override bool CanWrite => false;

public override void Flush()
{
throw new NotSupportedException();
}

public override long Length => throw new NotSupportedException();

public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }

public override int Read(byte[] buffer, int offset, int count)
{
if (BytesLeftToRead < count)
{
count = (int)BytesLeftToRead;
}
int read = Stream.Read(buffer, offset, count);
if (read > 0)
{
BytesLeftToRead -= read;
amountRead += read;
}
return read;
}

public override long Seek(long offset, SeekOrigin origin)
{
throw new NotSupportedException();
}

public override void SetLength(long value)
{
throw new NotSupportedException();
}

public override void Write(byte[] buffer, int offset, int count)
{
throw new NotSupportedException();
}
}
}
@@ -21,18 +21,6 @@ namespace SharpCompress.Common.Zip.Headers
Comment = reader.ReadBytes(CommentLength);
}

internal override void Write(BinaryWriter writer)
{
writer.Write(VolumeNumber);
writer.Write(FirstVolumeWithDirectory);
writer.Write(TotalNumberOfEntriesInDisk);
writer.Write(TotalNumberOfEntries);
writer.Write(DirectorySize);
writer.Write(DirectoryStartOffsetRelativeToDisk);
writer.Write(CommentLength);
writer.Write(Comment);
}

public ushort VolumeNumber { get; private set; }

public ushort FirstVolumeWithDirectory { get; private set; }

@@ -61,56 +61,6 @@ namespace SharpCompress.Common.Zip.Headers
}
}

internal override void Write(BinaryWriter writer)
{
var zip64 = CompressedSize >= uint.MaxValue || UncompressedSize >= uint.MaxValue || RelativeOffsetOfEntryHeader >= uint.MaxValue;
if (zip64)
Version = (ushort)(Version > 45 ? Version : 45);

writer.Write(Version);
writer.Write(VersionNeededToExtract);
writer.Write((ushort)Flags);
writer.Write((ushort)CompressionMethod);
writer.Write(LastModifiedTime);
writer.Write(LastModifiedDate);
writer.Write(Crc);
writer.Write(zip64 ? uint.MaxValue : CompressedSize);
writer.Write(zip64 ? uint.MaxValue : UncompressedSize);

byte[] nameBytes = EncodeString(Name);
writer.Write((ushort)nameBytes.Length);

if (zip64)
{
writer.Write((ushort)(2 + 2 + 8 + 8 + 8 + 4));
}
else
{
//writer.Write((ushort)Extra.Length);
writer.Write((ushort)0);
}
writer.Write((ushort)Comment.Length);

writer.Write(DiskNumberStart);
writer.Write(InternalFileAttributes);
writer.Write(ExternalFileAttributes);
writer.Write(zip64 ? uint.MaxValue : RelativeOffsetOfEntryHeader);

writer.Write(nameBytes);

if (zip64)
{
writer.Write((ushort)0x0001);
writer.Write((ushort)((8 + 8 + 8 + 4)));

writer.Write((ulong)UncompressedSize);
writer.Write((ulong)CompressedSize);
writer.Write((ulong)RelativeOffsetOfEntryHeader);
writer.Write((uint)0); // VolumeNumber = 0
}
writer.Write(Comment);
}

internal ushort Version { get; private set; }

public ushort VersionNeededToExtract { get; set; }

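The extra-field arithmetic above follows the Zip64 extended-information layout (tag 0x0001); the constants below are an annotation for reference, not code from the commit:

internal static class Zip64ExtraFieldLayout
{
    // Data portion written for a central directory entry above:
    //   8 bytes uncompressed size + 8 bytes compressed size
    //   + 8 bytes local header offset + 4 bytes disk start number.
    public const int CentralDirectoryDataSize = 8 + 8 + 8 + 4;              // 28, the (ushort) data size written after the tag
    // The extra-field length recorded for the entry also counts the
    // 2-byte tag and 2-byte size prefix, giving the (2 + 2 + 8 + 8 + 8 + 4) expression.
    public const int CentralDirectoryTotalLength = 2 + 2 + CentralDirectoryDataSize; // 32
    // The local file header variant (see the next hunk) only carries the two sizes.
    public const int LocalHeaderTotalLength = 2 + 2 + (2 * 8);              // 20
}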
@@ -13,10 +13,5 @@ namespace SharpCompress.Common.Zip.Headers
internal override void Read(BinaryReader reader)
{
}

internal override void Write(BinaryWriter writer)
{
throw new NotImplementedException();
}
}
}
@@ -47,56 +47,6 @@ namespace SharpCompress.Common.Zip.Headers
}
}

internal override void Write(BinaryWriter writer)
{
if (IsZip64)
Version = (ushort)(Version > 45 ? Version : 45);

writer.Write(Version);

writer.Write((ushort)Flags);
writer.Write((ushort)CompressionMethod);
writer.Write(LastModifiedTime);
writer.Write(LastModifiedDate);
writer.Write(Crc);

if (IsZip64)
{
writer.Write(uint.MaxValue);
writer.Write(uint.MaxValue);
}
else
{
writer.Write(CompressedSize);
writer.Write(UncompressedSize);
}

byte[] nameBytes = EncodeString(Name);

writer.Write((ushort)nameBytes.Length);
if (IsZip64)
{
writer.Write((ushort)(2 + 2 + (2 * 8)));
}
else
{
writer.Write((ushort)0);
}

//if (Extra != null)
//{
// writer.Write(Extra);
//}
writer.Write(nameBytes);
if (IsZip64)
{
writer.Write((ushort)0x0001);
writer.Write((ushort)(2 * 8));
writer.Write((ulong)CompressedSize);
writer.Write((ulong)UncompressedSize);
}
}

internal ushort Version { get; private set; }
}
}
@@ -14,10 +14,5 @@ namespace SharpCompress.Common.Zip.Headers
{
throw new NotImplementedException();
}

internal override void Write(BinaryWriter writer)
{
throw new NotImplementedException();
}
}
}
@@ -26,11 +26,6 @@ namespace SharpCompress.Common.Zip.Headers

const int SizeOfFixedHeaderDataExceptSignatureAndSizeFields = 44;

internal override void Write(BinaryWriter writer)
{
throw new NotImplementedException();
}

public long SizeOfDirectoryEndRecord { get; private set; }

public ushort VersionMadeBy { get; private set; }

@@ -16,11 +16,6 @@ namespace SharpCompress.Common.Zip.Headers
TotalNumberOfVolumes = reader.ReadUInt32();
}

internal override void Write(BinaryWriter writer)
{
throw new System.NotImplementedException();
}

public uint FirstVolumeWithDirectory { get; private set; }

public long RelativeOffsetOfTheEndOfDirectoryRecord { get; private set; }

@@ -10,12 +10,10 @@ namespace SharpCompress.Common.Zip.Headers
HasData = true;
}

internal ZipHeaderType ZipHeaderType { get; private set; }
internal ZipHeaderType ZipHeaderType { get; }

internal abstract void Read(BinaryReader reader);

internal abstract void Write(BinaryWriter writer);

internal bool HasData { get; set; }
}
}
@@ -25,7 +25,7 @@ namespace SharpCompress.Common.Zip
{
return Stream.Null;
}
decompressionStream = CreateDecompressionStream(GetCryptoStream(CreateBaseStream()));
decompressionStream = CreateDecompressionStream(GetCryptoStream(CreateBaseStream()), Header.CompressionMethod);
if (LeaveStreamOpen)
{
return new NonDisposingStream(decompressionStream);

@@ -21,7 +21,7 @@ namespace SharpCompress.Common.Zip
BaseStream = stream;
}

internal Stream BaseStream { get; private set; }
internal Stream BaseStream { get; }
internal ZipFileEntry Header { get; set; }

internal override string FilePartName => Header.Name;
@@ -32,7 +32,7 @@ namespace SharpCompress.Common.Zip
{
return Stream.Null;
}
Stream decompressionStream = CreateDecompressionStream(GetCryptoStream(CreateBaseStream()));
Stream decompressionStream = CreateDecompressionStream(GetCryptoStream(CreateBaseStream()), Header.CompressionMethod);
if (LeaveStreamOpen)
{
return new NonDisposingStream(decompressionStream);
@@ -53,9 +53,9 @@ namespace SharpCompress.Common.Zip

protected bool LeaveStreamOpen => FlagUtility.HasFlag(Header.Flags, HeaderFlags.UsePostDataDescriptor) || Header.IsZip64;

protected Stream CreateDecompressionStream(Stream stream)
protected Stream CreateDecompressionStream(Stream stream, ZipCompressionMethod method)
{
switch (Header.CompressionMethod)
switch (method)
{
case ZipCompressionMethod.None:
{
@@ -102,9 +102,9 @@ namespace SharpCompress.Common.Zip
{
throw new InvalidFormatException("Winzip data length is not 7.");
}
ushort method = DataConverter.LittleEndian.GetUInt16(data.DataBytes, 0);
ushort compressedMethod = DataConverter.LittleEndian.GetUInt16(data.DataBytes, 0);

if (method != 0x01 && method != 0x02)
if (compressedMethod != 0x01 && compressedMethod != 0x02)
{
throw new InvalidFormatException("Unexpected vendor version number for WinZip AES metadata");
}
@@ -114,8 +114,7 @@ namespace SharpCompress.Common.Zip
{
throw new InvalidFormatException("Unexpected vendor ID for WinZip AES metadata");
}
Header.CompressionMethod = (ZipCompressionMethod)DataConverter.LittleEndian.GetUInt16(data.DataBytes, 5);
return CreateDecompressionStream(stream);
return CreateDecompressionStream(stream, (ZipCompressionMethod)DataConverter.LittleEndian.GetUInt16(data.DataBytes, 5));
}
default:
{

@@ -28,7 +28,6 @@ using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Converters;

namespace SharpCompress.Compressors.Deflate

@@ -7,7 +7,7 @@ namespace SharpCompress.Compressors.Rar.Decode
Dif = new int[11];
}

internal int[] Dif { get; private set; }
internal int[] Dif { get; }
internal int ByteCount { get; set; }
internal int D1 { get; set; }

@@ -17,17 +17,17 @@ namespace SharpCompress.Compressors.Rar.Decode
/// <summary> returns the decode Length array</summary>
/// <returns> decodeLength
/// </returns>
internal int[] DecodeLen { get; private set; }
internal int[] DecodeLen { get; }

/// <summary> returns the decode num array</summary>
/// <returns> decodeNum
/// </returns>
internal int[] DecodeNum { get; private set; }
internal int[] DecodeNum { get; }

/// <summary> returns the decodePos array</summary>
/// <returns> decodePos
/// </returns>
internal int[] DecodePos { get; private set; }
internal int[] DecodePos { get; }

internal int MaxNum { get; set; }
}

@@ -10,8 +10,8 @@ namespace SharpCompress.Compressors.Rar.VM

internal VMCommands OpCode { get; set; }
internal bool IsByteMode { get; set; }
internal VMPreparedOperand Op1 { get; private set; }
internal VMPreparedOperand Op1 { get; }

internal VMPreparedOperand Op2 { get; private set; }
internal VMPreparedOperand Op2 { get; }
}
}
@@ -9,10 +9,10 @@ namespace SharpCompress.Compressors.Rar.VM
Type = type;
}

internal int Length { get; private set; }
internal int Length { get; }

internal uint CRC { get; private set; }
internal uint CRC { get; }

internal VMStandardFilters Type { get; private set; }
internal VMStandardFilters Type { get; }
}
}
@@ -8,8 +8,8 @@ namespace SharpCompress.Readers
public class ReaderProgress
{
private readonly IEntry _entry;
public long BytesTransferred { get; private set; }
public int Iterations { get; private set; }
public long BytesTransferred { get; }
public int Iterations { get; }

public int PercentageRead => (int)Math.Round(PercentageReadExact);
public double PercentageReadExact => (float)BytesTransferred / _entry.Size * 100;

@@ -3,11 +3,12 @@

<PropertyGroup>
<AssemblyTitle>SharpCompress - Pure C# Decompression/Compression</AssemblyTitle>
<NeutralLanguage>en-US</NeutralLanguage>
<VersionPrefix>0.16.0</VersionPrefix>
<AssemblyVersion>0.16.0.0</AssemblyVersion>
<FileVersion>0.16.0.0</FileVersion>
<VersionPrefix>0.16.1</VersionPrefix>
<AssemblyVersion>0.16.1.0</AssemblyVersion>
<FileVersion>0.16.1.0</FileVersion>
<Authors>Adam Hathcock</Authors>
<TargetFrameworks>netstandard1.0;netstandard1.3</TargetFrameworks>
<TargetFrameworks Condition="'$(LibraryFrameworks)'==''">net45;net35;netstandard1.0;netstandard1.3</TargetFrameworks>
<TargetFrameworks Condition="'$(LibraryFrameworks)'!=''">$(LibraryFrameworks)</TargetFrameworks>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<AssemblyName>SharpCompress</AssemblyName>

@@ -1,7 +1,7 @@
using System;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Common.Tar;
using SharpCompress.Compressors;
using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
@@ -67,10 +67,10 @@ namespace SharpCompress.Writers.Tar
long realSize = size ?? source.Length;

TarHeader header = new TarHeader();
header.LastModifiedTime = modificationTime ?? TarHeader.Epoch;
header.ModTime = modificationTime ?? TarHeader.Epoch;
header.Name = NormalizeFilename(filename);
header.Size = realSize;
header.Write(OutputStream);
header.WriteHeader(OutputStream);
size = source.TransferTo(OutputStream);
PadTo512(size.Value, false);
}

@@ -30,7 +30,7 @@ namespace SharpCompress.Writers.Zip
var decompressedvalue = zip64 ? uint.MaxValue : (uint)Decompressed;
var headeroffsetvalue = zip64 ? uint.MaxValue : (uint)HeaderOffset;
var extralength = zip64 ? (2 + 2 + 8 + 8 + 8 + 4) : 0;
var version = (byte)(zip64 ? 45 : 10);
var version = (byte)(zip64 ? 45 : 20); // Version 20 required for deflate/encryption

HeaderFlags flags = HeaderFlags.UTF8;
if (!outputStream.CanSeek)

@@ -95,9 +95,9 @@ namespace SharpCompress.Test
ResetScratch();
using (var archive = ArchiveFactory.Open(path))
{
archive.EntryExtractionBegin += archive_EntryExtractionBegin;
archive.FilePartExtractionBegin += archive_FilePartExtractionBegin;
archive.CompressedBytesRead += archive_CompressedBytesRead;
//archive.EntryExtractionBegin += archive_EntryExtractionBegin;
//archive.FilePartExtractionBegin += archive_FilePartExtractionBegin;
//archive.CompressedBytesRead += archive_CompressedBytesRead;

foreach (var entry in archive.Entries.Where(entry => !entry.IsDirectory))
{
@@ -157,9 +157,9 @@ namespace SharpCompress.Test
using (var archive = ArchiveFactory.Open(path))
{
totalSize = archive.TotalUncompressSize;
archive.EntryExtractionBegin += Archive_EntryExtractionBeginEx;
archive.EntryExtractionEnd += Archive_EntryExtractionEndEx;
archive.CompressedBytesRead += Archive_CompressedBytesReadEx;
//archive.EntryExtractionBegin += Archive_EntryExtractionBeginEx;
//archive.EntryExtractionEnd += Archive_EntryExtractionEndEx;
//archive.CompressedBytesRead += Archive_CompressedBytesReadEx;

foreach (var entry in archive.Entries.Where(entry => !entry.IsDirectory))
{

@@ -5,7 +5,7 @@ using SharpCompress.Archives;
using SharpCompress.Archives.GZip;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.GZip
{
public class GZipArchiveTests : ArchiveTests
{

@@ -4,7 +4,7 @@ using SharpCompress.Writers;
using SharpCompress.Writers.GZip;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.GZip
{
public class GZipWriterTests : WriterTests
{

@@ -6,7 +6,7 @@ using SharpCompress.Common;
using SharpCompress.Readers;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.Rar
{
public class RarArchiveTests : ArchiveTests
{

@@ -5,7 +5,7 @@ using SharpCompress.Readers;
using SharpCompress.Readers.Rar;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.Rar
{
public class RarReaderTests : ReaderTests
{

@@ -1,9 +1,8 @@

using System;
using System;
using SharpCompress.Common;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.SevenZip
{
public class SevenZipArchiveTests : ArchiveTests
{

@@ -1,5 +1,4 @@

using System.IO;
using System.IO;
using System.Linq;
using SharpCompress.Archives;
using SharpCompress.Archives.Tar;
@@ -7,7 +6,7 @@ using SharpCompress.Common;
using SharpCompress.Writers;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.Tar
{
public class TarArchiveTests : ArchiveTests
{

@@ -1,12 +1,11 @@
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common;
using Xunit;
using System.Linq;
using SharpCompress.Readers;
using SharpCompress.Readers.Tar;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.Tar
{
public class TarReaderTests : ReaderTests
{

@@ -1,7 +1,7 @@
using SharpCompress.Common;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.Tar
{
public class TarWriterTests : WriterTests
{

@@ -9,7 +9,7 @@ using SharpCompress.Writers;
using SharpCompress.Writers.Zip;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.Zip
{
public class Zip64Tests : WriterTests
{

@@ -1,5 +1,4 @@

using System;
using System;
using System.IO;
using System.Linq;
using System.Text;
@@ -10,7 +9,7 @@ using SharpCompress.Readers;
using SharpCompress.Writers;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.Zip
{
public class ZipArchiveTests : ArchiveTests
{
@@ -324,6 +323,24 @@ namespace SharpCompress.Test
VerifyFiles();
}

[Fact]
public void Zip_Deflate_WinzipAES_MultiOpenEntryStream()
{
ResetScratch();
using (var reader = ZipArchive.Open(Path.Combine(TEST_ARCHIVES_PATH, "Zip.deflate.WinzipAES2.zip"), new ReaderOptions()
{
Password = "test"
}))
{
foreach (var entry in reader.Entries.Where(x => !x.IsDirectory))
{
var stream = entry.OpenEntryStream();
Assert.NotNull(stream);
var ex = Record.Exception(() => stream = entry.OpenEntryStream());
Assert.Null(ex);
}
}
}

[Fact]
public void Zip_BZip2_Pkware_Read()

@@ -6,7 +6,7 @@ using SharpCompress.Readers.Zip;
using SharpCompress.Writers;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.Zip
{
public class ZipReaderTests : ReaderTests
{

@@ -1,7 +1,7 @@
using SharpCompress.Common;
using Xunit;

namespace SharpCompress.Test
namespace SharpCompress.Test.Zip
{
public class ZipWriterTests : WriterTests
{

BIN
tests/TestArchives/Archives/Zip.deflate.WinzipAES2.zip
Normal file
Binary file not shown.