mirror of
https://github.com/adamhathcock/sharpcompress.git
synced 2026-02-06 21:26:07 +00:00
Compare commits
296 Commits
copilot/se... → 0.43.0
Commit SHAs in this comparison:

72b3948f43 5d47bfaeb6 b2f2ea65ba 7afa468e15 29cb1fed12 29f8b512c4 9794c8ba72 f1b305f682
091a800c9d 975f5f4b4c 9d6cd930ea d5913e8371 7f71f76f6e caa82a6146 bcf7137073 9238cf1128
2f874ace51 2feabed297 9001e28b36 24d651d7ae 5667595587 32d5b61c4a 128c9e639f 5e3f01dc03
c97c05a3a7 b2beea9c4e 41fbaa1c28 d9274cf794 583b048046 ead5916eae d15ab92da3 1ab30f2af5
4dbe0b91f1 a972d3784e 6991900eb0 d614beb9eb 253a46d458 32b1ec32c6 eb2cba09b2 e79dceb67e
87c38d6dab 9e98d9c45c 0e9a4b0511 eae25aff64 b8c06ff36e 6cf2e054bf 95749234f5 b976961434
e1aa727513 1f71ce1be2 cf13de6ac1 c2e01798f8 8fc8295a89 d392991764 e57e87090f c701bbbee3
2f0eb0bd4b 17bde8da8a 99d355e6ca c790fd21a4 bee51af48b ca743eae22 93504cf82f 6d3e4e842b
54b64a8c3b 0e59bf39f4 8b95e0a76d 48a2ad7b57 cfc6651fff b23827a8db 3f9986c13c 224989f19b
c7010b75c1 00cfeee56e aaa97e2ce2 1d52618137 34309f17f4 220ba67faa 230f96e8e8 930c8899d2
7c0cef7dd8 951ebb3fa2 2a4d098b41 5839b87f98 425a2bd680 939c2497c8 8995ba56b8 e941ab60ca
48860f1349 94b2c5c593 f1d8ae5a22 e44d2093e5 9fa1201a4c 7800808648 2789e86d21 afd1e39b88
aec4c738ef 63ecc8c842 5f6d583521 0341984f10 fe757486ae 46d480c9a1 8fe3cba7a8 aa19f4da8b
a08f95326c b4c5437c92 8680e3b39e 1b3002c8df 394fd2e7db d83af56d28 28c93d6841 5f52fc2176
8fba579e3a 40b1aadeb2 40e72ad199 618b4bbb83 1eaf3e6294 fd453e946d c294071015 c2f6055e33
5161f4df33 3396f8fe00 9291f58091 85f3b17c42 2a3086a0d7 41c3cc1a18 1b1df86a11 e0660e7775
99a6c4de88 ffa765bd97 b1696524b3 14d432e22d 6a37c55085 9c1c6fff9f db8c6f4bcb ff17ecda7d
692058677c 1e90d69912 64a1cc68e1 0fdf9c74a8 20353f35ff e2df7894f9 7af029b5de 8fc5ca5a71
aa0356de9f e44a43d2b1 8997f00b9b c5da416764 840e58fc03 7f911c5219 a887390c23 f4dddcec8e
0d9d82d7e6 3a6d24b1d9 b9b159be4c 40212083a5 d3428b066e 94c64b2a45 0d671a0bb2 d34a47c148
5aa216bd21 8af47548fe 131bd2b7b8 1993673a22 30e036f9ec 0f374b27cf 0d487df61b c082d4203b
d4380b6bb6 095c871174 6d73c5b295 cc4d28193c 9433e06b93 a92aaa51d5 d41908adeb 81ca15b567
b81d0fd730 3a1bb187e8 3fee14a070 5bf789ac65 be06049db3 a0435f6a60 2321e2c90b 97e98d8629
d96e7362d2 7dd46fe5ed 04c044cb2b cc10a12fbc 8b0a1c699f 15ca7c9807 2b4da7e39b 31f81f38af
72cf77b7c7 0fe48c647e 7b06652bff 434ce05416 0698031ed4 51237a34eb b8264a8131 cad923018e
db94b49941 72d15d9cbf e0186eadc0 4cfa5b04af f2c54b1f8b d7d0bc6582 dd9dc2500b 4efb109da8
bcf9a6bdf1 e3a25ecdc0 783521928d 9a876abd31 97f58b412e 4c61628078 99a8b0f750 a9017d7c25
d9e4b26648 0d03bafe49 fee15a31f9 997d3910d4 a3918cc0d7 f056986b07 59c1f02f98 3a71a2b1f8
2ef1215b49 130ac83076 dd606a0702 84cd772f50 fa1d7af22f a771ba3bc0 8b612c658d 7dd0da5fd7
f7b3525c4e de83bdae48 d90b610767 2d41de6b72 f391c3caf3 9bdf150676 0c199609eb 6eff9d3753
7ab16457c7 e7ad8132b5 da87e45534 2ffaef5563 55cb350d2c 7fa271a1b4 c53ca372f2 75bc8501f4
1e22b47fe1 74e2dca207 a669de24b7 e1e9c449e9 60e1dc0239 10eb94fd82 ccc8587e5f 53c96193c1
d4f11e00b1 321233b82c eb188051d4 a136084e11 bc06f3179d ee84d971b2 264d80ef4c dba68187ac
ca4a1936b3 77c8d31a90 ab7196f86c 88b3a66bf9 ea77666b4a db98e5f39b df59c5cb9d e786e95358
75ada5623c ad5c655c45 65e607454e f238be6003 dc31e4c5fa 665d8cd266 8324114e84 b83e6ee4ce
58bab0d310 1af51aaaba a09327b831 16543bf74c aa4cd373ac 351e294362 a0c5b1cd9d df2ed1e584
b354f7a3a5 bb53d1e1c6 2aabd8d0e1 aca97c2c6c 8e7d959cf4 b23f031db9 1ba529a9d5 3d29c183ef
.config/dotnet-tools.json
@@ -3,11 +3,11 @@
   "isRoot": true,
   "tools": {
     "csharpier": {
-      "version": "1.1.2",
+      "version": "1.2.4",
       "commands": [
         "csharpier"
       ],
       "rollForward": false
     }
   }
 }
15 .github/COPILOT_AGENT_README.md vendored
@@ -1,15 +0,0 @@
-# Copilot Coding Agent Configuration
-
-This repository includes a minimal opt-in configuration and CI workflow to allow the GitHub Copilot coding agent to open and validate PRs.
-
-- .copilot-agent.yml: opt-in config for automated agents
-- .github/agents/copilot-agent.yml: detailed agent policy configuration
-- .github/workflows/dotnetcore.yml: CI runs on PRs touching the solution, source, or tests to validate changes
-- AGENTS.md: general instructions for Copilot coding agent with project-specific guidelines
-
-Maintainers can adjust the allowed paths or disable the agent by editing or removing .copilot-agent.yml.
-
-Notes:
-- The agent can create, modify, and delete files within the allowed paths (src, tests, README.md, AGENTS.md)
-- All changes require review before merge
-- If build/test paths are different, update the workflow accordingly; this workflow targets SharpCompress.sln and the SharpCompress.Test test project.
25 .github/prompts/plan-async.prompt.md vendored Normal file
@@ -0,0 +1,25 @@
# Plan: Implement Missing Async Functionality in SharpCompress

SharpCompress has async support for low-level stream operations and the Reader/Writer APIs, but critical entry points (Archive.Open, factory methods, initialization) remain synchronous. This plan adds async overloads for all user-facing I/O operations and fixes existing async bugs, enabling full end-to-end async workflows.

## Steps

1. **Add async factory methods** to [ArchiveFactory.cs](src/SharpCompress/Factories/ArchiveFactory.cs), [ReaderFactory.cs](src/SharpCompress/Factories/ReaderFactory.cs), and [WriterFactory.cs](src/SharpCompress/Factories/WriterFactory.cs) with `OpenAsync` and `CreateAsync` overloads accepting `CancellationToken` (one possible overload shape is sketched after this list)

2. **Implement async Open methods** on concrete archive types ([ZipArchive.cs](src/SharpCompress/Archives/Zip/ZipArchive.cs), [TarArchive.cs](src/SharpCompress/Archives/Tar/TarArchive.cs), [RarArchive.cs](src/SharpCompress/Archives/Rar/RarArchive.cs), [GZipArchive.cs](src/SharpCompress/Archives/GZip/GZipArchive.cs), [SevenZipArchive.cs](src/SharpCompress/Archives/SevenZip/SevenZipArchive.cs)) and reader types ([ZipReader.cs](src/SharpCompress/Readers/Zip/ZipReader.cs), [TarReader.cs](src/SharpCompress/Readers/Tar/TarReader.cs), etc.)

3. **Convert archive initialization logic to async**, including header reading, volume loading, and format signature detection across archive constructors and internal initialization methods

4. **Fix LZMA decoder async bugs** in [LzmaStream.cs](src/SharpCompress/Compressors/LZMA/LzmaStream.cs), [Decoder.cs](src/SharpCompress/Compressors/LZMA/Decoder.cs), and [OutWindow.cs](src/SharpCompress/Compressors/LZMA/OutWindow.cs) to enable true async 7Zip support and remove the `NonDisposingStream` workaround

5. **Complete the Rar async implementation** by converting `UnpackV2017` methods to async in [UnpackV2017.cs](src/SharpCompress/Compressors/Rar/UnpackV2017.cs) and updating Rar20 decompression

6. **Add comprehensive async tests** covering all new async entry points, cancellation scenarios, and concurrent operations across all archive formats in the test files
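A hypothetical sketch of the overload shape step 1 proposes; `OpenAsync` is not an existing SharpCompress API, and the body below simply defers to the current synchronous factory as a placeholder rather than showing real async header parsing:

```csharp
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Archives;
using SharpCompress.Readers;

public static class ArchiveFactoryAsyncSketch
{
    // Additive overload: same inputs as ArchiveFactory.Open, plus a CancellationToken.
    public static async Task<IArchive> OpenAsync(
        Stream stream,
        ReaderOptions? readerOptions = null,
        CancellationToken cancellationToken = default)
    {
        cancellationToken.ThrowIfCancellationRequested();
        // Placeholder body: a real implementation would read headers with async I/O
        // instead of wrapping the existing synchronous factory.
        return await Task.Run(() => ArchiveFactory.Open(stream, readerOptions), cancellationToken)
            .ConfigureAwait(false);
    }
}
```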
## Further Considerations

1. **Breaking changes** - Should new async methods be added alongside existing sync methods (non-breaking), or should sync methods eventually be deprecated? Recommend the additive approach for backward compatibility.

2. **Performance impact** - Header parsing for formats like Zip/Tar is often small; consider whether truly async parsing adds value over sync parsing wrapped in a Task, or make it conditional based on stream type (network vs file).

3. **7Zip complexity** - The LZMA async bug fix (Step 4) may be challenging due to state management in the decoder; consider whether to scope it separately or implement a simpler workaround that maintains correctness.
123 .github/prompts/plan-for-next.prompt.md vendored Normal file
@@ -0,0 +1,123 @@
# Plan: Modernize SharpCompress Public API

Based on comprehensive analysis, the API has several inconsistencies around factory patterns, async support, format capabilities, and options classes. Most improvements can be made incrementally without breaking changes.

## Steps

1. **Standardize factory patterns** by deprecating format-specific static `Open` methods in [Archives/Zip/ZipArchive.cs](src/SharpCompress/Archives/Zip/ZipArchive.cs), [Archives/Tar/TarArchive.cs](src/SharpCompress/Archives/Tar/TarArchive.cs), etc. in favor of the centralized [Factories/ArchiveFactory.cs](src/SharpCompress/Factories/ArchiveFactory.cs)

2. **Complete the async implementation** in [Writers/Zip/ZipWriter.cs](src/SharpCompress/Writers/Zip/ZipWriter.cs) and other writers that currently use sync-over-async, implementing true async I/O throughout the writer hierarchy

3. **Unify options classes** by making [Common/ExtractionOptions.cs](src/SharpCompress/Common/ExtractionOptions.cs) inherit from `OptionsBase` and adding progress reporting to extraction methods consistently

4. **Clarify GZip semantics** in [Archives/GZip/GZipArchive.cs](src/SharpCompress/Archives/GZip/GZipArchive.cs) by adding XML documentation explaining the single-entry limitation and the relationship to the GZip compression used in Tar.gz

## Further Considerations

1. **Breaking changes roadmap** - Should we plan a major version (2.0) to remove deprecated factory methods, clean up the `ArchiveType` enum (remove Arc/Arj or add full support), and consolidate naming patterns?

2. **Progress reporting consistency** - Should `IProgress<ArchiveExtractionProgress<IEntry>>` be added to all extraction extension methods or consolidated into options classes?

## Detailed Analysis

### Factory Pattern Issues

Three different factory patterns exist with overlapping functionality:

1. **Static factories**: ArchiveFactory, ReaderFactory, WriterFactory
2. **Instance factories**: IArchiveFactory, IReaderFactory, IWriterFactory
3. **Format-specific static methods**: each archive class has static `Open` methods

**Example confusion:**

```csharp
// Three ways to open a Zip archive - which is recommended?
var archive1 = ArchiveFactory.Open("file.zip");
var archive2 = ZipArchive.Open("file.zip");
var archive3 = ArchiveFactory.AutoFactory.Open(fileInfo, options);
```

### Async Support Gaps

The base `IWriter` interface has async methods, but writer implementations provide minimal async support. Most writers just call the synchronous methods:

```csharp
public virtual async Task WriteAsync(...)
{
    // Default implementation calls synchronous version
    Write(filename, source, modificationTime);
    await Task.CompletedTask.ConfigureAwait(false);
}
```

Real async implementations exist only in:

- `TarWriter` - proper async implementation
- Most other writers use sync-over-async
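For contrast, a hedged sketch of what a "true async" write could look like (`outputStream` and `WriteHeader` are illustrative placeholders, not the library's actual internals):

```csharp
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

public sealed class AsyncWriterSketch
{
    private readonly Stream outputStream;

    public AsyncWriterSketch(Stream outputStream) => this.outputStream = outputStream;

    // Header work stays synchronous (it is small); the bulk data copy uses
    // real async I/O instead of delegating to a synchronous Write(...).
    public async Task WriteAsync(
        string filename,
        Stream source,
        DateTime? modificationTime,
        CancellationToken cancellationToken = default)
    {
        WriteHeader(filename, modificationTime); // placeholder for format-specific header logic
        await source.CopyToAsync(outputStream, 81920, cancellationToken).ConfigureAwait(false);
    }

    private void WriteHeader(string filename, DateTime? modificationTime)
    {
        // Placeholder: a real writer would emit an entry header here.
    }
}
```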
### GZip Archive Special Case

GZip is treated as both a compression format and an archive format, but it only supports single-entry archives:

```csharp
protected override GZipArchiveEntry CreateEntryInternal(...)
{
    if (Entries.Any())
    {
        throw new InvalidFormatException("Only one entry is allowed in a GZip Archive");
    }
    // ...
}
```

### Options Class Hierarchy

```
OptionsBase (LeaveStreamOpen, ArchiveEncoding)
├─ ReaderOptions (LookForHeader, Password, DisableCheckIncomplete, BufferSize, ExtensionHint, Progress)
├─ WriterOptions (CompressionType, CompressionLevel, Progress)
│   ├─ ZipWriterOptions (ArchiveComment, UseZip64)
│   ├─ TarWriterOptions (FinalizeArchiveOnClose, HeaderFormat)
│   └─ GZipWriterOptions (no additional properties)
└─ ExtractionOptions (standalone - Overwrite, ExtractFullPath, PreserveFileTime, PreserveAttributes)
```

**Issues:**

- `ExtractionOptions` doesn't inherit from `OptionsBase` - no encoding support during extraction
- Progress reporting is inconsistent between readers and extraction
- Obsolete properties (`ChecksumIsValid`, `Version`) have no clear migration path

### Implementation Priorities

**High Priority (Non-Breaking):**
1. Add an API usage guide (Archive vs Reader, factory recommendations, async best practices)
2. Fix progress reporting consistency
3. Complete the async implementation in writers

**Medium Priority (Next Major Version):**
1. Unify the factory pattern - deprecate format-specific static `Open` methods
2. Clean up options classes - make `ExtractionOptions` inherit from `OptionsBase`
3. Clarify archive types - remove Arc/Arj from the `ArchiveType` enum or add full support
4. Standardize naming across archive types

**Low Priority:**
1. Add BZip2 archive support similar to GZipArchive
2. Complete obsolete property cleanup with a migration guide

### Backward Compatibility Strategy

**Safe (Non-Breaking) Changes:**
- Add new methods to interfaces (use default implementations)
- Add new options properties (with defaults)
- Add new factory methods
- Improve async implementations
- Add progress reporting support

**Breaking Changes to Avoid:**
- ❌ Removing format-specific `Open` methods (deprecate instead)
- ❌ Changing the `LeaveStreamOpen` default (currently `true`)
- ❌ Removing obsolete properties before a major version bump
- ❌ Changing return types or signatures of existing methods

**Deprecation Pattern:**
- Use `[Obsolete]` for one major version
- Use `[EditorBrowsable(EditorBrowsableState.Never)]` in the next major version
- Remove in the following major version
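Illustrative only - how the first stage of this pattern could look on a format-specific `Open` method (the class name here is hypothetical, not the library's actual declaration):

```csharp
using System;
using SharpCompress.Archives;

public static class ZipArchiveOpenSketch
{
    // Stage 1: mark the old entry point obsolete but keep it working.
    // Stage 2 (next major version) would add:
    //   [EditorBrowsable(EditorBrowsableState.Never)]
    [Obsolete("Use ArchiveFactory.Open instead.")]
    public static IArchive Open(string filePath) => ArchiveFactory.Open(filePath);
}
```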
155 .github/workflows/NUGET_RELEASE.md vendored Normal file
@@ -0,0 +1,155 @@
# NuGet Release Workflow

This document describes the automated NuGet release workflow for SharpCompress.

## Overview

The `nuget-release.yml` workflow automatically builds, tests, and publishes SharpCompress packages to NuGet.org when:
- Changes are pushed to the `master` or `release` branch
- A version tag (format: `MAJOR.MINOR.PATCH`) is pushed

The workflow runs on both Windows and Ubuntu, but only the Windows build publishes to NuGet.

## How It Works

### Version Determination

The workflow automatically determines the version, using C# code in the build project, based on whether the commit is tagged:

1. **Tagged Release (Stable)**:
   - If the current commit has a version tag (e.g., `0.42.1`)
   - Uses the tag as the version number
   - Published as a stable release

2. **Untagged Release (Prerelease)**:
   - If the current commit is NOT tagged
   - Creates a prerelease version based on the next minor version
   - Format: `{NEXT_MINOR_VERSION}-beta.{COMMIT_COUNT}`
   - Example: `0.43.0-beta.123` (if the last tag is 0.42.x)
   - Published as a prerelease to NuGet.org (Windows build only)
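A simplified C# sketch of that versioning rule (illustrative only; the real logic lives in `build/Program.cs`):

```csharp
static string NextBetaVersion(string lastTag, int commitCount)
{
    // lastTag is a pure semantic version such as "0.42.1"
    var parts = lastTag.Split('.');
    var major = int.Parse(parts[0]);
    var minor = int.Parse(parts[1]) + 1; // bump to the next minor version

    // e.g. NextBetaVersion("0.42.1", 123) => "0.43.0-beta.123"
    return $"{major}.{minor}.0-beta.{commitCount}";
}
```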
### Workflow Steps

The workflow runs on a matrix of operating systems (Windows and Ubuntu):

1. **Checkout**: Fetches the repository with full history for version detection
2. **Setup .NET**: Installs .NET 10.0
3. **Determine Version**: Runs the `determine-version` build target to check for tags and determine the version
4. **Update Version**: Runs the `update-version` build target to update the version in the project file
5. **Build and Test**: Runs the full build and test suite on both platforms
6. **Upload Artifacts**: Uploads the generated `.nupkg` files as workflow artifacts (separate for each OS)
7. **Push to NuGet**: (Windows only) Runs the `push-to-nuget` build target to publish the package to NuGet.org using the API key

All version detection, file updates, and publishing logic is implemented in C# in `build/Program.cs` using build targets.

## Setup Requirements

### 1. NuGet API Key Secret

The workflow requires a `NUGET_API_KEY` secret to be configured in the repository settings:

1. Go to https://www.nuget.org/account/apikeys
2. Create a new API key with "Push" permission for the SharpCompress package
3. In GitHub, go to: **Settings** → **Secrets and variables** → **Actions**
4. Create a new secret named `NUGET_API_KEY` with the API key value

### 2. Branch Protection (Recommended)

Consider enabling branch protection rules for the `release` branch to ensure:
- Code reviews are required before merging
- Status checks pass before merging
- Only authorized users can push to the branch

## Usage

### Creating a Stable Release

There are two ways to trigger a stable release:

**Method 1: Push a tag to trigger the workflow**
1. Ensure all changes are committed on the `master` or `release` branch
2. Create and push a version tag:
   ```bash
   git checkout master  # or release
   git tag 0.43.0
   git push origin 0.43.0
   ```
3. The workflow will automatically trigger, build, test, and publish `SharpCompress 0.43.0` to NuGet.org (Windows build)

**Method 2: Tag after pushing to the branch**
1. Ensure all changes are merged and pushed to the `master` or `release` branch
2. Create and push a version tag on the already-pushed commit:
   ```bash
   git checkout master  # or release
   git tag 0.43.0
   git push origin 0.43.0
   ```
3. The workflow will automatically trigger, build, test, and publish `SharpCompress 0.43.0` to NuGet.org (Windows build)

### Creating a Prerelease

1. Push changes to the `master` or `release` branch without tagging:
   ```bash
   git checkout master  # or release
   git push origin master  # or release
   ```
2. The workflow will automatically:
   - Build and test the project on both Windows and Ubuntu
   - Publish a prerelease version like `0.43.0-beta.456` to NuGet.org (Windows build)

## Troubleshooting

### Workflow Fails to Push to NuGet

- **Check the API Key**: Ensure `NUGET_API_KEY` is set correctly in the repository secrets
- **Check API Key Permissions**: Verify the API key has "Push" permission for SharpCompress
- **Check API Key Expiration**: NuGet API keys may expire; create a new one if needed

### Version Conflict

If you see "Package already exists" errors:
- The workflow uses the `--skip-duplicate` flag to handle this gracefully
- If you need to republish the same version, delete it from NuGet.org first (if allowed)

### Build or Test Failures

- The workflow will not push to NuGet if the build or tests fail
- Check the workflow logs in GitHub Actions for details
- Fix the issues and push again

## Manual Package Creation

If you need to create a package manually without publishing:

```bash
dotnet run --project build/build.csproj -- publish
```

The package will be created in the `artifacts/` directory.

## Build Targets

The workflow uses the following C# build targets defined in `build/Program.cs`:

- **determine-version**: Detects the version from git tags and outputs the VERSION and PRERELEASE variables
- **update-version**: Updates VersionPrefix, AssemblyVersion, and FileVersion in the project file
- **push-to-nuget**: Pushes the generated NuGet packages to NuGet.org (requires NUGET_API_KEY)

These targets can be run manually for testing:

```bash
# Determine the version
dotnet run --project build/build.csproj -- determine-version

# Update the version in the project file
VERSION=0.43.0 dotnet run --project build/build.csproj -- update-version

# Push to NuGet (requires the NUGET_API_KEY environment variable)
NUGET_API_KEY=your-key dotnet run --project build/build.csproj -- push-to-nuget
```

## Related Files

- `.github/workflows/nuget-release.yml` - The workflow definition
- `build/Program.cs` - Build script with version detection and publishing logic
- `src/SharpCompress/SharpCompress.csproj` - Project file with version information
120 .github/workflows/TESTING.md vendored Normal file
@@ -0,0 +1,120 @@
# Testing Guide for NuGet Release Workflow

This document describes how to test the NuGet release workflow.

## Testing Strategy

Since this workflow publishes to NuGet.org and requires repository secrets, testing should be done carefully. The workflow runs on both Windows and Ubuntu, but only the Windows build publishes to NuGet.

## Pre-Testing Checklist

- [x] Workflow YAML syntax validated
- [x] Version determination logic tested locally
- [x] Version update logic tested locally
- [x] Build script works (`dotnet run --project build/build.csproj`)

## Manual Testing Steps

### 1. Test Prerelease Publishing (Recommended First Test)

This tests the workflow on untagged commits to the master or release branch.

**Steps:**
1. Ensure the `NUGET_API_KEY` secret is configured in the repository settings
2. Create a test commit on the `master` or `release` branch (e.g., update a comment or the README)
3. Push to the `master` or `release` branch
4. Monitor the GitHub Actions workflow at: https://github.com/adamhathcock/sharpcompress/actions
5. Verify:
   - The workflow triggers and runs successfully on both Windows and Ubuntu
   - The version is determined correctly (e.g., `0.43.0-beta.XXX` if the last tag is 0.42.x)
   - Build and tests pass on both platforms
   - Package artifacts are uploaded for both platforms
   - The package is pushed to NuGet.org as a prerelease (Windows build only)

**Expected Outcome:**
- A new prerelease package appears on NuGet.org: https://www.nuget.org/packages/SharpCompress/
- The package version follows the pattern: `{NEXT_MINOR_VERSION}-beta.{COMMIT_COUNT}`

### 2. Test Tagged Release Publishing

This tests the workflow when a version tag is pushed.

**Steps:**
1. Prepare the `master` or `release` branch with all desired changes
2. Create a version tag (must be a pure semantic version like `MAJOR.MINOR.PATCH`):
   ```bash
   git checkout master  # or release
   git tag 0.42.2
   git push origin 0.42.2
   ```
3. Monitor the GitHub Actions workflow
4. Verify:
   - The workflow triggers and runs successfully on both Windows and Ubuntu
   - The version is determined from the tag (e.g., `0.42.2`)
   - Build and tests pass on both platforms
   - Package artifacts are uploaded for both platforms
   - The package is pushed to NuGet.org as a stable release (Windows build only)

**Expected Outcome:**
- A new stable release package appears on NuGet.org
- The package version matches the tag

### 3. Test Duplicate Package Handling

This tests the `--skip-duplicate` flag behavior.

**Steps:**
1. Push to the `release` branch without making changes
2. Monitor the workflow
3. Verify:
   - The workflow runs but the NuGet push is skipped with a "duplicate" message
   - No errors occur

### 4. Test Build Failure Handling

This tests that failed builds don't publish packages.

**Steps:**
1. Introduce a breaking change in a test or the code
2. Push to the `release` branch
3. Verify:
   - The workflow runs and detects the failure
   - The build or test step fails
   - The NuGet push step is skipped
   - No package is published

## Verification

After each test, verify:

1. **GitHub Actions logs**: Check the workflow logs for any errors or warnings
2. **NuGet.org**: Verify the package appears with the correct version and metadata
3. **Artifacts**: Download and inspect the uploaded artifacts

## Rollback/Cleanup

If testing produces unwanted packages:

1. **Prerelease packages**: Can be unlisted on NuGet.org (Settings → Unlist)
2. **Stable packages**: Cannot be deleted, only unlisted (use test versions)
3. **Tags**: Can be deleted with:
   ```bash
   git tag -d 0.42.2
   git push origin :refs/tags/0.42.2
   ```

## Known Limitations

- NuGet.org does not allow re-uploading the same version
- Deleted packages on NuGet.org reserve the version number
- The workflow requires the `NUGET_API_KEY` secret to be set

## Success Criteria

The workflow is considered successful if:

- ✅ Prerelease versions are published correctly with a beta suffix
- ✅ Tagged versions are published as stable releases
- ✅ Build and test failures prevent publishing
- ✅ Duplicate packages are handled gracefully
- ✅ Workflow logs are clear and informative
6 .github/workflows/dotnetcore.yml vendored
@@ -14,12 +14,12 @@ jobs:
         os: [windows-latest, ubuntu-latest]

     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6
       - uses: actions/setup-dotnet@v5
        with:
-          dotnet-version: 8.0.x
+          dotnet-version: 10.0.x
      - run: dotnet run --project build/build.csproj
-      - uses: actions/upload-artifact@v5
+      - uses: actions/upload-artifact@v6
        with:
          name: ${{ matrix.os }}-sharpcompress.nupkg
          path: artifacts/*
57 .github/workflows/nuget-release.yml vendored Normal file
@@ -0,0 +1,57 @@
name: NuGet Release

on:
  push:
    branches:
      - 'master'
      - 'release'
    tags:
      - '[0-9]+.[0-9]+.[0-9]+'

permissions:
  contents: read

jobs:
  build-and-publish:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [windows-latest, ubuntu-latest]

    steps:
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0 # Fetch all history for versioning

      - uses: actions/setup-dotnet@v5
        with:
          dotnet-version: 10.0.x

      # Determine version using C# build target
      - name: Determine Version
        id: version
        run: dotnet run --project build/build.csproj -- determine-version

      # Update version in project file using C# build target
      - name: Update Version in Project
        run: dotnet run --project build/build.csproj -- update-version
        env:
          VERSION: ${{ steps.version.outputs.version }}

      # Build and test
      - name: Build and Test
        run: dotnet run --project build/build.csproj

      # Upload artifacts for verification
      - name: Upload NuGet Package
        uses: actions/upload-artifact@v6
        with:
          name: ${{ matrix.os }}-nuget-package
          path: artifacts/*.nupkg

      # Push to NuGet.org using C# build target (Windows only)
      - name: Push to NuGet
        if: success() && matrix.os == 'windows-latest'
        run: dotnet run --project build/build.csproj -- push-to-nuget
        env:
          NUGET_API_KEY: ${{ secrets.NUGET_API_KEY }}
5 .gitignore vendored
@@ -11,11 +11,12 @@ TestResults/
 packages/*/
 project.lock.json
-tests/TestArchives/Scratch
+tests/TestArchives/*/Scratch
+tests/TestArchives/*/Scratch2
 .vs
 tools
 .vscode
 .idea/
 artifacts/

 .DS_Store
 *.snupkg
+/tests/TestArchives/6d23a38c-f064-4ef1-ad89-b942396f53b9/Scratch
9 .vscode/extensions.json vendored Normal file
@@ -0,0 +1,9 @@
{
  "recommendations": [
    "ms-dotnettools.csdevkit",
    "ms-dotnettools.csharp",
    "ms-dotnettools.vscode-dotnet-runtime",
    "csharpier.csharpier-vscode",
    "formulahendry.dotnet-test-explorer"
  ]
}
97 .vscode/launch.json vendored Normal file
@@ -0,0 +1,97 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Debug Tests (net10.0)",
      "type": "coreclr",
      "request": "launch",
      "preLaunchTask": "build",
      "program": "dotnet",
      "args": [
        "test",
        "${workspaceFolder}/tests/SharpCompress.Test/SharpCompress.Test.csproj",
        "-f",
        "net10.0",
        "--no-build",
        "--verbosity=normal"
      ],
      "cwd": "${workspaceFolder}",
      "console": "internalConsole",
      "stopAtEntry": false
    },
    {
      "name": "Debug Specific Test (net10.0)",
      "type": "coreclr",
      "request": "launch",
      "preLaunchTask": "build",
      "program": "dotnet",
      "args": [
        "test",
        "${workspaceFolder}/tests/SharpCompress.Test/SharpCompress.Test.csproj",
        "-f",
        "net10.0",
        "--no-build",
        "--filter",
        "FullyQualifiedName~${input:testName}"
      ],
      "cwd": "${workspaceFolder}",
      "console": "internalConsole",
      "stopAtEntry": false
    },
    {
      "name": "Debug Performance Tests",
      "type": "coreclr",
      "request": "launch",
      "preLaunchTask": "build",
      "program": "dotnet",
      "args": [
        "run",
        "--project",
        "${workspaceFolder}/tests/SharpCompress.Performance/SharpCompress.Performance.csproj",
        "--no-build"
      ],
      "cwd": "${workspaceFolder}",
      "console": "internalConsole",
      "stopAtEntry": false
    },
    {
      "name": "Debug Build Script",
      "type": "coreclr",
      "request": "launch",
      "program": "dotnet",
      "args": [
        "run",
        "--project",
        "${workspaceFolder}/build/build.csproj",
        "--",
        "${input:buildTarget}"
      ],
      "cwd": "${workspaceFolder}",
      "console": "internalConsole",
      "stopAtEntry": false
    }
  ],
  "inputs": [
    {
      "id": "testName",
      "type": "promptString",
      "description": "Enter test name or pattern (e.g., TestMethodName or ClassName)",
      "default": ""
    },
    {
      "id": "buildTarget",
      "type": "pickString",
      "description": "Select build target",
      "options": [
        "clean",
        "restore",
        "build",
        "test",
        "format",
        "publish",
        "default"
      ],
      "default": "build"
    }
  ]
}
29 .vscode/settings.json vendored Normal file
@@ -0,0 +1,29 @@
{
  "dotnet.defaultSolution": "SharpCompress.sln",
  "files.exclude": {
    "**/bin": true,
    "**/obj": true
  },
  "files.watcherExclude": {
    "**/bin/**": true,
    "**/obj/**": true,
    "**/artifacts/**": true
  },
  "search.exclude": {
    "**/bin": true,
    "**/obj": true,
    "**/artifacts": true
  },
  "editor.formatOnSave": false,
  "[csharp]": {
    "editor.defaultFormatter": "csharpier.csharpier-vscode",
    "editor.formatOnSave": true,
    "editor.codeActionsOnSave": {
      "source.fixAll": "explicit"
    }
  },
  "csharpier.enableDebugLogs": false,
  "omnisharp.enableRoslynAnalyzers": true,
  "omnisharp.enableEditorConfigSupport": true,
  "dotnet-test-explorer.testProjectPath": "tests/**/*.csproj"
}
178 .vscode/tasks.json vendored Normal file
@@ -0,0 +1,178 @@
{
  "version": "2.0.0",
  "tasks": [
    {
      "label": "build",
      "command": "dotnet",
      "type": "process",
      "args": [
        "build",
        "${workspaceFolder}/SharpCompress.sln",
        "/property:GenerateFullPaths=true",
        "/consoleloggerparameters:NoSummary;ForceNoAlign"
      ],
      "problemMatcher": "$msCompile",
      "group": {
        "kind": "build",
        "isDefault": true
      }
    },
    {
      "label": "build-release",
      "command": "dotnet",
      "type": "process",
      "args": [
        "build",
        "${workspaceFolder}/SharpCompress.sln",
        "-c",
        "Release",
        "/property:GenerateFullPaths=true",
        "/consoleloggerparameters:NoSummary;ForceNoAlign"
      ],
      "problemMatcher": "$msCompile",
      "group": "build"
    },
    {
      "label": "build-library",
      "command": "dotnet",
      "type": "process",
      "args": [
        "build",
        "${workspaceFolder}/src/SharpCompress/SharpCompress.csproj",
        "/property:GenerateFullPaths=true",
        "/consoleloggerparameters:NoSummary;ForceNoAlign"
      ],
      "problemMatcher": "$msCompile",
      "group": "build"
    },
    {
      "label": "restore",
      "command": "dotnet",
      "type": "process",
      "args": [
        "restore",
        "${workspaceFolder}/SharpCompress.sln"
      ],
      "problemMatcher": "$msCompile"
    },
    {
      "label": "clean",
      "command": "dotnet",
      "type": "process",
      "args": [
        "clean",
        "${workspaceFolder}/SharpCompress.sln"
      ],
      "problemMatcher": "$msCompile"
    },
    {
      "label": "test",
      "command": "dotnet",
      "type": "process",
      "args": [
        "test",
        "${workspaceFolder}/tests/SharpCompress.Test/SharpCompress.Test.csproj",
        "--no-build",
        "--verbosity=normal"
      ],
      "problemMatcher": "$msCompile",
      "group": {
        "kind": "test",
        "isDefault": true
      },
      "dependsOn": "build"
    },
    {
      "label": "test-net10",
      "command": "dotnet",
      "type": "process",
      "args": [
        "test",
        "${workspaceFolder}/tests/SharpCompress.Test/SharpCompress.Test.csproj",
        "-f",
        "net10.0",
        "--no-build",
        "--verbosity=normal"
      ],
      "problemMatcher": "$msCompile",
      "group": "test",
      "dependsOn": "build"
    },
    {
      "label": "test-net48",
      "command": "dotnet",
      "type": "process",
      "args": [
        "test",
        "${workspaceFolder}/tests/SharpCompress.Test/SharpCompress.Test.csproj",
        "-f",
        "net48",
        "--no-build",
        "--verbosity=normal"
      ],
      "problemMatcher": "$msCompile",
      "group": "test",
      "dependsOn": "build"
    },
    {
      "label": "format",
      "command": "dotnet",
      "type": "process",
      "args": [
        "csharpier",
        "."
      ],
      "problemMatcher": []
    },
    {
      "label": "format-check",
      "command": "dotnet",
      "type": "process",
      "args": [
        "csharpier",
        "check",
        "."
      ],
      "problemMatcher": []
    },
    {
      "label": "run-build-script",
      "command": "dotnet",
      "type": "process",
      "args": [
        "run",
        "--project",
        "${workspaceFolder}/build/build.csproj"
      ],
      "problemMatcher": "$msCompile"
    },
    {
      "label": "pack",
      "command": "dotnet",
      "type": "process",
      "args": [
        "pack",
        "${workspaceFolder}/src/SharpCompress/SharpCompress.csproj",
        "-c",
        "Release",
        "-o",
        "${workspaceFolder}/artifacts/"
      ],
      "problemMatcher": "$msCompile",
      "dependsOn": "build-release"
    },
    {
      "label": "performance-tests",
      "command": "dotnet",
      "type": "process",
      "args": [
        "run",
        "--project",
        "${workspaceFolder}/tests/SharpCompress.Performance/SharpCompress.Performance.csproj",
        "-c",
        "Release"
      ],
      "problemMatcher": "$msCompile"
    }
  ]
}
71 AGENTS.md
@@ -28,14 +28,38 @@ SharpCompress is a pure C# compression library supporting multiple archive formats

## Code Formatting

**Copilot agents: You MUST run the `format` task after making code changes to ensure consistency.**

- Use CSharpier for code formatting to ensure a consistent style across the project
- CSharpier is configured as a local tool in `.config/dotnet-tools.json`
- Restore tools with: `dotnet tool restore`
- Format files from the project root with: `dotnet csharpier .`
- **Run `dotnet csharpier .` from the project root after making code changes, before committing**
- Configure your IDE to format on save using CSharpier for the best experience

### Commands

1. **Restore tools** (first time only):
   ```bash
   dotnet tool restore
   ```

2. **Check whether files are formatted correctly** (doesn't modify files):
   ```bash
   dotnet csharpier check .
   ```
   - Exit code 0: all files are properly formatted
   - Exit code 1: some files need formatting (shows which files and the differences)

3. **Format files** (modifies files):
   ```bash
   dotnet csharpier format .
   ```
   - Formats all files in the project to match the CSharpier style
   - Run from the project root directory

4. **Configure your IDE** to format on save using CSharpier for the best experience

### Additional Notes
- The project also uses `.editorconfig` for editor settings (indentation, encoding, etc.)
- Let CSharpier handle code style while `.editorconfig` handles editor behavior
- Always run `dotnet csharpier check .` before committing to verify formatting

## Project Setup and Structure

@@ -49,6 +73,30 @@ SharpCompress is a pure C# compression library supporting multiple archive formats
- Use `dotnet test` to run tests
- Solution file: `SharpCompress.sln`

### Directory Structure
```
src/SharpCompress/
├── Archives/      # IArchive implementations (Zip, Tar, Rar, 7Zip, GZip)
├── Readers/       # IReader implementations (forward-only)
├── Writers/       # IWriter implementations (forward-only)
├── Compressors/   # Low-level compression streams (BZip2, Deflate, LZMA, etc.)
├── Factories/     # Format detection and factory pattern
├── Common/        # Shared types (ArchiveType, Entry, Options)
├── Crypto/        # Encryption implementations
└── IO/            # Stream utilities and wrappers

tests/SharpCompress.Test/
├── Zip/, Tar/, Rar/, SevenZip/, GZip/, BZip2/   # Format-specific tests
├── TestBase.cs                                  # Base test class with helper methods
└── TestArchives/                                # Test data (not checked into main test project)
```

### Factory Pattern
All format types implement factory interfaces (`IArchiveFactory`, `IReaderFactory`, `IWriterFactory`) for auto-detection (see the usage sketch after this list):
- `ReaderFactory.Open()` - Auto-detects the format by probing the stream
- `WriterFactory.Open()` - Creates a writer for the specified `ArchiveType`
- Factories located in: `src/SharpCompress/Factories/`
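A short usage sketch of the auto-detecting factory described above (the file name is a placeholder):

```csharp
using System;
using System.IO;
using SharpCompress.Readers;

using var stream = File.OpenRead("unknown-format.archive"); // placeholder path
using var reader = ReaderFactory.Open(stream);              // probes the stream to detect the format
while (reader.MoveToNextEntry())
{
    Console.WriteLine($"{reader.ArchiveType}: {reader.Entry.Key}");
}
```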
## Nullable Reference Types

- Declare variables non-nullable, and check for `null` at entry points.

@@ -116,3 +164,18 @@ SharpCompress supports multiple archive and compression formats:
- Use test archives from the `tests/TestArchives` directory for consistency.
- Test stream disposal and `LeaveStreamOpen` behavior.
- Test edge cases: empty archives, large files, corrupted archives, encrypted archives.

### Test Organization
- Base class: `TestBase` - provides `TEST_ARCHIVES_PATH`, `SCRATCH_FILES_PATH`, and temp directory management
- Framework: xUnit with AwesomeAssertions
- Test archives: `tests/TestArchives/` - use existing archives, don't create new ones unnecessarily
- Match the naming style of nearby test files (a skeleton in this style follows)
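A hedged skeleton of a test in that style; the sample archive name is an assumption, and `TEST_ARCHIVES_PATH` comes from the `TestBase` class described above:

```csharp
using System.IO;
using SharpCompress.Readers;
using Xunit;

public class ZipReaderSmokeTest : TestBase
{
    [Fact]
    public void Opens_And_Iterates_Sample_Zip()
    {
        // "Zip.deflate.zip" is an assumed sample archive name, used for illustration.
        var path = Path.Combine(TEST_ARCHIVES_PATH, "Zip.deflate.zip");
        using var stream = File.OpenRead(path);
        using var reader = ReaderFactory.Open(stream);
        while (reader.MoveToNextEntry())
        {
            Assert.NotNull(reader.Entry.Key);
        }
    }
}
```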
## Common Pitfalls

1. **Don't mix the Archive and Reader APIs** - Archive needs a seekable stream; Reader doesn't
2. **Solid archives (Rar, 7Zip)** - Use `ExtractAllEntries()` for the best performance, not individual entry extraction
3. **Stream disposal** - Always set `LeaveStreamOpen` explicitly when needed rather than relying on the default (see the sketch after this list)
4. **Tar + non-seekable stream** - You must provide the file size or it will throw
5. **Multi-framework differences** - Some features differ between .NET Framework and modern .NET (e.g., Mono.Posix)
6. **Format detection** - Use `ReaderFactory.Open()` for auto-detection, and test with actual archive files
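Pitfall 3 in practice - a minimal sketch that keeps the underlying stream usable after the reader is disposed:

```csharp
using System.IO;
using SharpCompress.Readers;

using var stream = File.OpenRead("archive.zip"); // placeholder path
using (var reader = ReaderFactory.Open(stream, new ReaderOptions { LeaveStreamOpen = true }))
{
    while (reader.MoveToNextEntry())
    {
        // inspect or extract entries here
    }
}
// Because LeaveStreamOpen was set explicitly, the stream is still open here.
stream.Position = 0;
```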
Directory.Packages.props
@@ -1,19 +1,19 @@
 <Project>
   <ItemGroup>
-    <PackageVersion Include="Bullseye" Version="6.0.0" />
-    <PackageVersion Include="AwesomeAssertions" Version="9.2.1" />
+    <PackageVersion Include="Bullseye" Version="6.1.0" />
+    <PackageVersion Include="AwesomeAssertions" Version="9.3.0" />
     <PackageVersion Include="Glob" Version="1.1.9" />
-    <PackageVersion Include="JetBrains.Profiler.SelfApi" Version="2.5.14" />
-    <PackageVersion Include="Microsoft.Bcl.AsyncInterfaces" Version="8.0.0" />
-    <PackageVersion Include="Microsoft.NET.Test.Sdk" Version="18.0.0" />
+    <PackageVersion Include="JetBrains.Profiler.SelfApi" Version="2.5.15" />
+    <PackageVersion Include="Microsoft.Bcl.AsyncInterfaces" Version="10.0.0" />
+    <PackageVersion Include="Microsoft.NET.Test.Sdk" Version="18.0.1" />
     <PackageVersion Include="Mono.Posix.NETStandard" Version="1.0.0" />
-    <PackageVersion Include="SimpleExec" Version="12.0.0" />
+    <PackageVersion Include="SimpleExec" Version="12.1.0" />
+    <PackageVersion Include="System.Text.Encoding.CodePages" Version="10.0.0" />
     <PackageVersion Include="System.Buffers" Version="4.6.1" />
     <PackageVersion Include="System.Memory" Version="4.6.3" />
-    <PackageVersion Include="System.Text.Encoding.CodePages" Version="8.0.0" />
     <PackageVersion Include="xunit" Version="2.9.3" />
     <PackageVersion Include="xunit.runner.visualstudio" Version="3.1.5" />
     <PackageVersion Include="ZstdSharp.Port" Version="0.8.6" />
     <PackageVersion Include="Microsoft.NET.ILLink.Tasks" Version="10.0.0" />
     <PackageVersion Include="Microsoft.SourceLink.GitHub" Version="8.0.0" />
     <PackageVersion Include="Microsoft.NETFramework.ReferenceAssemblies" Version="1.0.3" />
   </ItemGroup>
21 FORMATS.md
@@ -22,11 +22,28 @@
| 7Zip (4) | LZMA, LZMA2, BZip2, PPMd, BCJ, BCJ2, Deflate | Decompress | SevenZipArchive | N/A | N/A |

1. SOLID Rars are only supported in the RarReader API.
-2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported but only with seekable streams as the Zip spec doesn't support Zip64 data in post data descriptors. Deflate64 is only supported for reading.
+2. Zip format supports pkware and WinzipAES encryption. However, encrypted LZMA is not supported. Zip64 reading/writing is supported, but only with seekable streams, as the Zip spec doesn't support Zip64 data in post data descriptors. Deflate64 is only supported for reading. See [Zip Format Notes](#zip-format-notes) for details on multi-volume archives and streaming behavior.
3. The Tar format requires a file size in the header. If no size is specified to the TarWriter and the stream is not seekable, then an exception will be thrown.
-4. The 7Zip format doesn't allow for reading as a forward-only stream so 7Zip is only supported through the Archive API
+4. The 7Zip format doesn't allow for reading as a forward-only stream, so 7Zip is only supported through the Archive API. See [7Zip Format Notes](#7zip-format-notes) for details on async extraction behavior.
5. LZip has no support for extra data like the file name or timestamp. There is a default filename used when looking at the entry Key on the archive.

### Zip Format Notes

- Multi-volume/split ZIP archives require ZipArchive (seekable streams), as ZipReader cannot seek across volume files.
- ZipReader processes entries from LocalEntry headers (which include directory entries ending with `/`) and intentionally skips DirectoryEntry headers from the central directory; they are redundant in streaming mode because all entry data comes from the LocalEntry headers ZipReader has already processed.

### 7Zip Format Notes

- **Async Extraction Performance**: When using async extraction methods (e.g., `ExtractAllEntries()` with `MoveToNextEntryAsync()`), each file creates its own decompression stream to avoid state corruption in the LZMA decoder. This is less efficient than synchronous extraction, which can reuse a single decompression stream for multiple files in the same folder.

**Performance Impact**: For archives with many small files in the same compression folder, async extraction will be slower than synchronous extraction because it must:
1. Create a new LZMA decoder for each file
2. Skip through the decompressed data to reach each file's starting position

**Recommendation**: For the best performance with 7Zip archives, use the synchronous extraction methods (`MoveToNextEntry()` and `WriteEntryToDirectory()`) when possible (see the sketch at the end of this section). Use the async methods only when you need to avoid blocking the thread (e.g., in UI applications or async-only contexts).

**Technical Details**: 7Zip archives group files into "folders" (compression units), where all files in a folder share one continuous LZMA-compressed stream. The LZMA decoder maintains internal state (dictionary window, decoder positions) that assumes sequential, non-interruptible processing. Async operations can yield control during awaits, which would corrupt this shared state. To avoid this, async extraction creates a fresh decoder stream for each file.
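For reference, a minimal sketch of the synchronous pattern recommended above (the archive path is a placeholder):

```csharp
using SharpCompress.Archives.SevenZip;
using SharpCompress.Common;
using SharpCompress.Readers;

using (var archive = SevenZipArchive.Open("archive.7z")) // placeholder path
using (var reader = archive.ExtractAllEntries())
{
    // Sequential, synchronous extraction reuses one decompression stream per folder.
    while (reader.MoveToNextEntry())
    {
        if (!reader.Entry.IsDirectory)
        {
            reader.WriteEntryToDirectory(
                @"D:\output",
                new ExtractionOptions { ExtractFullPath = true, Overwrite = true });
        }
    }
}
```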
## Compression Streams

For those who want to directly compress/decompress bits. The single-file formats are represented here as well. However, BZip2, LZip and XZ have no metadata (GZip has a little), so using them without something like a Tar file makes little sense.
README.md
@@ -1,6 +1,6 @@
# SharpCompress

-SharpCompress is a compression library in pure C# for .NET Framework 4.62, .NET Standard 2.1, .NET 6.0 and NET 8.0 that can unrar, un7zip, unzip, untar unbzip2, ungzip, unlzip, unzstd with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip/lzip are implemented.
+SharpCompress is a compression library in pure C# for .NET Framework 4.8, .NET 8.0 and .NET 10.0 that can unrar, un7zip, unzip, untar, unbzip2, ungzip, unlzip, unzstd, unarc and unarj with forward-only reading and file random access APIs. Write support for zip/tar/bzip2/gzip/lzip is implemented.

The major feature is support for non-seekable streams so large files can be processed on the fly (i.e., from a download stream).
64 USAGE.md
@@ -87,20 +87,17 @@ memoryStream.Position = 0;

### Extract all files from a rar file to a directory using RarArchive

Note: Extracting a solid rar or 7z file needs to be done in sequential order to get acceptable decompression speed.
-It is explicitly recommended to use `ExtractAllEntries` when extracting an entire `IArchive` instead of iterating over all its `Entries`.
-Alternatively, use `IArchive.WriteToDirectory`.
+`ExtractAllEntries` is primarily intended for solid archives (like solid Rar) or 7Zip archives, where sequential extraction provides the best performance. For general/simple extraction with any supported archive type, use `archive.WriteToDirectory()` instead.

```C#
using (var archive = RarArchive.Open("Test.rar"))
{
-    using (var reader = archive.ExtractAllEntries())
-    {
-        reader.WriteAllToDirectory(@"D:\temp", new ExtractionOptions()
-        {
-            ExtractFullPath = true,
-            Overwrite = true
-        });
-    }
+    // Simple extraction with RarArchive; this WriteToDirectory pattern works for all archive types
+    archive.WriteToDirectory(@"D:\temp", new ExtractionOptions()
+    {
+        ExtractFullPath = true,
+        Overwrite = true
+    });
}
```

@@ -116,6 +113,41 @@
}
```

### Extract solid Rar or 7Zip archives with manual progress reporting

`ExtractAllEntries` only works for solid archives (Rar) or 7Zip archives. For optimal performance with these archive types, use this method:

```C#
using (var archive = RarArchive.Open("archive.rar")) // Must be solid Rar or 7Zip
{
    if (archive.IsSolid || archive.Type == ArchiveType.SevenZip)
    {
        // Calculate total size for progress reporting
        double totalSize = archive.Entries.Where(e => !e.IsDirectory).Sum(e => e.Size);
        long completed = 0;

        using (var reader = archive.ExtractAllEntries())
        {
            while (reader.MoveToNextEntry())
            {
                if (!reader.Entry.IsDirectory)
                {
                    reader.WriteEntryToDirectory(@"D:\output", new ExtractionOptions()
                    {
                        ExtractFullPath = true,
                        Overwrite = true
                    });

                    completed += reader.Entry.Size;
                    double progress = completed / totalSize;
                    Console.WriteLine($"Progress: {progress:P}");
                }
            }
        }
    }
}
```

### Use ReaderFactory to autodetect archive type and Open the entry stream

@@ -298,14 +330,12 @@
```C#
using (var archive = ZipArchive.Open("archive.zip"))
{
-    using (var reader = archive.ExtractAllEntries())
-    {
-        await reader.WriteAllToDirectoryAsync(
-            @"C:\output",
-            new ExtractionOptions() { ExtractFullPath = true, Overwrite = true },
-            cancellationToken
-        );
-    }
+    // Simple async extraction - works for all archive types
+    await archive.WriteToDirectoryAsync(
+        @"C:\output",
+        new ExtractionOptions() { ExtractFullPath = true, Overwrite = true },
+        cancellationToken
+    );
}
```
177
build/Program.cs
177
build/Program.cs
@@ -1,7 +1,10 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text.RegularExpressions;
using System.Threading.Tasks;
using GlobExpressions;
using static Bullseye.Targets;
using static SimpleExec.Command;
@@ -11,7 +14,11 @@ const string Restore = "restore";
const string Build = "build";
const string Test = "test";
const string Format = "format";
const string CheckFormat = "check-format";
const string Publish = "publish";
const string DetermineVersion = "determine-version";
const string UpdateVersion = "update-version";
const string PushToNuGet = "push-to-nuget";

Target(
    Clean,
@@ -42,12 +49,20 @@ Target(
Target(
    Format,
    () =>
    {
        Run("dotnet", "tool restore");
        Run("dotnet", "csharpier format .");
    }
);
Target(
    CheckFormat,
    () =>
    {
        Run("dotnet", "tool restore");
        Run("dotnet", "csharpier check .");
    }
);
Target(Restore, [Format], () => Run("dotnet", "restore"));
Target(Restore, [CheckFormat], () => Run("dotnet", "restore"));

Target(
    Build,
@@ -61,7 +76,7 @@ Target(
Target(
    Test,
    [Build],
    ["net8.0", "net48"],
    ["net10.0", "net48"],
    framework =>
    {
        IEnumerable<string> GetFiles(string d)
@@ -90,6 +105,164 @@ Target(
    }
);

Target(
    DetermineVersion,
    async () =>
    {
        var (version, isPrerelease) = await GetVersion();
        Console.WriteLine($"VERSION={version}");
        Console.WriteLine($"PRERELEASE={isPrerelease.ToString().ToLower()}");

        // Write to environment file for GitHub Actions
        var githubOutput = Environment.GetEnvironmentVariable("GITHUB_OUTPUT");
        if (!string.IsNullOrEmpty(githubOutput))
        {
            File.AppendAllText(githubOutput, $"version={version}\n");
            File.AppendAllText(githubOutput, $"prerelease={isPrerelease.ToString().ToLower()}\n");
        }
    }
);

Target(
    UpdateVersion,
    async () =>
    {
        var version = Environment.GetEnvironmentVariable("VERSION");
        if (string.IsNullOrEmpty(version))
        {
            var (detectedVersion, _) = await GetVersion();
            version = detectedVersion;
        }

        Console.WriteLine($"Updating project file with version: {version}");

        var projectPath = "src/SharpCompress/SharpCompress.csproj";
        var content = File.ReadAllText(projectPath);

        // Get base version (without prerelease suffix)
        var baseVersion = version.Split('-')[0];

        // Update VersionPrefix
        content = Regex.Replace(
            content,
            @"<VersionPrefix>[^<]*</VersionPrefix>",
            $"<VersionPrefix>{version}</VersionPrefix>"
        );

        // Update AssemblyVersion
        content = Regex.Replace(
            content,
            @"<AssemblyVersion>[^<]*</AssemblyVersion>",
            $"<AssemblyVersion>{baseVersion}</AssemblyVersion>"
        );

        // Update FileVersion
        content = Regex.Replace(
            content,
            @"<FileVersion>[^<]*</FileVersion>",
            $"<FileVersion>{baseVersion}</FileVersion>"
        );

        File.WriteAllText(projectPath, content);
        Console.WriteLine($"Updated VersionPrefix to: {version}");
        Console.WriteLine($"Updated AssemblyVersion and FileVersion to: {baseVersion}");
    }
);

Target(
    PushToNuGet,
    () =>
    {
        var apiKey = Environment.GetEnvironmentVariable("NUGET_API_KEY");
        if (string.IsNullOrEmpty(apiKey))
        {
            Console.WriteLine(
                "NUGET_API_KEY environment variable is not set. Skipping NuGet push."
            );
            return;
        }

        var packages = Directory.GetFiles("artifacts", "*.nupkg");
        if (packages.Length == 0)
        {
            Console.WriteLine("No packages found in artifacts directory.");
            return;
        }

        foreach (var package in packages)
        {
            Console.WriteLine($"Pushing {package} to NuGet.org");
            try
            {
                // Note: API key is passed via command line argument which is standard practice for dotnet nuget push
                // The key is already in an environment variable and not displayed in normal output
                Run(
                    "dotnet",
                    $"nuget push \"{package}\" --api-key {apiKey} --source https://api.nuget.org/v3/index.json --skip-duplicate"
                );
            }
            catch (Exception ex)
            {
                Console.WriteLine($"Failed to push {package}: {ex.Message}");
                throw;
            }
        }
    }
);

Target("default", [Publish], () => Console.WriteLine("Done!"));

await RunTargetsAndExitAsync(args);

static async Task<(string version, bool isPrerelease)> GetVersion()
{
    // Check if current commit has a version tag
    var currentTag = (await GetGitOutput("tag", "--points-at HEAD"))
        .Split('\n', StringSplitOptions.RemoveEmptyEntries)
        .FirstOrDefault(tag => Regex.IsMatch(tag.Trim(), @"^\d+\.\d+\.\d+$"));

    if (!string.IsNullOrEmpty(currentTag))
    {
        // Tagged release - use the tag as version
        var version = currentTag.Trim();
        Console.WriteLine($"Building tagged release version: {version}");
        return (version, false);
    }
    else
    {
        // Not tagged - create prerelease version based on next minor version
        var allTags = (await GetGitOutput("tag", "--list"))
            .Split('\n', StringSplitOptions.RemoveEmptyEntries)
            .Where(tag => Regex.IsMatch(tag.Trim(), @"^\d+\.\d+\.\d+$"))
            .Select(tag => tag.Trim())
            .ToList();

        var lastTag = allTags.OrderBy(tag => Version.Parse(tag)).LastOrDefault() ?? "0.0.0";
        var lastVersion = Version.Parse(lastTag);

        // Increment minor version for next release
        var nextVersion = new Version(lastVersion.Major, lastVersion.Minor + 1, 0);

        // Use commit count since the last version tag if available; otherwise, fall back to total count
        var revListArgs = allTags.Any() ? $"--count {lastTag}..HEAD" : "--count HEAD";
        var commitCount = (await GetGitOutput("rev-list", revListArgs)).Trim();

        var version = $"{nextVersion}-beta.{commitCount}";
        Console.WriteLine($"Building prerelease version: {version}");
        return (version, true);
    }
}

static async Task<string> GetGitOutput(string command, string args)
{
    try
    {
        // Use SimpleExec's Read to execute git commands in a cross-platform way
        var (output, _) = await ReadAsync("git", $"{command} {args}");
        return output;
    }
    catch (Exception ex)
    {
        throw new Exception($"Git command failed: git {command} {args}\n{ex.Message}", ex);
    }
}
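A quick worked example of the versioning rule `GetVersion` implements, using hypothetical repository state:

```C#
// Hypothetical state: the newest x.y.z tag is 0.42.0, HEAD is untagged,
// and `git rev-list --count 0.42.0..HEAD` reports 17 commits.
var lastVersion = Version.Parse("0.42.0");
var nextVersion = new Version(lastVersion.Major, lastVersion.Minor + 1, 0);
Console.WriteLine($"{nextVersion}-beta.17"); // prints "0.43.0-beta.17"
// A commit tagged 0.43.0 would instead build the stable version "0.43.0".
```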

@@ -1,7 +1,7 @@
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <TargetFramework>net8.0</TargetFramework>
    <TargetFramework>net10.0</TargetFramework>
  </PropertyGroup>
  <ItemGroup>
    <PackageReference Include="Bullseye" />
@@ -1,12 +1,12 @@
{
  "version": 2,
  "dependencies": {
    "net8.0": {
    "net10.0": {
      "Bullseye": {
        "type": "Direct",
        "requested": "[6.0.0, )",
        "resolved": "6.0.0",
        "contentHash": "vgwwXfzs7jJrskWH7saHRMgPzziq/e86QZNWY1MnMxd7e+De7E7EX4K3C7yrvaK9y02SJoLxNxcLG/q5qUAghw=="
        "requested": "[6.1.0, )",
        "resolved": "6.1.0",
        "contentHash": "fltnAJDe0BEX5eymXGUq+il2rSUA0pHqUonNDRH2TrvRu8SkU17mYG0IVpdmG2ibtfhdjNrv4CuTCxHOwcozCA=="
      },
      "Glob": {
        "type": "Direct",
@@ -16,9 +16,9 @@
      },
      "SimpleExec": {
        "type": "Direct",
        "requested": "[12.0.0, )",
        "resolved": "12.0.0",
        "contentHash": "ptxlWtxC8vM6Y6e3h9ZTxBBkOWnWrm/Sa1HT+2i1xcXY3Hx2hmKDZP5RShPf8Xr9D+ivlrXNy57ktzyH8kyt+Q=="
        "requested": "[12.1.0, )",
        "resolved": "12.1.0",
        "contentHash": "PcCSAlMcKr5yTd571MgEMoGmoSr+omwziq2crB47lKP740lrmjuBocAUXHj+Q6LR6aUDFyhszot2wbtFJTClkA=="
      }
    }
  }

@@ -1,6 +1,6 @@
{
  "sdk": {
    "version": "8.0.100",
    "version": "10.0.100",
    "rollForward": "latestFeature"
  }
}

@@ -8,7 +8,7 @@ using SharpCompress.Readers;

namespace SharpCompress.Archives;

public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtractionListener
public abstract class AbstractArchive<TEntry, TVolume> : IArchive
    where TEntry : IArchiveEntry
    where TVolume : IVolume
{
@@ -17,11 +17,6 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
    private bool _disposed;
    private readonly SourceStream? _sourceStream;

    public event EventHandler<ArchiveExtractionEventArgs<IArchiveEntry>>? EntryExtractionBegin;
    public event EventHandler<ArchiveExtractionEventArgs<IArchiveEntry>>? EntryExtractionEnd;

    public event EventHandler<CompressedBytesReadEventArgs>? CompressedBytesRead;
    public event EventHandler<FilePartExtractionBeginEventArgs>? FilePartExtractionBegin;
    protected ReaderOptions ReaderOptions { get; }

    internal AbstractArchive(ArchiveType type, SourceStream sourceStream)
@@ -43,12 +38,6 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra

    public ArchiveType Type { get; }

    void IArchiveExtractionListener.FireEntryExtractionBegin(IArchiveEntry entry) =>
        EntryExtractionBegin?.Invoke(this, new ArchiveExtractionEventArgs<IArchiveEntry>(entry));

    void IArchiveExtractionListener.FireEntryExtractionEnd(IArchiveEntry entry) =>
        EntryExtractionEnd?.Invoke(this, new ArchiveExtractionEventArgs<IArchiveEntry>(entry));

    private static Stream CheckStreams(Stream stream)
    {
        if (!stream.CanSeek || !stream.CanRead)
@@ -99,38 +88,12 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
        }
    }

    void IArchiveExtractionListener.EnsureEntriesLoaded()
    private void EnsureEntriesLoaded()
    {
        _lazyEntries.EnsureFullyLoaded();
        _lazyVolumes.EnsureFullyLoaded();
    }

    void IExtractionListener.FireCompressedBytesRead(
        long currentPartCompressedBytes,
        long compressedReadBytes
    ) =>
        CompressedBytesRead?.Invoke(
            this,
            new CompressedBytesReadEventArgs(
                currentFilePartCompressedBytesRead: currentPartCompressedBytes,
                compressedBytesRead: compressedReadBytes
            )
        );

    void IExtractionListener.FireFilePartExtractionBegin(
        string name,
        long size,
        long compressedSize
    ) =>
        FilePartExtractionBegin?.Invoke(
            this,
            new FilePartExtractionBeginEventArgs(
                compressedSize: compressedSize,
                size: size,
                name: name
            )
        );

    /// <summary>
    /// Use this method to extract all entries in an archive in order.
    /// This is primarily for SOLID Rar Archives or 7Zip Archives as they need to be
@@ -146,11 +109,11 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
    {
        if (!IsSolid && Type != ArchiveType.SevenZip)
        {
            throw new InvalidOperationException(
            throw new SharpCompressException(
                "ExtractAllEntries can only be used on solid archives or 7Zip archives (which require random access)."
            );
        }
        ((IArchiveExtractionListener)this).EnsureEntriesLoaded();
        EnsureEntriesLoaded();
        return CreateReaderForSolidExtraction();
    }

@@ -161,6 +124,11 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
    /// </summary>
    public virtual bool IsSolid => false;

    /// <summary>
    /// Archive is ENCRYPTED (this means the Archive has password-protected files).
    /// </summary>
    public virtual bool IsEncrypted => false;

    /// <summary>
    /// The archive can find all the parts of the archive needed to fully extract the archive. This forces the parsing of the entire archive.
    /// </summary>
@@ -168,7 +136,7 @@ public abstract class AbstractArchive<TEntry, TVolume> : IArchive, IArchiveExtra
    {
        get
        {
            ((IArchiveExtractionListener)this).EnsureEntriesLoaded();
            EnsureEntriesLoaded();
            return Entries.All(x => x.IsComplete);
        }
    }

@@ -20,7 +20,7 @@ public static class ArchiveFactory
    public static IArchive Open(Stream stream, ReaderOptions? readerOptions = null)
    {
        readerOptions ??= new ReaderOptions();
        stream = new SharpCompressStream(stream, bufferSize: readerOptions.BufferSize);
        stream = SharpCompressStream.Create(stream, bufferSize: readerOptions.BufferSize);
        return FindFactory<IArchiveFactory>(stream).Open(stream, readerOptions);
    }

@@ -7,12 +7,6 @@ namespace SharpCompress.Archives;

public interface IArchive : IDisposable
{
    event EventHandler<ArchiveExtractionEventArgs<IArchiveEntry>> EntryExtractionBegin;
    event EventHandler<ArchiveExtractionEventArgs<IArchiveEntry>> EntryExtractionEnd;

    event EventHandler<CompressedBytesReadEventArgs> CompressedBytesRead;
    event EventHandler<FilePartExtractionBeginEventArgs> FilePartExtractionBegin;

    IEnumerable<IArchiveEntry> Entries { get; }
    IEnumerable<IVolume> Volumes { get; }

@@ -1,3 +1,4 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
@@ -8,126 +9,153 @@ namespace SharpCompress.Archives;

public static class IArchiveEntryExtensions
{
    public static void WriteTo(this IArchiveEntry archiveEntry, Stream streamToWriteTo)
    private const int BufferSize = 81920;

    /// <param name="archiveEntry">The archive entry to extract.</param>
    extension(IArchiveEntry archiveEntry)
    {
        if (archiveEntry.IsDirectory)
        /// <summary>
        /// Extract entry to the specified stream.
        /// </summary>
        /// <param name="streamToWriteTo">The stream to write the entry content to.</param>
        /// <param name="progress">Optional progress reporter for tracking extraction progress.</param>
        public void WriteTo(Stream streamToWriteTo, IProgress<ProgressReport>? progress = null)
        {
            throw new ExtractionException("Entry is a file directory and cannot be extracted.");
            if (archiveEntry.IsDirectory)
            {
                throw new ExtractionException("Entry is a file directory and cannot be extracted.");
            }

            using var entryStream = archiveEntry.OpenEntryStream();
            var sourceStream = WrapWithProgress(entryStream, archiveEntry, progress);
            sourceStream.CopyTo(streamToWriteTo, BufferSize);
        }

        var streamListener = (IArchiveExtractionListener)archiveEntry.Archive;
        streamListener.EnsureEntriesLoaded();
        streamListener.FireEntryExtractionBegin(archiveEntry);
        streamListener.FireFilePartExtractionBegin(
            archiveEntry.Key ?? "Key",
            archiveEntry.Size,
            archiveEntry.CompressedSize
        );
        var entryStream = archiveEntry.OpenEntryStream();
        using (entryStream)
        /// <summary>
        /// Extract entry to the specified stream asynchronously.
        /// </summary>
        /// <param name="streamToWriteTo">The stream to write the entry content to.</param>
        /// <param name="cancellationToken">Cancellation token.</param>
        /// <param name="progress">Optional progress reporter for tracking extraction progress.</param>
        public async Task WriteToAsync(
            Stream streamToWriteTo,
            IProgress<ProgressReport>? progress = null,
            CancellationToken cancellationToken = default
        )
        {
            using Stream s = new ListeningStream(streamListener, entryStream);
            s.CopyTo(streamToWriteTo);
            if (archiveEntry.IsDirectory)
            {
                throw new ExtractionException("Entry is a file directory and cannot be extracted.");
            }

            using var entryStream = await archiveEntry.OpenEntryStreamAsync(cancellationToken);
            var sourceStream = WrapWithProgress(entryStream, archiveEntry, progress);
            await sourceStream
                .CopyToAsync(streamToWriteTo, BufferSize, cancellationToken)
                .ConfigureAwait(false);
        }
        streamListener.FireEntryExtractionEnd(archiveEntry);
    }

    public static async Task WriteToAsync(
        this IArchiveEntry archiveEntry,
        Stream streamToWriteTo,
        CancellationToken cancellationToken = default
    private static Stream WrapWithProgress(
        Stream source,
        IArchiveEntry entry,
        IProgress<ProgressReport>? progress
    )
    {
        if (archiveEntry.IsDirectory)
        if (progress is null)
        {
            throw new ExtractionException("Entry is a file directory and cannot be extracted.");
            return source;
        }

        var streamListener = (IArchiveExtractionListener)archiveEntry.Archive;
        streamListener.EnsureEntriesLoaded();
        streamListener.FireEntryExtractionBegin(archiveEntry);
        streamListener.FireFilePartExtractionBegin(
            archiveEntry.Key ?? "Key",
            archiveEntry.Size,
            archiveEntry.CompressedSize
        var entryPath = entry.Key ?? string.Empty;
        var totalBytes = GetEntrySizeSafe(entry);
        return new ProgressReportingStream(
            source,
            progress,
            entryPath,
            totalBytes,
            leaveOpen: true
        );
        var entryStream = archiveEntry.OpenEntryStream();
        using (entryStream)
        {
            using Stream s = new ListeningStream(streamListener, entryStream);
            await s.CopyToAsync(streamToWriteTo, 81920, cancellationToken).ConfigureAwait(false);
        }
        streamListener.FireEntryExtractionEnd(archiveEntry);
    }

    /// <summary>
    /// Extract to specific directory, retaining filename
    /// </summary>
    public static void WriteToDirectory(
        this IArchiveEntry entry,
        string destinationDirectory,
        ExtractionOptions? options = null
    ) =>
        ExtractionMethods.WriteEntryToDirectory(
            entry,
            destinationDirectory,
            options,
            entry.WriteToFile
        );
    private static long? GetEntrySizeSafe(IArchiveEntry entry)
    {
        try
        {
            var size = entry.Size;
            return size >= 0 ? size : null;
        }
        catch (NotImplementedException)
        {
            return null;
        }
    }

    /// <summary>
    /// Extract to specific directory asynchronously, retaining filename
    /// </summary>
    public static Task WriteToDirectoryAsync(
        this IArchiveEntry entry,
        string destinationDirectory,
        ExtractionOptions? options = null,
        CancellationToken cancellationToken = default
    ) =>
        ExtractionMethods.WriteEntryToDirectoryAsync(
            entry,
            destinationDirectory,
            options,
            (x, opt) => entry.WriteToFileAsync(x, opt, cancellationToken),
            cancellationToken
        );
    extension(IArchiveEntry entry)
    {
        /// <summary>
        /// Extract to specific directory, retaining filename
        /// </summary>
        public void WriteToDirectory(
            string destinationDirectory,
            ExtractionOptions? options = null
        ) =>
            ExtractionMethods.WriteEntryToDirectory(
                entry,
                destinationDirectory,
                options,
                entry.WriteToFile
            );

    /// <summary>
    /// Extract to specific file
    /// </summary>
    public static void WriteToFile(
        this IArchiveEntry entry,
        string destinationFileName,
        ExtractionOptions? options = null
    ) =>
        ExtractionMethods.WriteEntryToFile(
            entry,
            destinationFileName,
            options,
            (x, fm) =>
            {
                using var fs = File.Open(destinationFileName, fm);
                entry.WriteTo(fs);
            }
        );
        /// <summary>
        /// Extract to specific directory asynchronously, retaining filename
        /// </summary>
        public Task WriteToDirectoryAsync(
            string destinationDirectory,
            ExtractionOptions? options = null,
            CancellationToken cancellationToken = default
        ) =>
            ExtractionMethods.WriteEntryToDirectoryAsync(
                entry,
                destinationDirectory,
                options,
                entry.WriteToFileAsync,
                cancellationToken
            );

    /// <summary>
    /// Extract to specific file asynchronously
    /// </summary>
    public static Task WriteToFileAsync(
        this IArchiveEntry entry,
        string destinationFileName,
        ExtractionOptions? options = null,
        CancellationToken cancellationToken = default
    ) =>
        ExtractionMethods.WriteEntryToFileAsync(
            entry,
            destinationFileName,
            options,
            async (x, fm) =>
            {
                using var fs = File.Open(destinationFileName, fm);
                await entry.WriteToAsync(fs, cancellationToken).ConfigureAwait(false);
            }
        );
        /// <summary>
        /// Extract to specific file
        /// </summary>
        public void WriteToFile(string destinationFileName, ExtractionOptions? options = null) =>
            ExtractionMethods.WriteEntryToFile(
                entry,
                destinationFileName,
                options,
                (x, fm) =>
                {
                    using var fs = File.Open(destinationFileName, fm);
                    entry.WriteTo(fs);
                }
            );

        /// <summary>
        /// Extract to specific file asynchronously
        /// </summary>
        public Task WriteToFileAsync(
            string destinationFileName,
            ExtractionOptions? options = null,
            CancellationToken cancellationToken = default
        ) =>
            ExtractionMethods.WriteEntryToFileAsync(
                entry,
                destinationFileName,
                options,
                async (x, fm, ct) =>
                {
                    using var fs = File.Open(destinationFileName, fm);
                    await entry.WriteToAsync(fs, null, ct).ConfigureAwait(false);
                },
                cancellationToken
            );
    }
}
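The rewrite above moves these helpers into C# `extension` members, so call sites read like instance methods. A minimal usage sketch (archive and output paths are hypothetical):

```C#
using System.IO;
using System.Linq;
using SharpCompress.Archives;
using SharpCompress.Archives.Zip;

using var archive = ZipArchive.Open("archive.zip");
foreach (var entry in archive.Entries.Where(e => !e.IsDirectory))
{
    // Resolves to the extension member WriteToDirectory(string, ExtractionOptions?).
    entry.WriteToDirectory(@"C:\output");
}
```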

@@ -1,8 +1,8 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Common;
using SharpCompress.Readers;

@@ -10,76 +10,159 @@ namespace SharpCompress.Archives;

public static class IArchiveExtensions
{
    /// <summary>
    /// Extract to specific directory, retaining filename
    /// </summary>
    public static void WriteToDirectory(
        this IArchive archive,
        string destinationDirectory,
        ExtractionOptions? options = null
    )
    {
        using var reader = archive.ExtractAllEntries();
        reader.WriteAllToDirectory(destinationDirectory, options);
    }

    /// <summary>
    /// Extracts the archive to the destination directory. Directories will be created as needed.
    /// </summary>
    /// <param name="archive">The archive to extract.</param>
    /// <param name="destination">The folder to extract into.</param>
    /// <param name="progressReport">Optional progress report callback.</param>
    /// <param name="cancellationToken">Optional cancellation token.</param>
    public static void ExtractToDirectory(
        this IArchive archive,
        string destination,
        Action<double>? progressReport = null,
        CancellationToken cancellationToken = default
    )
    extension(IArchive archive)
    {
        // Prepare for progress reporting
        var totalBytes = archive.TotalUncompressSize;
        var bytesRead = 0L;

        // Tracking for created directories.
        var seenDirectories = new HashSet<string>();

        // Extract
        foreach (var entry in archive.Entries)
        /// <summary>
        /// Extract to specific directory with progress reporting
        /// </summary>
        /// <param name="destinationDirectory">The folder to extract into.</param>
        /// <param name="options">Extraction options.</param>
        /// <param name="progress">Optional progress reporter for tracking extraction progress.</param>
        public void WriteToDirectory(
            string destinationDirectory,
            ExtractionOptions? options = null,
            IProgress<ProgressReport>? progress = null
        )
        {
            cancellationToken.ThrowIfCancellationRequested();

            if (entry.IsDirectory)
            // For solid archives (Rar, 7Zip), use the optimized reader-based approach
            if (archive.IsSolid || archive.Type == ArchiveType.SevenZip)
            {
                var dirPath = Path.Combine(destination, entry.Key.NotNull("Entry Key is null"));
                if (
                    Path.GetDirectoryName(dirPath + "/") is { } emptyDirectory
                    && seenDirectories.Add(dirPath)
                )
                {
                    Directory.CreateDirectory(emptyDirectory);
                }
                continue;
                using var reader = archive.ExtractAllEntries();
                reader.WriteAllToDirectory(destinationDirectory, options);
            }

            // Create each directory if not already created
            var path = Path.Combine(destination, entry.Key.NotNull("Entry Key is null"));
            if (Path.GetDirectoryName(path) is { } directory)
            else
            {
                if (!Directory.Exists(directory) && !seenDirectories.Contains(directory))
                {
                    Directory.CreateDirectory(directory);
                    seenDirectories.Add(directory);
                }
                // For non-solid archives, extract entries directly
                archive.WriteToDirectoryInternal(destinationDirectory, options, progress);
            }
        }

            // Write file
            using var fs = File.OpenWrite(path);
            entry.WriteTo(fs);
        private void WriteToDirectoryInternal(
            string destinationDirectory,
            ExtractionOptions? options,
            IProgress<ProgressReport>? progress
        )
        {
            // Prepare for progress reporting
            var totalBytes = archive.TotalUncompressSize;
            var bytesRead = 0L;

            // Update progress
            bytesRead += entry.Size;
            progressReport?.Invoke(bytesRead / (double)totalBytes);
            // Tracking for created directories.
            var seenDirectories = new HashSet<string>();

            // Extract
            foreach (var entry in archive.Entries)
            {
                if (entry.IsDirectory)
                {
                    var dirPath = Path.Combine(
                        destinationDirectory,
                        entry.Key.NotNull("Entry Key is null")
                    );
                    if (
                        Path.GetDirectoryName(dirPath + "/") is { } parentDirectory
                        && seenDirectories.Add(dirPath)
                    )
                    {
                        Directory.CreateDirectory(parentDirectory);
                    }
                    continue;
                }

                // Use the entry's WriteToDirectory method which respects ExtractionOptions
                entry.WriteToDirectory(destinationDirectory, options);

                // Update progress
                bytesRead += entry.Size;
                progress?.Report(
                    new ProgressReport(entry.Key ?? string.Empty, bytesRead, totalBytes)
                );
            }
        }

        /// <summary>
        /// Extract to specific directory asynchronously with progress reporting and cancellation support
        /// </summary>
        /// <param name="destinationDirectory">The folder to extract into.</param>
        /// <param name="options">Extraction options.</param>
        /// <param name="progress">Optional progress reporter for tracking extraction progress.</param>
        /// <param name="cancellationToken">Optional cancellation token.</param>
        public async Task WriteToDirectoryAsync(
            string destinationDirectory,
            ExtractionOptions? options = null,
            IProgress<ProgressReport>? progress = null,
            CancellationToken cancellationToken = default
        )
        {
            // For solid archives (Rar, 7Zip), use the optimized reader-based approach
            if (archive.IsSolid || archive.Type == ArchiveType.SevenZip)
            {
                using var reader = archive.ExtractAllEntries();
                await reader.WriteAllToDirectoryAsync(
                    destinationDirectory,
                    options,
                    cancellationToken
                );
            }
            else
            {
                // For non-solid archives, extract entries directly
                await archive.WriteToDirectoryAsyncInternal(
                    destinationDirectory,
                    options,
                    progress,
                    cancellationToken
                );
            }
        }

        private async Task WriteToDirectoryAsyncInternal(
            string destinationDirectory,
            ExtractionOptions? options,
            IProgress<ProgressReport>? progress,
            CancellationToken cancellationToken
        )
        {
            // Prepare for progress reporting
            var totalBytes = archive.TotalUncompressSize;
            var bytesRead = 0L;

            // Tracking for created directories.
            var seenDirectories = new HashSet<string>();

            // Extract
            foreach (var entry in archive.Entries)
            {
                cancellationToken.ThrowIfCancellationRequested();

                if (entry.IsDirectory)
                {
                    var dirPath = Path.Combine(
                        destinationDirectory,
                        entry.Key.NotNull("Entry Key is null")
                    );
                    if (
                        Path.GetDirectoryName(dirPath + "/") is { } parentDirectory
                        && seenDirectories.Add(dirPath)
                    )
                    {
                        Directory.CreateDirectory(parentDirectory);
                    }
                    continue;
                }

                // Use the entry's WriteToDirectoryAsync method which respects ExtractionOptions
                await entry
                    .WriteToDirectoryAsync(destinationDirectory, options, cancellationToken)
                    .ConfigureAwait(false);

                // Update progress
                bytesRead += entry.Size;
                progress?.Report(
                    new ProgressReport(entry.Key ?? string.Empty, bytesRead, totalBytes)
                );
            }
        }
    }
}
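Putting the archive-level members together: a sketch of cancellable extraction, assuming only the signatures above (the archive name and the five-minute budget are arbitrary). Per the branch logic, solid Rar and 7Zip archives are routed through `ExtractAllEntries` internally; other formats extract entry-by-entry and report progress:

```C#
using var cts = new CancellationTokenSource(TimeSpan.FromMinutes(5));
using var archive = ArchiveFactory.Open("backup.7z");
await archive.WriteToDirectoryAsync(
    @"C:\output",
    new ExtractionOptions { ExtractFullPath = true, Overwrite = true },
    progress: null,
    cts.Token
);
```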

@@ -1,10 +0,0 @@
using SharpCompress.Common;

namespace SharpCompress.Archives;

internal interface IArchiveExtractionListener : IExtractionListener
{
    void EnsureEntriesLoaded();
    void FireEntryExtractionBegin(IArchiveEntry entry);
    void FireEntryExtractionEnd(IArchiveEntry entry);
}
@@ -84,6 +84,8 @@ public class RarArchive : AbstractArchive<RarArchiveEntry, RarVolume>

    public override bool IsSolid => Volumes.First().IsSolidArchive;

    public override bool IsEncrypted => Entries.First(x => !x.IsDirectory).IsEncrypted;

    public virtual int MinVersion => Volumes.First().MinVersion;
    public virtual int MaxVersion => Volumes.First().MaxVersion;

@@ -70,24 +70,51 @@ public class RarArchiveEntry : RarEntry, IArchiveEntry

    public Stream OpenEntryStream()
    {
        RarStream stream;
        if (IsRarV3)
        {
            return new RarStream(
            stream = new RarStream(
                archive.UnpackV1.Value,
                FileHeader,
                new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>(), archive)
                new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>())
            );
        }
        else
        {
            stream = new RarStream(
                archive.UnpackV2017.Value,
                FileHeader,
                new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>())
            );
        }

        return new RarStream(
            archive.UnpackV2017.Value,
            FileHeader,
            new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>(), archive)
        );
        stream.Initialize();
        return stream;
    }

    public Task<Stream> OpenEntryStreamAsync(CancellationToken cancellationToken = default) =>
        Task.FromResult(OpenEntryStream());
    public async Task<Stream> OpenEntryStreamAsync(CancellationToken cancellationToken = default)
    {
        RarStream stream;
        if (IsRarV3)
        {
            stream = new RarStream(
                archive.UnpackV1.Value,
                FileHeader,
                new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>())
            );
        }
        else
        {
            stream = new RarStream(
                archive.UnpackV2017.Value,
                FileHeader,
                new MultiVolumeReadOnlyStream(Parts.Cast<RarFilePart>())
            );
        }

        await stream.InitializeAsync(cancellationToken);
        return stream;
    }

    public bool IsComplete
    {
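Both paths now hand back a stream that is already initialized (`Initialize` on the sync path, `InitializeAsync` with the caller's token on the async path). A usage sketch of the async path (paths hypothetical):

```C#
using var archive = RarArchive.Open("archive.rar");
foreach (var entry in archive.Entries.Where(e => !e.IsDirectory))
{
    using var entryStream = await entry.OpenEntryStreamAsync(cancellationToken);
    using var output = File.Create(Path.Combine(@"C:\output", entry.Key!));
    await entryStream.CopyToAsync(output, 81920, cancellationToken);
}
```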

@@ -2,6 +2,8 @@ using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Common;
using SharpCompress.Common.SevenZip;
using SharpCompress.Compressors.LZMA.Utilites;
@@ -205,15 +207,15 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
            .GroupBy(x => x.FilePart.Folder)
            .Any(folder => folder.Count() > 1);

    public override bool IsEncrypted => Entries.First(x => !x.IsDirectory).IsEncrypted;

    public override long TotalSize =>
        _database?._packSizes.Aggregate(0L, (total, packSize) => total + packSize) ?? 0;

    private sealed class SevenZipReader : AbstractReader<SevenZipEntry, SevenZipVolume>
    {
        private readonly SevenZipArchive _archive;
        private CFolder? _currentFolder;
        private Stream? _currentStream;
        private CFileItem? _currentItem;
        private SevenZipEntry? _currentEntry;

        internal SevenZipReader(ReaderOptions readerOptions, SevenZipArchive archive)
            : base(readerOptions, ArchiveType.SevenZip) => this._archive = archive;
@@ -226,40 +228,135 @@ public class SevenZipArchive : AbstractArchive<SevenZipArchiveEntry, SevenZipVol
            stream.Position = 0;
            foreach (var dir in entries.Where(x => x.IsDirectory))
            {
                _currentEntry = dir;
                yield return dir;
            }
            foreach (
                var group in entries.Where(x => !x.IsDirectory).GroupBy(x => x.FilePart.Folder)
            )
            // For non-directory entries, yield them without creating shared streams
            // Each call to GetEntryStream() will create a fresh decompression stream
            // to avoid state corruption issues with async operations
            foreach (var entry in entries.Where(x => !x.IsDirectory))
            {
                _currentFolder = group.Key;
                if (group.Key is null)
                {
                    _currentStream = Stream.Null;
                }
                else
                {
                    _currentStream = _archive._database?.GetFolderStream(
                        stream,
                        _currentFolder,
                        new PasswordProvider(Options.Password)
                    );
                }
                foreach (var entry in group)
                {
                    _currentItem = entry.FilePart.Header;
                    yield return entry;
                }
                _currentEntry = entry;
                yield return entry;
            }
        }

        protected override EntryStream GetEntryStream() =>
            CreateEntryStream(
                new ReadOnlySubStream(
                    _currentStream.NotNull("currentStream is not null"),
                    _currentItem?.Size ?? 0
                )
            );
        protected override EntryStream GetEntryStream()
        {
            // Create a fresh decompression stream for each file (no state sharing).
            // However, the LZMA decoder has bugs in its async implementation that cause
            // state corruption even on fresh streams. The SyncOnlyStream wrapper
            // works around these bugs by forcing async operations to use sync equivalents.
            //
            // TODO: Fix the LZMA decoder async bugs (in LzmaStream, Decoder, OutWindow)
            // so this wrapper is no longer necessary.
            var entry = _currentEntry.NotNull("currentEntry is not null");
            if (entry.IsDirectory)
            {
                return CreateEntryStream(Stream.Null);
            }
            return CreateEntryStream(new SyncOnlyStream(entry.FilePart.GetCompressedStream()));
        }
    }

    /// <summary>
    /// WORKAROUND: Forces async operations to use synchronous equivalents.
    /// This is necessary because the LZMA decoder has bugs in its async implementation
    /// that cause state corruption (IndexOutOfRangeException, DataErrorException).
    ///
    /// The proper fix would be to repair the LZMA decoder's async methods
    /// (LzmaStream.ReadAsync, Decoder.CodeAsync, OutWindow async operations),
    /// but that requires deep changes to the decoder state machine.
    /// </summary>
    private sealed class SyncOnlyStream : Stream
    {
        private readonly Stream _baseStream;

        public SyncOnlyStream(Stream baseStream) => _baseStream = baseStream;

        public override bool CanRead => _baseStream.CanRead;
        public override bool CanSeek => _baseStream.CanSeek;
        public override bool CanWrite => _baseStream.CanWrite;
        public override long Length => _baseStream.Length;
        public override long Position
        {
            get => _baseStream.Position;
            set => _baseStream.Position = value;
        }

        public override void Flush() => _baseStream.Flush();

        public override int Read(byte[] buffer, int offset, int count) =>
            _baseStream.Read(buffer, offset, count);

        public override long Seek(long offset, SeekOrigin origin) =>
            _baseStream.Seek(offset, origin);

        public override void SetLength(long value) => _baseStream.SetLength(value);

        public override void Write(byte[] buffer, int offset, int count) =>
            _baseStream.Write(buffer, offset, count);

        // Force async operations to use sync equivalents to avoid LZMA decoder bugs
        public override Task<int> ReadAsync(
            byte[] buffer,
            int offset,
            int count,
            CancellationToken cancellationToken
        )
        {
            cancellationToken.ThrowIfCancellationRequested();
            return Task.FromResult(_baseStream.Read(buffer, offset, count));
        }

        public override Task WriteAsync(
            byte[] buffer,
            int offset,
            int count,
            CancellationToken cancellationToken
        )
        {
            cancellationToken.ThrowIfCancellationRequested();
            _baseStream.Write(buffer, offset, count);
            return Task.CompletedTask;
        }

        public override Task FlushAsync(CancellationToken cancellationToken)
        {
            cancellationToken.ThrowIfCancellationRequested();
            _baseStream.Flush();
            return Task.CompletedTask;
        }

#if !NETFRAMEWORK && !NETSTANDARD2_0
        public override ValueTask<int> ReadAsync(
            Memory<byte> buffer,
            CancellationToken cancellationToken = default
        )
        {
            cancellationToken.ThrowIfCancellationRequested();
            return new ValueTask<int>(_baseStream.Read(buffer.Span));
        }

        public override ValueTask WriteAsync(
            ReadOnlyMemory<byte> buffer,
            CancellationToken cancellationToken = default
        )
        {
            cancellationToken.ThrowIfCancellationRequested();
            _baseStream.Write(buffer.Span);
            return ValueTask.CompletedTask;
        }
#endif

        protected override void Dispose(bool disposing)
        {
            if (disposing)
            {
                _baseStream.Dispose();
            }
            base.Dispose(disposing);
        }
    }

    private class PasswordProvider : IPasswordProvider

@@ -1,7 +1,8 @@
using System;
using System.Runtime.CompilerServices;

[assembly: CLSCompliant(true)]
// CLSCompliant(false) is required because ZStandard integration uses unsafe code
[assembly: CLSCompliant(false)]
[assembly: InternalsVisibleTo(
    "SharpCompress.Test,PublicKey=0024000004800000940000000602000000240000525341310004000001000100158bebf1433f76dffc356733c138babea7a47536c65ed8009b16372c6f4edbb20554db74a62687f56b97c20a6ce8c4b123280279e33c894e7b3aa93ab3c573656fde4db576cfe07dba09619ead26375b25d2c4a8e43f7be257d712b0dd2eb546f67adb09281338618a58ac834fc038dd7e2740a7ab3591826252e4f4516306dc"
)]

@@ -57,7 +57,7 @@ namespace SharpCompress.Common.Arc
            return value switch
            {
                1 or 2 => CompressionType.None,
                3 => CompressionType.RLE90,
                3 => CompressionType.Packed,
                4 => CompressionType.Squeezed,
                5 or 6 or 7 or 8 => CompressionType.Crunched,
                9 => CompressionType.Squashed,

@@ -44,7 +44,7 @@ namespace SharpCompress.Common.Arc
                        Header.CompressedSize
                    );
                    break;
                case CompressionType.RLE90:
                case CompressionType.Packed:
                    compressedStream = new RunLength90Stream(
                        _stream,
                        (int)Header.CompressedSize
@@ -54,6 +54,14 @@ namespace SharpCompress.Common.Arc
                    compressedStream = new SqueezeStream(_stream, (int)Header.CompressedSize);
                    break;
                case CompressionType.Crunched:
                    if (Header.OriginalSize > 128 * 1024)
                    {
                        throw new NotSupportedException(
                            "CompressionMethod: "
                                + Header.CompressionMethod
                                + " with size > 128KB"
                        );
                    }
                    compressedStream = new ArcLzwStream(
                        _stream,
                        (int)Header.CompressedSize,

@@ -1,10 +0,0 @@
using System;

namespace SharpCompress.Common;

public class ArchiveExtractionEventArgs<T> : EventArgs
{
    internal ArchiveExtractionEventArgs(T entry) => Item = entry;

    public T Item { get; }
}
@@ -8,4 +8,5 @@ public enum ArchiveType
    SevenZip,
    GZip,
    Arc,
    Arj,
}

58
src/SharpCompress/Common/Arj/ArjEntry.cs
Normal file
@@ -0,0 +1,58 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using SharpCompress.Common.Arc;
using SharpCompress.Common.Arj.Headers;

namespace SharpCompress.Common.Arj
{
    public class ArjEntry : Entry
    {
        private readonly ArjFilePart _filePart;

        internal ArjEntry(ArjFilePart filePart)
        {
            _filePart = filePart;
        }

        public override long Crc => _filePart.Header.OriginalCrc32;

        public override string? Key => _filePart?.Header.Name;

        public override string? LinkTarget => null;

        public override long CompressedSize => _filePart?.Header.CompressedSize ?? 0;

        public override CompressionType CompressionType
        {
            get
            {
                if (_filePart.Header.CompressionMethod == CompressionMethod.Stored)
                {
                    return CompressionType.None;
                }
                return CompressionType.ArjLZ77;
            }
        }

        public override long Size => _filePart?.Header.OriginalSize ?? 0;

        public override DateTime? LastModifiedTime => _filePart.Header.DateTimeModified.DateTime;

        public override DateTime? CreatedTime => _filePart.Header.DateTimeCreated.DateTime;

        public override DateTime? LastAccessedTime => _filePart.Header.DateTimeAccessed.DateTime;

        public override DateTime? ArchivedTime => null;

        public override bool IsEncrypted => false;

        public override bool IsDirectory => _filePart.Header.FileType == FileType.Directory;

        public override bool IsSplitAfter => false;

        internal override IEnumerable<FilePart> Parts => _filePart.Empty();
    }
}

72
src/SharpCompress/Common/Arj/ArjFilePart.cs
Normal file
@@ -0,0 +1,72 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using SharpCompress.Common.Arj.Headers;
using SharpCompress.Compressors.Arj;
using SharpCompress.IO;

namespace SharpCompress.Common.Arj
{
    public class ArjFilePart : FilePart
    {
        private readonly Stream _stream;
        internal ArjLocalHeader Header { get; set; }

        internal ArjFilePart(ArjLocalHeader localArjHeader, Stream seekableStream)
            : base(localArjHeader.ArchiveEncoding)
        {
            _stream = seekableStream;
            Header = localArjHeader;
        }

        internal override string? FilePartName => Header.Name;

        internal override Stream GetCompressedStream()
        {
            if (_stream != null)
            {
                Stream compressedStream;
                switch (Header.CompressionMethod)
                {
                    case CompressionMethod.Stored:
                        compressedStream = new ReadOnlySubStream(
                            _stream,
                            Header.DataStartPosition,
                            Header.CompressedSize
                        );
                        break;
                    case CompressionMethod.CompressedMost:
                    case CompressionMethod.Compressed:
                    case CompressionMethod.CompressedFaster:
                        if (Header.OriginalSize > 128 * 1024)
                        {
                            throw new NotSupportedException(
                                "CompressionMethod: "
                                    + Header.CompressionMethod
                                    + " with size > 128KB"
                            );
                        }
                        compressedStream = new LhaStream<Lh7DecoderCfg>(
                            _stream,
                            (int)Header.OriginalSize
                        );
                        break;
                    case CompressionMethod.CompressedFastest:
                        compressedStream = new LHDecoderStream(_stream, (int)Header.OriginalSize);
                        break;
                    default:
                        throw new NotSupportedException(
                            "CompressionMethod: " + Header.CompressionMethod
                        );
                }
                return compressedStream;
            }
            return _stream.NotNull();
        }

        internal override Stream GetRawStream() => _stream;
    }
}

36
src/SharpCompress/Common/Arj/ArjVolume.cs
Normal file
@@ -0,0 +1,36 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using SharpCompress.Common.Rar;
using SharpCompress.Common.Rar.Headers;
using SharpCompress.Readers;

namespace SharpCompress.Common.Arj
{
    public class ArjVolume : Volume
    {
        public ArjVolume(Stream stream, ReaderOptions readerOptions, int index = 0)
            : base(stream, readerOptions, index) { }

        public override bool IsFirstVolume
        {
            get { return true; }
        }

        /// <summary>
        /// ArjArchive is part of a multi-part archive.
        /// </summary>
        public override bool IsMultiVolume
        {
            get { return false; }
        }

        internal IEnumerable<ArjFilePart> GetVolumeFileParts()
        {
            return new List<ArjFilePart>();
        }
    }
}

142
src/SharpCompress/Common/Arj/Headers/ArjHeader.cs
Normal file
@@ -0,0 +1,142 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using SharpCompress.Common.Zip.Headers;
using SharpCompress.Crypto;

namespace SharpCompress.Common.Arj.Headers
{
    public enum ArjHeaderType
    {
        MainHeader,
        LocalHeader,
    }

    public abstract class ArjHeader
    {
        private const int FIRST_HDR_SIZE = 34;
        private const ushort ARJ_MAGIC = 0xEA60;

        public ArjHeader(ArjHeaderType type)
        {
            ArjHeaderType = type;
        }

        public ArjHeaderType ArjHeaderType { get; }
        public byte Flags { get; set; }
        public FileType FileType { get; set; }

        public abstract ArjHeader? Read(Stream reader);

        public byte[] ReadHeader(Stream stream)
        {
            // check for magic bytes
            Span<byte> magic = stackalloc byte[2];
            if (stream.Read(magic) != 2)
            {
                return Array.Empty<byte>();
            }

            var magicValue = (ushort)(magic[0] | magic[1] << 8);
            if (magicValue != ARJ_MAGIC)
            {
                throw new InvalidDataException("Not an ARJ file (wrong magic bytes)");
            }

            // read header_size
            byte[] headerBytes = new byte[2];
            stream.Read(headerBytes, 0, 2);
            var headerSize = (ushort)(headerBytes[0] | headerBytes[1] << 8);
            if (headerSize < 1)
            {
                return Array.Empty<byte>();
            }

            var body = new byte[headerSize];
            var read = stream.Read(body, 0, headerSize);
            if (read < headerSize)
            {
                return Array.Empty<byte>();
            }

            byte[] crc = new byte[4];
            read = stream.Read(crc, 0, 4);
            var checksum = Crc32Stream.Compute(body);
            // Compute the hash value
            if (checksum != BitConverter.ToUInt32(crc, 0))
            {
                throw new InvalidDataException("Header checksum is invalid");
            }
            return body;
        }

        protected List<byte[]> ReadExtendedHeaders(Stream reader)
        {
            List<byte[]> extendedHeader = new List<byte[]>();
            byte[] buffer = new byte[2];

            while (true)
            {
                int bytesRead = reader.Read(buffer, 0, 2);
                if (bytesRead < 2)
                {
                    throw new EndOfStreamException(
                        "Unexpected end of stream while reading extended header size."
                    );
                }

                var extHeaderSize = (ushort)(buffer[0] | (buffer[1] << 8));
                if (extHeaderSize == 0)
                {
                    return extendedHeader;
                }

                byte[] header = new byte[extHeaderSize];
                bytesRead = reader.Read(header, 0, extHeaderSize);
                if (bytesRead < extHeaderSize)
                {
                    throw new EndOfStreamException(
                        "Unexpected end of stream while reading extended header data."
                    );
                }

                byte[] crc = new byte[4];
                bytesRead = reader.Read(crc, 0, 4);
                if (bytesRead < 4)
                {
                    throw new EndOfStreamException(
                        "Unexpected end of stream while reading extended header CRC."
                    );
                }

                var checksum = Crc32Stream.Compute(header);
                if (checksum != BitConverter.ToUInt32(crc, 0))
                {
                    throw new InvalidDataException("Extended header checksum is invalid");
                }

                extendedHeader.Add(header);
            }
        }

        // Flag helpers
        public bool IsGabled => (Flags & 0x01) != 0;
        public bool IsAnsiPage => (Flags & 0x02) != 0;
        public bool IsVolume => (Flags & 0x04) != 0;
        public bool IsArjProtected => (Flags & 0x08) != 0;
        public bool IsPathSym => (Flags & 0x10) != 0;
        public bool IsBackup => (Flags & 0x20) != 0;
        public bool IsSecured => (Flags & 0x40) != 0;
        public bool IsAltName => (Flags & 0x80) != 0;

        public static FileType FileTypeFromByte(byte value)
        {
            return Enum.IsDefined(typeof(FileType), value)
                ? (FileType)value
                : Headers.FileType.Unknown;
        }
    }
}
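For reference, the block framing `ReadHeader` consumes is: a two-byte magic (0xEA60), a two-byte little-endian body size, the body itself, then a CRC-32 of the body. A standalone sketch of the prefix parse (assumes a .NET 7+ `Stream.ReadExactly`; `stream` is a placeholder):

```C#
using System.Buffers.Binary;
using System.IO;

Span<byte> prefix = stackalloc byte[4];
stream.ReadExactly(prefix);
ushort magic = BinaryPrimitives.ReadUInt16LittleEndian(prefix);           // expect 0xEA60
ushort headerSize = BinaryPrimitives.ReadUInt16LittleEndian(prefix[2..]); // body length
if (magic != 0xEA60)
{
    throw new InvalidDataException("Not an ARJ file (wrong magic bytes)");
}
```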

161
src/SharpCompress/Common/Arj/Headers/ArjLocalHeader.cs
Normal file
@@ -0,0 +1,161 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading.Tasks;

namespace SharpCompress.Common.Arj.Headers
{
    public class ArjLocalHeader : ArjHeader
    {
        public ArchiveEncoding ArchiveEncoding { get; }
        public long DataStartPosition { get; protected set; }

        public byte ArchiverVersionNumber { get; set; }
        public byte MinVersionToExtract { get; set; }
        public HostOS HostOS { get; set; }
        public CompressionMethod CompressionMethod { get; set; }
        public DosDateTime DateTimeModified { get; set; } = new DosDateTime(0);
        public long CompressedSize { get; set; }
        public long OriginalSize { get; set; }
        public long OriginalCrc32 { get; set; }
        public int FileSpecPosition { get; set; }
        public int FileAccessMode { get; set; }
        public byte FirstChapter { get; set; }
        public byte LastChapter { get; set; }
        public long ExtendedFilePosition { get; set; }
        public DosDateTime DateTimeAccessed { get; set; } = new DosDateTime(0);
        public DosDateTime DateTimeCreated { get; set; } = new DosDateTime(0);
        public long OriginalSizeEvenForVolumes { get; set; }
        public string Name { get; set; } = string.Empty;
        public string Comment { get; set; } = string.Empty;

        private const byte StdHdrSize = 30;
        private const byte R9HdrSize = 46;

        public ArjLocalHeader(ArchiveEncoding archiveEncoding)
            : base(ArjHeaderType.LocalHeader)
        {
            ArchiveEncoding =
                archiveEncoding ?? throw new ArgumentNullException(nameof(archiveEncoding));
        }

        public override ArjHeader? Read(Stream stream)
        {
            var body = ReadHeader(stream);
            if (body.Length > 0)
            {
                ReadExtendedHeaders(stream);
                var header = LoadFrom(body);
                header.DataStartPosition = stream.Position;
                return header;
            }
            return null;
        }

        public ArjLocalHeader LoadFrom(byte[] headerBytes)
        {
            int offset = 0;

            int ReadInt16()
            {
                if (offset + 1 >= headerBytes.Length)
                {
                    throw new EndOfStreamException();
                }
                var v = headerBytes[offset] & 0xFF | (headerBytes[offset + 1] & 0xFF) << 8;
                offset += 2;
                return v;
            }
            long ReadInt32()
            {
                if (offset + 3 >= headerBytes.Length)
                {
                    throw new EndOfStreamException();
                }
                long v =
                    headerBytes[offset] & 0xFF
                    | (headerBytes[offset + 1] & 0xFF) << 8
                    | (headerBytes[offset + 2] & 0xFF) << 16
                    | (headerBytes[offset + 3] & 0xFF) << 24;
                offset += 4;
                return v;
            }

            byte headerSize = headerBytes[offset++];
            ArchiverVersionNumber = headerBytes[offset++];
            MinVersionToExtract = headerBytes[offset++];
            HostOS hostOS = (HostOS)headerBytes[offset++];
            Flags = headerBytes[offset++];
            CompressionMethod = CompressionMethodFromByte(headerBytes[offset++]);
            FileType = FileTypeFromByte(headerBytes[offset++]);

            offset++; // Skip 1 byte

            var rawTimestamp = ReadInt32();
            DateTimeModified =
                rawTimestamp != 0 ? new DosDateTime(rawTimestamp) : new DosDateTime(0);

            CompressedSize = ReadInt32();
            OriginalSize = ReadInt32();
            OriginalCrc32 = ReadInt32();
            FileSpecPosition = ReadInt16();
            FileAccessMode = ReadInt16();

            FirstChapter = headerBytes[offset++];
            LastChapter = headerBytes[offset++];

            ExtendedFilePosition = 0;
            OriginalSizeEvenForVolumes = 0;

            if (headerSize > StdHdrSize)
            {
                ExtendedFilePosition = ReadInt32();

                if (headerSize >= R9HdrSize)
                {
                    rawTimestamp = ReadInt32();
                    DateTimeAccessed =
                        rawTimestamp != 0 ? new DosDateTime(rawTimestamp) : new DosDateTime(0);
                    rawTimestamp = ReadInt32();
                    DateTimeCreated =
                        rawTimestamp != 0 ? new DosDateTime(rawTimestamp) : new DosDateTime(0);
                    OriginalSizeEvenForVolumes = ReadInt32();
                }
            }

            Name = Encoding.ASCII.GetString(
                headerBytes,
                offset,
                Array.IndexOf(headerBytes, (byte)0, offset) - offset
            );
            offset += Name.Length + 1;

            Comment = Encoding.ASCII.GetString(
                headerBytes,
                offset,
                Array.IndexOf(headerBytes, (byte)0, offset) - offset
            );
            offset += Comment.Length + 1;

            return this;
        }

        public static CompressionMethod CompressionMethodFromByte(byte value)
        {
            return value switch
            {
                0 => CompressionMethod.Stored,
                1 => CompressionMethod.CompressedMost,
                2 => CompressionMethod.Compressed,
                3 => CompressionMethod.CompressedFaster,
                4 => CompressionMethod.CompressedFastest,
                8 => CompressionMethod.NoDataNoCrc,
                9 => CompressionMethod.NoData,
                _ => CompressionMethod.Unknown,
            };
        }
    }
}
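The local `ReadInt16`/`ReadInt32` helpers assemble multi-byte fields least-significant byte first. A one-line worked example of the same expression:

```C#
// Bytes on disk: 78 56 34 12  ->  value 0x12345678
byte[] b = { 0x78, 0x56, 0x34, 0x12 };
long v = b[0] & 0xFF | (b[1] & 0xFF) << 8 | (b[2] & 0xFF) << 16 | (b[3] & 0xFF) << 24;
Console.WriteLine($"0x{v:X8}"); // prints 0x12345678
```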

138
src/SharpCompress/Common/Arj/Headers/ArjMainHeader.cs
Normal file
@@ -0,0 +1,138 @@
using System;
using System.IO;
using System.Text;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Crypto;

namespace SharpCompress.Common.Arj.Headers
{
    public class ArjMainHeader : ArjHeader
    {
        private const int FIRST_HDR_SIZE = 34;
        private const ushort ARJ_MAGIC = 0xEA60;

        public ArchiveEncoding ArchiveEncoding { get; }

        public int ArchiverVersionNumber { get; private set; }
        public int MinVersionToExtract { get; private set; }
        public HostOS HostOs { get; private set; }
        public int SecurityVersion { get; private set; }
        public DosDateTime CreationDateTime { get; private set; } = new DosDateTime(0);
        public long CompressedSize { get; private set; }
        public long ArchiveSize { get; private set; }
        public long SecurityEnvelope { get; private set; }
        public int FileSpecPosition { get; private set; }
        public int SecurityEnvelopeLength { get; private set; }
        public int EncryptionVersion { get; private set; }
        public int LastChapter { get; private set; }

        public int ArjProtectionFactor { get; private set; }
        public int Flags2 { get; private set; }
        public string Name { get; private set; } = string.Empty;
        public string Comment { get; private set; } = string.Empty;

        public ArjMainHeader(ArchiveEncoding archiveEncoding)
            : base(ArjHeaderType.MainHeader)
        {
            ArchiveEncoding =
                archiveEncoding ?? throw new ArgumentNullException(nameof(archiveEncoding));
        }

        public override ArjHeader? Read(Stream stream)
        {
            var body = ReadHeader(stream);
            ReadExtendedHeaders(stream);
            return LoadFrom(body);
        }

        public ArjMainHeader LoadFrom(byte[] headerBytes)
        {
            var offset = 1;

            byte ReadByte()
            {
                if (offset >= headerBytes.Length)
                {
                    throw new EndOfStreamException();
                }
                return (byte)(headerBytes[offset++] & 0xFF);
            }

            int ReadInt16()
            {
                if (offset + 1 >= headerBytes.Length)
                {
                    throw new EndOfStreamException();
                }
                var v = headerBytes[offset] & 0xFF | (headerBytes[offset + 1] & 0xFF) << 8;
                offset += 2;
                return v;
            }

            long ReadInt32()
            {
                if (offset + 3 >= headerBytes.Length)
                {
                    throw new EndOfStreamException();
                }
                long v =
                    headerBytes[offset] & 0xFF
                    | (headerBytes[offset + 1] & 0xFF) << 8
                    | (headerBytes[offset + 2] & 0xFF) << 16
                    | (headerBytes[offset + 3] & 0xFF) << 24;
                offset += 4;
                return v;
            }
            string ReadNullTerminatedString(byte[] x, int startIndex)
            {
                var result = new StringBuilder();
                int i = startIndex;

                while (i < x.Length && x[i] != 0)
                {
                    result.Append((char)x[i]);
                    i++;
                }

                // Skip the null terminator
                i++;
                if (i < x.Length)
                {
                    byte[] remainder = new byte[x.Length - i];
                    Array.Copy(x, i, remainder, 0, remainder.Length);
                    x = remainder;
                }

                return result.ToString();
            }

            ArchiverVersionNumber = ReadByte();
            MinVersionToExtract = ReadByte();

            var hostOsByte = ReadByte();
            HostOs = hostOsByte <= 11 ? (HostOS)hostOsByte : HostOS.Unknown;

            Flags = ReadByte();
            SecurityVersion = ReadByte();
            FileType = FileTypeFromByte(ReadByte());

            offset++; // skip reserved

            CreationDateTime = new DosDateTime((int)ReadInt32());
            CompressedSize = ReadInt32();
            ArchiveSize = ReadInt32();

            SecurityEnvelope = ReadInt32();
            FileSpecPosition = ReadInt16();
            SecurityEnvelopeLength = ReadInt16();

            EncryptionVersion = ReadByte();
            LastChapter = ReadByte();

            Name = ReadNullTerminatedString(headerBytes, offset);
            Comment = ReadNullTerminatedString(headerBytes, offset + 1 + Name.Length);

            return this;
        }
    }
}
|
||||
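The local ReadInt16/ReadInt32 helpers assemble little-endian values byte by byte; the ARJ_MAGIC constant above makes for a quick check of the byte order:

byte[] data = { 0x60, 0xEA }; // little-endian encoding of 0xEA60
var v = data[0] & 0xFF | (data[1] & 0xFF) << 8;
Console.WriteLine(v == 0xEA60); // True: matches ARJ_MAGIC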
20 src/SharpCompress/Common/Arj/Headers/CompressionMethod.cs (new file)
@@ -0,0 +1,20 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace SharpCompress.Common.Arj.Headers
{
    public enum CompressionMethod
    {
        Stored = 0,
        CompressedMost = 1,
        Compressed = 2,
        CompressedFaster = 3,
        CompressedFastest = 4,
        NoDataNoCrc = 8,
        NoData = 9,
        Unknown,
    }
}
37 src/SharpCompress/Common/Arj/Headers/DosDateTime.cs (new file)
@@ -0,0 +1,37 @@
using System;

namespace SharpCompress.Common.Arj.Headers
{
    public class DosDateTime
    {
        public DateTime DateTime { get; }

        public DosDateTime(long dosValue)
        {
            // Ensure only the lower 32 bits are used
            int value = unchecked((int)(dosValue & 0xFFFFFFFF));

            var date = (value >> 16) & 0xFFFF;
            var time = value & 0xFFFF;

            var day = date & 0x1F;
            var month = (date >> 5) & 0x0F;
            var year = ((date >> 9) & 0x7F) + 1980;

            var second = (time & 0x1F) * 2;
            var minute = (time >> 5) & 0x3F;
            var hour = (time >> 11) & 0x1F;

            try
            {
                DateTime = new DateTime(year, month, day, hour, minute, second);
            }
            catch
            {
                DateTime = DateTime.MinValue;
            }
        }

        public override string ToString() => DateTime.ToString("yyyy-MM-dd HH:mm:ss");
    }
}
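A quick sanity check of the bit layout above, using an illustrative timestamp (2024-06-15 12:34:56): the date word packs year-1980, month, and day into 7/4/5 bits, and the time word packs hour, minute, and two-second units into 5/6/5 bits.

// date word: ((2024 - 1980) << 9) | (6 << 5) | 15 = 0x58CF
// time word: (12 << 11) | (34 << 5) | (56 / 2) = 0x645C
var dos = new DosDateTime((0x58CF << 16) | 0x645C);
Console.WriteLine(dos); // 2024-06-15 12:34:56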
13 src/SharpCompress/Common/Arj/Headers/FileType.cs (new file)
@@ -0,0 +1,13 @@
namespace SharpCompress.Common.Arj.Headers
{
    public enum FileType : byte
    {
        Binary = 0,
        Text7Bit = 1,
        CommentHeader = 2,
        Directory = 3,
        VolumeLabel = 4,
        ChapterLabel = 5,
        Unknown = 255,
    }
}
19 src/SharpCompress/Common/Arj/Headers/HostOS.cs (new file)
@@ -0,0 +1,19 @@
namespace SharpCompress.Common.Arj.Headers
{
    public enum HostOS
    {
        MsDos = 0,
        PrimOS = 1,
        Unix = 2,
        Amiga = 3,
        MacOs = 4,
        OS2 = 5,
        AppleGS = 6,
        AtariST = 7,
        NeXT = 8,
        VaxVMS = 9,
        Win95 = 10,
        Win32 = 11,
        Unknown = 255,
    }
}
@@ -1,25 +0,0 @@
using System;

namespace SharpCompress.Common;

public sealed class CompressedBytesReadEventArgs : EventArgs
{
    public CompressedBytesReadEventArgs(
        long compressedBytesRead,
        long currentFilePartCompressedBytesRead
    )
    {
        CompressedBytesRead = compressedBytesRead;
        CurrentFilePartCompressedBytesRead = currentFilePartCompressedBytesRead;
    }

    /// <summary>
    /// Compressed bytes read for the current entry
    /// </summary>
    public long CompressedBytesRead { get; }

    /// <summary>
    /// Current file part read for Multipart files (e.g. Rar)
    /// </summary>
    public long CurrentFilePartCompressedBytesRead { get; }
}
@@ -23,10 +23,11 @@ public enum CompressionType
    Reduce4,
    Explode,
    Squeezed,
    RLE90,
    Packed,
    Crunched,
    Squashed,
    Crushed,
    Distilled,
    ZStandard,
    ArjLZ77,
}

@@ -64,6 +64,11 @@ public class EntryStream : Stream, IStreamStack

    protected override void Dispose(bool disposing)
    {
        if (_isDisposed)
        {
            return;
        }
        _isDisposed = true;
        if (!(_completed || _reader.Cancelled))
        {
            SkipEntry();
@@ -81,12 +86,6 @@ public class EntryStream : Stream, IStreamStack
                lzmaStream.Flush(); //Lzma over reads. Knock it back
            }
        }

        if (_isDisposed)
        {
            return;
        }
        _isDisposed = true;
#if DEBUG_STREAMS
        this.DebugDispose(typeof(EntryStream));
#endif
@@ -97,6 +96,11 @@ public class EntryStream : Stream, IStreamStack
#if !NETFRAMEWORK && !NETSTANDARD2_0
    public override async ValueTask DisposeAsync()
    {
        if (_isDisposed)
        {
            return;
        }
        _isDisposed = true;
        if (!(_completed || _reader.Cancelled))
        {
            await SkipEntryAsync().ConfigureAwait(false);
@@ -114,12 +118,6 @@ public class EntryStream : Stream, IStreamStack
                await lzmaStream.FlushAsync().ConfigureAwait(false);
            }
        }

        if (_isDisposed)
        {
            return;
        }
        _isDisposed = true;
#if DEBUG_STREAMS
        this.DebugDispose(typeof(EntryStream));
#endif
@@ -204,4 +202,11 @@ public class EntryStream : Stream, IStreamStack

    public override void Write(byte[] buffer, int offset, int count) =>
        throw new NotSupportedException();

    public override Task WriteAsync(
        byte[] buffer,
        int offset,
        int count,
        CancellationToken cancellationToken
    ) => throw new NotSupportedException();
}

@@ -128,7 +128,7 @@ internal static class ExtractionMethods
        IEntry entry,
        string destinationDirectory,
        ExtractionOptions? options,
        Func<string, ExtractionOptions?, Task> writeAsync,
        Func<string, ExtractionOptions?, CancellationToken, Task> writeAsync,
        CancellationToken cancellationToken = default
    )
    {
@@ -189,7 +189,7 @@ internal static class ExtractionMethods
                    "Entry is trying to write a file outside of the destination directory."
                );
            }
            await writeAsync(destinationFileName, options).ConfigureAwait(false);
            await writeAsync(destinationFileName, options, cancellationToken).ConfigureAwait(false);
        }
        else if (options.ExtractFullPath && !Directory.Exists(destinationFileName))
        {
@@ -201,7 +201,7 @@ internal static class ExtractionMethods
        IEntry entry,
        string destinationFileName,
        ExtractionOptions? options,
        Func<string, FileMode, Task> openAndWriteAsync,
        Func<string, FileMode, CancellationToken, Task> openAndWriteAsync,
        CancellationToken cancellationToken = default
    )
    {
@@ -225,7 +225,8 @@ internal static class ExtractionMethods
                fm = FileMode.CreateNew;
            }

            await openAndWriteAsync(destinationFileName, fm).ConfigureAwait(false);
            await openAndWriteAsync(destinationFileName, fm, cancellationToken)
                .ConfigureAwait(false);
            entry.PreserveExtractionOptions(destinationFileName, options);
        }
    }

@@ -1,28 +0,0 @@
using System;

namespace SharpCompress.Common;

public sealed class FilePartExtractionBeginEventArgs : EventArgs
{
    public FilePartExtractionBeginEventArgs(string name, long size, long compressedSize)
    {
        Name = name;
        Size = size;
        CompressedSize = compressedSize;
    }

    /// <summary>
    /// File name for the part for the current entry
    /// </summary>
    public string Name { get; }

    /// <summary>
    /// Uncompressed size of the current entry in the part
    /// </summary>
    public long Size { get; }

    /// <summary>
    /// Compressed size of the current entry in the part
    /// </summary>
    public long CompressedSize { get; }
}
@@ -1,7 +0,0 @@
namespace SharpCompress.Common;

public interface IExtractionListener
{
    void FireFilePartExtractionBegin(string name, long size, long compressedSize);
    void FireCompressedBytesRead(long currentPartCompressedBytes, long compressedReadBytes);
}
43 src/SharpCompress/Common/ProgressReport.cs (new file)
@@ -0,0 +1,43 @@
namespace SharpCompress.Common;

/// <summary>
/// Represents progress information for compression or extraction operations.
/// </summary>
public sealed class ProgressReport
{
    /// <summary>
    /// Initializes a new instance of the <see cref="ProgressReport"/> class.
    /// </summary>
    /// <param name="entryPath">The path of the entry being processed.</param>
    /// <param name="bytesTransferred">Number of bytes transferred so far.</param>
    /// <param name="totalBytes">Total bytes to be transferred, or null if unknown.</param>
    public ProgressReport(string entryPath, long bytesTransferred, long? totalBytes)
    {
        EntryPath = entryPath;
        BytesTransferred = bytesTransferred;
        TotalBytes = totalBytes;
    }

    /// <summary>
    /// Gets the path of the entry being processed.
    /// </summary>
    public string EntryPath { get; }

    /// <summary>
    /// Gets the number of bytes transferred so far.
    /// </summary>
    public long BytesTransferred { get; }

    /// <summary>
    /// Gets the total number of bytes to be transferred, or null if unknown.
    /// </summary>
    public long? TotalBytes { get; }

    /// <summary>
    /// Gets the progress percentage (0-100), or null if total bytes is unknown.
    /// </summary>
    public double? PercentComplete =>
        TotalBytes.HasValue && TotalBytes.Value > 0
            ? (double)BytesTransferred / TotalBytes.Value * 100
            : null;
}
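A short usage sketch of the percentage computation:

var report = new ProgressReport("docs/readme.txt", bytesTransferred: 512, totalBytes: 2048);
Console.WriteLine(report.PercentComplete); // 25
var unknown = new ProgressReport("stream", 512, totalBytes: null);
Console.WriteLine(unknown.PercentComplete is null); // True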
@@ -1,17 +0,0 @@
using System;
using SharpCompress.Readers;

namespace SharpCompress.Common;

public sealed class ReaderExtractionEventArgs<T> : EventArgs
{
    internal ReaderExtractionEventArgs(T entry, ReaderProgress? readerProgress = null)
    {
        Item = entry;
        ReaderProgress = readerProgress;
    }

    public T Item { get; }

    public ReaderProgress? ReaderProgress { get; }
}
@@ -1,5 +1,6 @@
using System;
using System.Buffers.Binary;
using System.Collections.Generic;
using System.IO;
using System.Text;

@@ -9,8 +10,16 @@ internal sealed class TarHeader
{
    internal static readonly DateTime EPOCH = new(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);

    public TarHeader(ArchiveEncoding archiveEncoding) => ArchiveEncoding = archiveEncoding;
    public TarHeader(
        ArchiveEncoding archiveEncoding,
        TarHeaderWriteFormat writeFormat = TarHeaderWriteFormat.GNU_TAR_LONG_LINK
    )
    {
        ArchiveEncoding = archiveEncoding;
        WriteFormat = writeFormat;
    }

    internal TarHeaderWriteFormat WriteFormat { get; set; }
    internal string? Name { get; set; }
    internal string? LinkName { get; set; }

@@ -25,7 +34,119 @@ internal sealed class TarHeader

    internal const int BLOCK_SIZE = 512;

    // Maximum size for long name/link headers to prevent memory exhaustion attacks
    // This is generous enough for most real-world scenarios (32KB)
    private const int MAX_LONG_NAME_SIZE = 32768;

    internal void Write(Stream output)
    {
        switch (WriteFormat)
        {
            case TarHeaderWriteFormat.GNU_TAR_LONG_LINK:
                WriteGnuTarLongLink(output);
                break;
            case TarHeaderWriteFormat.USTAR:
                WriteUstar(output);
                break;
            default:
                throw new Exception("This should be impossible...");
        }
    }

    internal void WriteUstar(Stream output)
    {
        var buffer = new byte[BLOCK_SIZE];

        WriteOctalBytes(511, buffer, 100, 8); // file mode
        WriteOctalBytes(0, buffer, 108, 8); // owner ID
        WriteOctalBytes(0, buffer, 116, 8); // group ID

        //ArchiveEncoding.UTF8.GetBytes("magic").CopyTo(buffer, 257);
        var nameByteCount = ArchiveEncoding
            .GetEncoding()
            .GetByteCount(Name.NotNull("Name is null"));

        if (nameByteCount > 100)
        {
            // if name is longer, try to split it into name and namePrefix

            string fullName = Name.NotNull("Name is null");

            // find all directory separators
            List<int> dirSeps = new List<int>();
            for (int i = 0; i < fullName.Length; i++)
            {
                if (fullName[i] == Path.DirectorySeparatorChar)
                {
                    dirSeps.Add(i);
                }
            }

            // find the right place to split the name
            int splitIndex = -1;
            for (int i = 0; i < dirSeps.Count; i++)
            {
                int count = ArchiveEncoding
                    .GetEncoding()
                    .GetByteCount(fullName.Substring(0, dirSeps[i]));
                if (count < 155)
                {
                    splitIndex = dirSeps[i];
                }
                else
                {
                    break;
                }
            }

            if (splitIndex == -1)
            {
                throw new Exception(
                    $"Tar header USTAR format can not fit file name \"{fullName}\" of length {nameByteCount}! Directory separator not found! Try using GNU Tar format instead!"
                );
            }

            string namePrefix = fullName.Substring(0, splitIndex);
            string name = fullName.Substring(splitIndex + 1);

            if (this.ArchiveEncoding.GetEncoding().GetByteCount(namePrefix) >= 155)
                throw new Exception(
                    $"Tar header USTAR format can not fit file name \"{fullName}\" of length {nameByteCount}! Try using GNU Tar format instead!"
                );

            if (this.ArchiveEncoding.GetEncoding().GetByteCount(name) >= 100)
                throw new Exception(
                    $"Tar header USTAR format can not fit file name \"{fullName}\" of length {nameByteCount}! Try using GNU Tar format instead!"
                );

            // write name prefix
            WriteStringBytes(ArchiveEncoding.Encode(namePrefix), buffer, 345, 100);
            // write partial name
            WriteStringBytes(ArchiveEncoding.Encode(name), buffer, 100);
        }
        else
        {
            WriteStringBytes(ArchiveEncoding.Encode(Name.NotNull("Name is null")), buffer, 100);
        }

        WriteOctalBytes(Size, buffer, 124, 12);
        var time = (long)(LastModifiedTime.ToUniversalTime() - EPOCH).TotalSeconds;
        WriteOctalBytes(time, buffer, 136, 12);
        buffer[156] = (byte)EntryType;

        // write ustar magic field
        WriteStringBytes(Encoding.ASCII.GetBytes("ustar"), buffer, 257, 6);
        // write ustar version "00"
        buffer[263] = 0x30;
        buffer[264] = 0x30;

        var crc = RecalculateChecksum(buffer);
        WriteOctalBytes(crc, buffer, 148, 8);

        output.Write(buffer, 0, buffer.Length);
    }

    internal void WriteGnuTarLongLink(Stream output)
    {
        var buffer = new byte[BLOCK_SIZE];

@@ -81,7 +202,7 @@ internal sealed class TarHeader
            0,
            100 - ArchiveEncoding.GetEncoding().GetMaxByteCount(1)
        );
        Write(output);
        WriteGnuTarLongLink(output);
    }
}

@@ -186,6 +307,15 @@ internal sealed class TarHeader
    private string ReadLongName(BinaryReader reader, byte[] buffer)
    {
        var size = ReadSize(buffer);

        // Validate size to prevent memory exhaustion from malformed headers
        if (size < 0 || size > MAX_LONG_NAME_SIZE)
        {
            throw new InvalidFormatException(
                $"Long name size {size} is invalid or exceeds maximum allowed size of {MAX_LONG_NAME_SIZE} bytes"
            );
        }

        var nameLength = (int)size;
        var nameBytes = reader.ReadBytes(nameLength);
        var remainingBytesToRead = BLOCK_SIZE - (nameLength % BLOCK_SIZE);
@@ -228,6 +358,18 @@ internal sealed class TarHeader
        buffer.Slice(i, length - i).Clear();
    }

    private static void WriteStringBytes(
        ReadOnlySpan<byte> name,
        Span<byte> buffer,
        int offset,
        int length
    )
    {
        name.CopyTo(buffer.Slice(offset));
        var i = Math.Min(length, name.Length);
        buffer.Slice(offset + i, length - i).Clear();
    }

    private static void WriteStringBytes(string name, byte[] buffer, int offset, int length)
    {
        int i;

@@ -0,0 +1,7 @@
namespace SharpCompress.Common.Tar.Headers;

public enum TarHeaderWriteFormat
{
    GNU_TAR_LONG_LINK,
    USTAR,
}
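The WriteUstar path earlier splits an over-long name at the last directory separator whose prefix still fits the 155-byte prefix field, leaving a remainder that fits the 100-byte name field. A standalone sketch of that rule (the helper name and the UTF-8 assumption are illustrative, not part of the library):

using System;
using System.Text;

static (string prefix, string name) SplitUstar(string fullName)
{
    var enc = Encoding.UTF8; // assumption: whatever ArchiveEncoding resolves to
    var split = -1;
    for (var i = 0; i < fullName.Length; i++)
    {
        // keep the latest separator whose prefix is still under 155 bytes
        if (fullName[i] == '/' && enc.GetByteCount(fullName.Substring(0, i)) < 155)
        {
            split = i;
        }
    }
    if (split < 0 || enc.GetByteCount(fullName.Substring(split + 1)) >= 100)
    {
        throw new InvalidOperationException("Name does not fit the USTAR fields.");
    }
    return (fullName.Substring(0, split), fullName.Substring(split + 1));
}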
@@ -1,6 +1,7 @@
using System;
using System.Buffers.Binary;
using System.Security.Cryptography;
using System.Text;

namespace SharpCompress.Common.Zip;

@@ -19,8 +20,24 @@ internal class WinzipAesEncryptionData
{
    _keySize = keySize;

#if NETFRAMEWORK || NETSTANDARD2_0
#if NETFRAMEWORK
    var rfc2898 = new Rfc2898DeriveBytes(password, salt, RFC2898_ITERATIONS);
    KeyBytes = rfc2898.GetBytes(KeySizeInBytes);
    IvBytes = rfc2898.GetBytes(KeySizeInBytes);
    var generatedVerifyValue = rfc2898.GetBytes(2);
#elif NET10_0_OR_GREATER
    var derivedKeySize = (KeySizeInBytes * 2) + 2;
    var passwordBytes = Encoding.UTF8.GetBytes(password);
    var derivedKey = Rfc2898DeriveBytes.Pbkdf2(
        passwordBytes,
        salt,
        RFC2898_ITERATIONS,
        HashAlgorithmName.SHA1,
        derivedKeySize
    );
    KeyBytes = derivedKey.AsSpan(0, KeySizeInBytes).ToArray();
    IvBytes = derivedKey.AsSpan(KeySizeInBytes, KeySizeInBytes).ToArray();
    var generatedVerifyValue = derivedKey.AsSpan((KeySizeInBytes * 2), 2).ToArray();
#else
    var rfc2898 = new Rfc2898DeriveBytes(
        password,
@@ -28,11 +45,10 @@ internal class WinzipAesEncryptionData
        RFC2898_ITERATIONS,
        HashAlgorithmName.SHA1
    );
#endif

    KeyBytes = rfc2898.GetBytes(KeySizeInBytes); // 16 or 24 or 32 ???
    KeyBytes = rfc2898.GetBytes(KeySizeInBytes);
    IvBytes = rfc2898.GetBytes(KeySizeInBytes);
    var generatedVerifyValue = rfc2898.GetBytes(2);
#endif

    var verify = BinaryPrimitives.ReadInt16LittleEndian(passwordVerifyValue);
    var generated = BinaryPrimitives.ReadInt16LittleEndian(generatedVerifyValue);

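The NET10 branch derives all of the key material in one PBKDF2-SHA1 call and slices it as [key | IV-sized block | 2-byte verifier]. A hedged sketch of that layout (the salt, password, and iteration count here are illustrative assumptions, not values taken from the library):

using System.Security.Cryptography;
using System.Text;

var salt = new byte[16];     // hypothetical salt read from the ZIP entry
var keySizeInBytes = 32;     // AES-256
var derived = Rfc2898DeriveBytes.Pbkdf2(
    Encoding.UTF8.GetBytes("password"),
    salt,
    1000,                    // assumed RFC2898_ITERATIONS value
    HashAlgorithmName.SHA1,
    keySizeInBytes * 2 + 2
);
var key = derived.AsSpan(0, keySizeInBytes).ToArray();
var iv = derived.AsSpan(keySizeInBytes, keySizeInBytes).ToArray();
var verifier = derived.AsSpan(keySizeInBytes * 2, 2).ToArray();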
@@ -13,8 +13,8 @@ using SharpCompress.Compressors.PPMd;
using SharpCompress.Compressors.Reduce;
using SharpCompress.Compressors.Shrink;
using SharpCompress.Compressors.Xz;
using SharpCompress.Compressors.ZStandard;
using SharpCompress.IO;
using ZstdSharp;

namespace SharpCompress.Common.Zip;

@@ -24,10 +24,29 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
using System;
using System.Buffers;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

namespace SharpCompress.Compressors.ADC;

/// <summary>
/// Result of an ADC decompression operation
/// </summary>
public class AdcDecompressResult
{
    /// <summary>
    /// Number of bytes read from input
    /// </summary>
    public int BytesRead { get; set; }

    /// <summary>
    /// Decompressed output buffer
    /// </summary>
    public byte[]? Output { get; set; }
}

/// <summary>
/// Provides static methods for decompressing Apple Data Compression data
/// </summary>
@@ -78,6 +97,173 @@ public static class ADCBase
    public static int Decompress(byte[] input, out byte[]? output, int bufferSize = 262144) =>
        Decompress(new MemoryStream(input), out output, bufferSize);

    /// <summary>
    /// Decompresses a byte buffer asynchronously that's compressed with ADC
    /// </summary>
    /// <param name="input">Compressed buffer</param>
    /// <param name="bufferSize">Max size for decompressed data</param>
    /// <param name="cancellationToken">Cancellation token</param>
    /// <returns>Result containing bytes read and decompressed data</returns>
    public static async Task<AdcDecompressResult> DecompressAsync(
        byte[] input,
        int bufferSize = 262144,
        CancellationToken cancellationToken = default
    ) => await DecompressAsync(new MemoryStream(input), bufferSize, cancellationToken);

    /// <summary>
    /// Decompresses a stream asynchronously that's compressed with ADC
    /// </summary>
    /// <param name="input">Stream containing compressed data</param>
    /// <param name="bufferSize">Max size for decompressed data</param>
    /// <param name="cancellationToken">Cancellation token</param>
    /// <returns>Result containing bytes read and decompressed data</returns>
    public static async Task<AdcDecompressResult> DecompressAsync(
        Stream input,
        int bufferSize = 262144,
        CancellationToken cancellationToken = default
    )
    {
        var result = new AdcDecompressResult();

        if (input is null || input.Length == 0)
        {
            result.BytesRead = 0;
            result.Output = null;
            return result;
        }

        var start = (int)input.Position;
        var position = (int)input.Position;
        int chunkSize;
        int offset;
        int chunkType;
        var buffer = ArrayPool<byte>.Shared.Rent(bufferSize);
        var outPosition = 0;
        var full = false;
        byte[] temp = ArrayPool<byte>.Shared.Rent(3);

        try
        {
            while (position < input.Length)
            {
                cancellationToken.ThrowIfCancellationRequested();
                var readByte = input.ReadByte();
                if (readByte == -1)
                {
                    break;
                }

                chunkType = GetChunkType((byte)readByte);

                switch (chunkType)
                {
                    case PLAIN:
                        chunkSize = GetChunkSize((byte)readByte);
                        if (outPosition + chunkSize > bufferSize)
                        {
                            full = true;
                            break;
                        }

                        var readCount = await input.ReadAsync(
                            buffer,
                            outPosition,
                            chunkSize,
                            cancellationToken
                        );
                        outPosition += readCount;
                        position += readCount + 1;
                        break;
                    case TWO_BYTE:
                        chunkSize = GetChunkSize((byte)readByte);
                        temp[0] = (byte)readByte;
                        temp[1] = (byte)input.ReadByte();
                        offset = GetOffset(temp.AsSpan(0, 2));
                        if (outPosition + chunkSize > bufferSize)
                        {
                            full = true;
                            break;
                        }

                        if (offset == 0)
                        {
                            var lastByte = buffer[outPosition - 1];
                            for (var i = 0; i < chunkSize; i++)
                            {
                                buffer[outPosition] = lastByte;
                                outPosition++;
                            }

                            position += 2;
                        }
                        else
                        {
                            for (var i = 0; i < chunkSize; i++)
                            {
                                buffer[outPosition] = buffer[outPosition - offset - 1];
                                outPosition++;
                            }

                            position += 2;
                        }

                        break;
                    case THREE_BYTE:
                        chunkSize = GetChunkSize((byte)readByte);
                        temp[0] = (byte)readByte;
                        temp[1] = (byte)input.ReadByte();
                        temp[2] = (byte)input.ReadByte();
                        offset = GetOffset(temp.AsSpan(0, 3));
                        if (outPosition + chunkSize > bufferSize)
                        {
                            full = true;
                            break;
                        }

                        if (offset == 0)
                        {
                            var lastByte = buffer[outPosition - 1];
                            for (var i = 0; i < chunkSize; i++)
                            {
                                buffer[outPosition] = lastByte;
                                outPosition++;
                            }

                            position += 3;
                        }
                        else
                        {
                            for (var i = 0; i < chunkSize; i++)
                            {
                                buffer[outPosition] = buffer[outPosition - offset - 1];
                                outPosition++;
                            }

                            position += 3;
                        }

                        break;
                }

                if (full)
                {
                    break;
                }
            }

            var output = new byte[outPosition];
            Array.Copy(buffer, output, outPosition);
            result.BytesRead = position - start;
            result.Output = output;
            return result;
        }
        finally
        {
            ArrayPool<byte>.Shared.Return(buffer);
            ArrayPool<byte>.Shared.Return(temp);
        }
    }

    /// <summary>
    /// Decompresses a stream that's compressed with ADC
    /// </summary>

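A usage sketch for the new async entry point (the input file is hypothetical; the call must run inside an async method):

var compressed = File.ReadAllBytes("chunk.adc"); // hypothetical input
var result = await ADCBase.DecompressAsync(compressed, bufferSize: 262144);
Console.WriteLine($"consumed {result.BytesRead} bytes, produced {result.Output?.Length ?? 0}");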
@@ -28,6 +28,8 @@

using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.IO;

namespace SharpCompress.Compressors.ADC;
@@ -187,6 +189,76 @@ public sealed class ADCStream : Stream, IStreamStack
        return copied;
    }

    public override async Task<int> ReadAsync(
        byte[] buffer,
        int offset,
        int count,
        CancellationToken cancellationToken = default
    )
    {
        if (count == 0)
        {
            return 0;
        }
        if (buffer is null)
        {
            throw new ArgumentNullException(nameof(buffer));
        }
        if (count < 0)
        {
            throw new ArgumentOutOfRangeException(nameof(count));
        }
        if (offset < buffer.GetLowerBound(0))
        {
            throw new ArgumentOutOfRangeException(nameof(offset));
        }
        if ((offset + count) > buffer.GetLength(0))
        {
            throw new ArgumentOutOfRangeException(nameof(count));
        }

        if (_outBuffer is null)
        {
            var result = await ADCBase.DecompressAsync(
                _stream,
                cancellationToken: cancellationToken
            );
            _outBuffer = result.Output;
            _outPosition = 0;
        }

        var inPosition = offset;
        var toCopy = count;
        var copied = 0;

        while (_outPosition + toCopy >= _outBuffer.Length)
        {
            cancellationToken.ThrowIfCancellationRequested();
            var piece = _outBuffer.Length - _outPosition;
            Array.Copy(_outBuffer, _outPosition, buffer, inPosition, piece);
            inPosition += piece;
            copied += piece;
            _position += piece;
            toCopy -= piece;
            var result = await ADCBase.DecompressAsync(
                _stream,
                cancellationToken: cancellationToken
            );
            _outBuffer = result.Output;
            _outPosition = 0;
            if (result.BytesRead == 0 || _outBuffer is null || _outBuffer.Length == 0)
            {
                return copied;
            }
        }

        Array.Copy(_outBuffer, _outPosition, buffer, inPosition, toCopy);
        _outPosition += toCopy;
        _position += toCopy;
        copied += toCopy;
        return copied;
    }

    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();

    public override void SetLength(long value) => throw new NotSupportedException();

72 src/SharpCompress/Compressors/Arj/BitReader.cs (new file)
@@ -0,0 +1,72 @@
using System;
using System.IO;

namespace SharpCompress.Compressors.Arj
{
    [CLSCompliant(true)]
    public class BitReader
    {
        private readonly Stream _input;
        private int _bitBuffer; // currently buffered bits
        private int _bitCount; // number of bits in buffer

        public BitReader(Stream input)
        {
            _input = input ?? throw new ArgumentNullException(nameof(input));
            _bitBuffer = 0;
            _bitCount = 0;
        }

        /// <summary>
        /// Reads a single bit from the stream. Returns 0 or 1.
        /// </summary>
        public int ReadBit()
        {
            if (_bitCount == 0)
            {
                int nextByte = _input.ReadByte();
                if (nextByte < 0)
                {
                    throw new EndOfStreamException("No more data available in BitReader.");
                }

                _bitBuffer = nextByte;
                _bitCount = 8;
            }

            int bit = (_bitBuffer >> (_bitCount - 1)) & 1;
            _bitCount--;
            return bit;
        }

        /// <summary>
        /// Reads n bits (up to 32) from the stream.
        /// </summary>
        public int ReadBits(int count)
        {
            if (count < 0 || count > 32)
            {
                throw new ArgumentOutOfRangeException(
                    nameof(count),
                    "Count must be between 0 and 32."
                );
            }

            int result = 0;
            for (int i = 0; i < count; i++)
            {
                result = (result << 1) | ReadBit();
            }
            return result;
        }

        /// <summary>
        /// Resets any buffered bits.
        /// </summary>
        public void AlignToByte()
        {
            _bitCount = 0;
            _bitBuffer = 0;
        }
    }
}
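Bits come out MSB-first, one source byte at a time. A small check of that ordering:

using var ms = new MemoryStream(new byte[] { 0b1011_0010 });
var reader = new BitReader(ms);
Console.WriteLine(reader.ReadBit());   // 1 (top bit of 0b1011_0010)
Console.WriteLine(reader.ReadBits(3)); // 3 (the next bits, 011)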
43 src/SharpCompress/Compressors/Arj/HistoryIterator.cs (new file)
@@ -0,0 +1,43 @@
using System;
using System.Collections;
using System.Collections.Generic;

namespace SharpCompress.Compressors.Arj
{
    /// <summary>
    /// Iterator that reads & pushes values back into the ring buffer.
    /// </summary>
    public class HistoryIterator : IEnumerator<byte>
    {
        private int _index;
        private readonly IRingBuffer _ring;

        public HistoryIterator(IRingBuffer ring, int startIndex)
        {
            _ring = ring;
            _index = startIndex;
        }

        public bool MoveNext()
        {
            Current = _ring[_index];
            _index = unchecked(_index + 1);

            // Push value back into the ring buffer
            _ring.Push(Current);

            return true; // iterator is infinite
        }

        public void Reset()
        {
            throw new NotSupportedException();
        }

        public byte Current { get; private set; }

        object IEnumerator.Current => Current;

        public void Dispose() { }
    }
}
218 src/SharpCompress/Compressors/Arj/HuffmanTree.cs (new file)
@@ -0,0 +1,218 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;

namespace SharpCompress.Compressors.Arj
{
    [CLSCompliant(true)]
    public enum NodeType
    {
        Leaf,
        Branch,
    }

    [CLSCompliant(true)]
    public sealed class TreeEntry
    {
        public readonly NodeType Type;
        public readonly int LeafValue;
        public readonly int BranchIndex;

        public const int MAX_INDEX = 4096;

        private TreeEntry(NodeType type, int leafValue, int branchIndex)
        {
            Type = type;
            LeafValue = leafValue;
            BranchIndex = branchIndex;
        }

        public static TreeEntry Leaf(int value)
        {
            return new TreeEntry(NodeType.Leaf, value, -1);
        }

        public static TreeEntry Branch(int index)
        {
            if (index >= MAX_INDEX)
            {
                throw new ArgumentOutOfRangeException(
                    nameof(index),
                    "Branch index exceeds MAX_INDEX"
                );
            }
            return new TreeEntry(NodeType.Branch, 0, index);
        }
    }

    [CLSCompliant(true)]
    public sealed class HuffTree
    {
        private readonly List<TreeEntry> _tree;

        public HuffTree(int capacity = 0)
        {
            _tree = new List<TreeEntry>(capacity);
        }

        public void SetSingle(int value)
        {
            _tree.Clear();
            _tree.Add(TreeEntry.Leaf(value));
        }

        public void BuildTree(byte[] lengths, int count)
        {
            if (lengths == null)
            {
                throw new ArgumentNullException(nameof(lengths));
            }

            if (count < 0 || count > lengths.Length)
            {
                throw new ArgumentOutOfRangeException(nameof(count));
            }

            if (count > TreeEntry.MAX_INDEX / 2)
            {
                throw new ArgumentException(
                    $"Count exceeds maximum allowed: {TreeEntry.MAX_INDEX / 2}"
                );
            }
            byte[] slice = new byte[count];
            Array.Copy(lengths, slice, count);

            BuildTree(slice);
        }

        public void BuildTree(byte[] valueLengths)
        {
            if (valueLengths == null)
            {
                throw new ArgumentNullException(nameof(valueLengths));
            }

            if (valueLengths.Length > TreeEntry.MAX_INDEX / 2)
            {
                throw new InvalidOperationException("Too many code lengths");
            }

            _tree.Clear();

            int maxAllocated = 1; // start with a single (root) node

            for (byte currentLen = 1; ; currentLen++)
            {
                // add missing branches up to current limit
                int maxLimit = maxAllocated;

                for (int i = _tree.Count; i < maxLimit; i++)
                {
                    // TreeEntry.Branch may throw if index too large
                    try
                    {
                        _tree.Add(TreeEntry.Branch(maxAllocated));
                    }
                    catch (ArgumentOutOfRangeException e)
                    {
                        _tree.Clear();
                        throw new InvalidOperationException("Branch index exceeds limit", e);
                    }

                    // each branch node allocates two children
                    maxAllocated += 2;
                }

                // fill tree with leaves found in the lengths table at the current length
                bool moreLeaves = false;

                for (int value = 0; value < valueLengths.Length; value++)
                {
                    byte len = valueLengths[value];
                    if (len == currentLen)
                    {
                        _tree.Add(TreeEntry.Leaf(value));
                    }
                    else if (len > currentLen)
                    {
                        moreLeaves = true; // there are more leaves to process
                    }
                }

                // sanity check (too many leaves)
                if (_tree.Count > maxAllocated)
                {
                    throw new InvalidOperationException("Too many leaves");
                }

                // stop when no longer finding longer codes
                if (!moreLeaves)
                {
                    break;
                }
            }

            // ensure tree is complete
            if (_tree.Count != maxAllocated)
            {
                throw new InvalidOperationException(
                    $"Missing some leaves: tree count = {_tree.Count}, expected = {maxAllocated}"
                );
            }
        }

        public int ReadEntry(BitReader reader)
        {
            if (_tree.Count == 0)
            {
                throw new InvalidOperationException("Tree not initialized");
            }

            TreeEntry node = _tree[0];
            while (true)
            {
                if (node.Type == NodeType.Leaf)
                {
                    return node.LeafValue;
                }

                int bit = reader.ReadBit();
                int index = node.BranchIndex + bit;

                if (index >= _tree.Count)
                {
                    throw new InvalidOperationException("Invalid branch index during read");
                }

                node = _tree[index];
            }
        }

        public override string ToString()
        {
            var result = new StringBuilder();

            void FormatStep(int index, string prefix)
            {
                var node = _tree[index];
                if (node.Type == NodeType.Leaf)
                {
                    result.AppendLine($"{prefix} -> {node.LeafValue}");
                }
                else
                {
                    FormatStep(node.BranchIndex, prefix + "0");
                    FormatStep(node.BranchIndex + 1, prefix + "1");
                }
            }

            if (_tree.Count > 0)
            {
                FormatStep(0, "");
            }

            return result.ToString();
        }
    }
}
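Because the tree is built breadth-first from canonical code lengths, lengths {1, 2, 2} yield the codes 0 -> "0", 1 -> "10", 2 -> "11". A small round-trip sketch using the BitReader above:

var tree = new HuffTree();
tree.BuildTree(new byte[] { 1, 2, 2 });
using var ms = new MemoryStream(new byte[] { 0b1011_0000 }); // bits: 10 11 0 ...
var reader = new BitReader(ms);
Console.WriteLine(tree.ReadEntry(reader)); // 1 (code "10")
Console.WriteLine(tree.ReadEntry(reader)); // 2 (code "11")
Console.WriteLine(tree.ReadEntry(reader)); // 0 (code "0")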
9 src/SharpCompress/Compressors/Arj/ILhaDecoderConfig.cs (new file)
@@ -0,0 +1,9 @@
namespace SharpCompress.Compressors.Arj
{
    public interface ILhaDecoderConfig
    {
        int HistoryBits { get; }
        int OffsetBits { get; }
        RingBuffer RingBuffer { get; }
    }
}
17 src/SharpCompress/Compressors/Arj/IRingBuffer.cs (new file)
@@ -0,0 +1,17 @@
namespace SharpCompress.Compressors.Arj
{
    public interface IRingBuffer
    {
        int BufferSize { get; }

        int Cursor { get; }
        void SetCursor(int pos);

        void Push(byte value);

        HistoryIterator IterFromOffset(int offset);
        HistoryIterator IterFromPos(int pos);

        byte this[int index] { get; }
    }
}
191 src/SharpCompress/Compressors/Arj/LHDecoderStream.cs (new file)
@@ -0,0 +1,191 @@
using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.IO;

namespace SharpCompress.Compressors.Arj
{
    [CLSCompliant(true)]
    public sealed class LHDecoderStream : Stream, IStreamStack
    {
#if DEBUG_STREAMS
        long IStreamStack.InstanceId { get; set; }
#endif
        int IStreamStack.DefaultBufferSize { get; set; }

        Stream IStreamStack.BaseStream() => _stream;

        int IStreamStack.BufferSize
        {
            get => 0;
            set { }
        }
        int IStreamStack.BufferPosition
        {
            get => 0;
            set { }
        }

        void IStreamStack.SetPosition(long position) { }

        private readonly BitReader _bitReader;
        private readonly Stream _stream;

        // Buffer containing *all* bytes decoded so far.
        private readonly List<byte> _buffer = new();

        private long _readPosition;
        private readonly int _originalSize;
        private bool _finishedDecoding;
        private bool _disposed;

        private const int THRESHOLD = 3;

        public LHDecoderStream(Stream compressedStream, int originalSize)
        {
            _stream = compressedStream ?? throw new ArgumentNullException(nameof(compressedStream));
            if (!compressedStream.CanRead)
                throw new ArgumentException(
                    "compressedStream must be readable.",
                    nameof(compressedStream)
                );

            _bitReader = new BitReader(compressedStream);
            _originalSize = originalSize;
            _readPosition = 0;
            _finishedDecoding = (originalSize == 0);
        }

        public Stream BaseStream => _stream;

        public override bool CanRead => true;
        public override bool CanSeek => false;
        public override bool CanWrite => false;

        public override long Length => _originalSize;

        public override long Position
        {
            get => _readPosition;
            set => throw new NotSupportedException();
        }

        /// <summary>
        /// Decodes a single element (literal or back-reference) and appends it to _buffer.
        /// Returns true if data was added, or false if all input has already been decoded.
        /// </summary>
        private bool DecodeNext()
        {
            if (_buffer.Count >= _originalSize)
            {
                _finishedDecoding = true;
                return false;
            }

            int len = DecodeVal(0, 7);
            if (len == 0)
            {
                byte nextChar = (byte)_bitReader.ReadBits(8);
                _buffer.Add(nextChar);
            }
            else
            {
                int repCount = len + THRESHOLD - 1;
                int backPtr = DecodeVal(9, 13);

                if (backPtr >= _buffer.Count)
                    throw new InvalidDataException("Invalid back_ptr in LH stream");

                int srcIndex = _buffer.Count - 1 - backPtr;
                for (int j = 0; j < repCount && _buffer.Count < _originalSize; j++)
                {
                    byte b = _buffer[srcIndex];
                    _buffer.Add(b);
                    srcIndex++;
                    // srcIndex may grow; it's allowed (source region can overlap destination)
                }
            }

            if (_buffer.Count >= _originalSize)
            {
                _finishedDecoding = true;
            }

            return true;
        }

        private int DecodeVal(int from, int to)
        {
            int add = 0;
            int bit = from;

            while (bit < to && _bitReader.ReadBits(1) == 1)
            {
                add |= 1 << bit;
                bit++;
            }

            int res = bit > 0 ? _bitReader.ReadBits(bit) : 0;
            return res + add;
        }

        /// <summary>
        /// Reads decompressed bytes into buffer[offset..offset+count].
        /// The method decodes additional data on demand when needed.
        /// </summary>
        public override int Read(byte[] buffer, int offset, int count)
        {
            if (_disposed)
                throw new ObjectDisposedException(nameof(LHDecoderStream));
            if (buffer == null)
                throw new ArgumentNullException(nameof(buffer));
            if (offset < 0 || count < 0 || offset + count > buffer.Length)
                throw new ArgumentOutOfRangeException(offset < 0 ? nameof(offset) : nameof(count));

            if (_readPosition >= _originalSize)
                return 0; // EOF

            int totalRead = 0;

            while (totalRead < count && _readPosition < _originalSize)
            {
                if (_readPosition >= _buffer.Count)
                {
                    bool had = DecodeNext();
                    if (!had)
                    {
                        break;
                    }
                }

                int available = _buffer.Count - (int)_readPosition;
                if (available <= 0)
                {
                    if (!_finishedDecoding)
                    {
                        continue;
                    }
                    break;
                }

                int toCopy = Math.Min(available, count - totalRead);
                _buffer.CopyTo((int)_readPosition, buffer, offset + totalRead, toCopy);

                _readPosition += toCopy;
                totalRead += toCopy;
            }

            return totalRead;
        }

        public override void Flush() => throw new NotSupportedException();

        public override long Seek(long offset, SeekOrigin origin) =>
            throw new NotSupportedException();

        public override void SetLength(long value) => throw new NotSupportedException();

        public override void Write(byte[] buffer, int offset, int count) =>
            throw new NotSupportedException();
    }
}
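The DecodeVal scheme reads a unary run of 1-flags that widens the value range before reading the raw bits. A worked example for DecodeVal(0, 7):

// input bits: 1 1 0 01...
//   two 1-flags raise bit from 0 to 2 and set add = (1 << 0) | (1 << 1) = 3,
//   the 0 stops the run, then ReadBits(2) consumes 01 = 1,
//   so the decoded value is res + add = 1 + 3 = 4.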
9 src/SharpCompress/Compressors/Arj/Lh5DecoderCfg.cs (new file)
@@ -0,0 +1,9 @@
namespace SharpCompress.Compressors.Arj
{
    public class Lh5DecoderCfg : ILhaDecoderConfig
    {
        public int HistoryBits => 14;
        public int OffsetBits => 4;
        public RingBuffer RingBuffer { get; } = new RingBuffer(1 << 14);
    }
}
9 src/SharpCompress/Compressors/Arj/Lh7DecoderCfg.cs (new file)
@@ -0,0 +1,9 @@
namespace SharpCompress.Compressors.Arj
{
    public class Lh7DecoderCfg : ILhaDecoderConfig
    {
        public int HistoryBits => 17;
        public int OffsetBits => 5;
        public RingBuffer RingBuffer { get; } = new RingBuffer(1 << 17);
    }
}
363 src/SharpCompress/Compressors/Arj/LhaStream.cs (new file)
@@ -0,0 +1,363 @@
using System;
using System.Data;
using System.IO;
using System.Linq;
using SharpCompress.IO;

namespace SharpCompress.Compressors.Arj
{
    [CLSCompliant(true)]
    public sealed class LhaStream<C> : Stream, IStreamStack
        where C : ILhaDecoderConfig, new()
    {
        private readonly BitReader _bitReader;
        private readonly Stream _stream;

        private readonly HuffTree _commandTree;
        private readonly HuffTree _offsetTree;
        private int _remainingCommands;
        private (int offset, int count)? _copyProgress;
        private readonly RingBuffer _ringBuffer;
        private readonly C _config = new C();

        private const int NUM_COMMANDS = 510;
        private const int NUM_TEMP_CODELEN = 20;

        private readonly int _originalSize;
        private int _producedBytes = 0;

#if DEBUG_STREAMS
        long IStreamStack.InstanceId { get; set; }
#endif
        int IStreamStack.DefaultBufferSize { get; set; }

        Stream IStreamStack.BaseStream() => _stream;

        int IStreamStack.BufferSize
        {
            get => 0;
            set { }
        }
        int IStreamStack.BufferPosition
        {
            get => 0;
            set { }
        }

        void IStreamStack.SetPosition(long position) { }

        public LhaStream(Stream compressedStream, int originalSize)
        {
            _stream = compressedStream ?? throw new ArgumentNullException(nameof(compressedStream));
            _bitReader = new BitReader(compressedStream);
            _ringBuffer = _config.RingBuffer;
            _commandTree = new HuffTree(NUM_COMMANDS * 2);
            _offsetTree = new HuffTree(NUM_TEMP_CODELEN * 2);
            _remainingCommands = 0;
            _copyProgress = null;
            _originalSize = originalSize;
        }

        public override bool CanRead => true;
        public override bool CanSeek => false;
        public override bool CanWrite => false;
        public override long Length => throw new NotSupportedException();
        public override long Position
        {
            get => throw new NotSupportedException();
            set => throw new NotSupportedException();
        }

        public override void Flush() { }

        public override long Seek(long offset, SeekOrigin origin) =>
            throw new NotSupportedException();

        public override void SetLength(long value) => throw new NotSupportedException();

        public override void Write(byte[] buffer, int offset, int count) =>
            throw new NotSupportedException();

        public override int Read(byte[] buffer, int offset, int count)
        {
            if (buffer == null)
            {
                throw new ArgumentNullException(nameof(buffer));
            }
            if (offset < 0 || count < 0 || (offset + count) > buffer.Length)
            {
                throw new ArgumentOutOfRangeException();
            }

            if (_producedBytes >= _originalSize)
            {
                return 0; // EOF
            }
            if (count == 0)
            {
                return 0;
            }

            int bytesRead = FillBuffer(buffer);
            return bytesRead;
        }

        private byte ReadCodeLength()
        {
            int len = _bitReader.ReadBits(3);
            if (len == 7)
            {
                while (_bitReader.ReadBit() != 0)
                {
                    len++;
                    if (len > 255)
                    {
                        throw new InvalidOperationException("Code length overflow");
                    }
                }
            }
            return (byte)len;
        }

        private int ReadCodeSkip(int skipRange)
        {
            int bits;
            int increment;

            switch (skipRange)
            {
                case 0:
                    return 1;
                case 1:
                    bits = 4;
                    increment = 3; // 3..=18
                    break;
                default:
                    bits = 9;
                    increment = 20; // 20..=531
                    break;
            }

            int skip = _bitReader.ReadBits(bits);
            return skip + increment;
        }

        private void ReadTempTree()
        {
            byte[] codeLengths = new byte[NUM_TEMP_CODELEN];

            // number of codes to read (5 bits)
            int numCodes = _bitReader.ReadBits(5);

            // single code only
            if (numCodes == 0)
            {
                int code = _bitReader.ReadBits(5);
                _offsetTree.SetSingle((byte)code);
                return;
            }

            if (numCodes > NUM_TEMP_CODELEN)
            {
                throw new Exception("temporary codelen table has invalid size");
            }

            // read actual lengths
            int count = Math.Min(3, numCodes);
            for (int i = 0; i < count; i++)
            {
                codeLengths[i] = (byte)ReadCodeLength();
            }

            // 2-bit skip value follows
            int skip = _bitReader.ReadBits(2);

            if (3 + skip > numCodes)
            {
                throw new Exception("temporary codelen table has invalid size");
            }

            for (int i = 3 + skip; i < numCodes; i++)
            {
                codeLengths[i] = (byte)ReadCodeLength();
            }

            _offsetTree.BuildTree(codeLengths, numCodes);
        }

        private void ReadCommandTree()
        {
            byte[] codeLengths = new byte[NUM_COMMANDS];

            // number of codes to read (9 bits)
            int numCodes = _bitReader.ReadBits(9);

            // single code only
            if (numCodes == 0)
            {
                int code = _bitReader.ReadBits(9);
                _commandTree.SetSingle((ushort)code);
                return;
            }

            if (numCodes > NUM_COMMANDS)
            {
                throw new Exception("commands codelen table has invalid size");
            }

            int index = 0;
            while (index < numCodes)
            {
                for (int n = 0; n < numCodes - index; n++)
                {
                    int code = _offsetTree.ReadEntry(_bitReader);

                    if (code >= 0 && code <= 2) // skip range
                    {
                        int skipCount = ReadCodeSkip(code);
                        index += n + skipCount;
                        goto outerLoop;
                    }
                    else
                    {
                        codeLengths[index + n] = (byte)(code - 2);
                    }
                }
                break;

                outerLoop:
                ;
            }

            _commandTree.BuildTree(codeLengths, numCodes);
        }

        private void ReadOffsetTree()
        {
            int numCodes = _bitReader.ReadBits(_config.OffsetBits);
            if (numCodes == 0)
            {
                int code = _bitReader.ReadBits(_config.OffsetBits);
                _offsetTree.SetSingle(code);
                return;
            }

            if (numCodes > _config.HistoryBits)
            {
                throw new InvalidDataException("Offset code table too large");
            }

            byte[] codeLengths = new byte[NUM_TEMP_CODELEN];
            for (int i = 0; i < numCodes; i++)
            {
                codeLengths[i] = (byte)ReadCodeLength();
            }

            _offsetTree.BuildTree(codeLengths, numCodes);
        }

        private void BeginNewBlock()
        {
            ReadTempTree();
            ReadCommandTree();
            ReadOffsetTree();
        }

        private int ReadCommand() => _commandTree.ReadEntry(_bitReader);

        private int ReadOffset()
        {
            int bits = _offsetTree.ReadEntry(_bitReader);
            if (bits <= 1)
            {
                return bits;
            }

            int res = _bitReader.ReadBits(bits - 1);
            return res | (1 << (bits - 1));
        }

        private int CopyFromHistory(byte[] target, int targetIndex, int offset, int count)
        {
            var historyIter = _ringBuffer.IterFromOffset(offset);
            int copied = 0;

            while (
                copied < count && historyIter.MoveNext() && (targetIndex + copied) < target.Length
            )
            {
                target[targetIndex + copied] = historyIter.Current;
                copied++;
            }

            if (copied < count)
            {
                _copyProgress = (offset, count - copied);
            }

            return copied;
        }

        public int FillBuffer(byte[] buffer)
        {
            int bufLen = buffer.Length;
            int bufIndex = 0;

            // stop when we reached original size
            if (_producedBytes >= _originalSize)
            {
                return 0;
            }

            // calculate limit, so that we don't go over the original size
            int remaining = (int)Math.Min(bufLen, _originalSize - _producedBytes);

            while (bufIndex < remaining)
            {
                if (_copyProgress.HasValue)
                {
                    var (offset, count) = _copyProgress.Value;
                    int copied = CopyFromHistory(
                        buffer,
                        bufIndex,
                        offset,
                        (int)Math.Min(count, remaining - bufIndex)
                    );
                    bufIndex += copied;
                    _copyProgress = null;
                }

                if (_remainingCommands == 0)
                {
                    _remainingCommands = _bitReader.ReadBits(16);
                    if (bufIndex + _remainingCommands > remaining)
                    {
                        break;
                    }
                    BeginNewBlock();
                }

                _remainingCommands--;

                int command = ReadCommand();

                if (command >= 0 && command <= 0xFF)
                {
                    byte value = (byte)command;
                    buffer[bufIndex++] = value;
                    _ringBuffer.Push(value);
                }
                else
                {
                    int count = command - 0x100 + 3;
                    int offset = ReadOffset();
                    int copyCount = (int)Math.Min(count, remaining - bufIndex);
                    bufIndex += CopyFromHistory(buffer, bufIndex, offset, copyCount);
                }
            }

            _producedBytes += bufIndex;
            return bufIndex;
        }
    }
}
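In ReadOffset, the Huffman entry encodes the bit-length class of the offset and the raw bits fill in the low-order positions. A worked example:

// offset tree yields bits = 5, so the decoder reads 4 raw bits, say 0110 = 6,
// and returns 6 | (1 << 4) = 22; entries <= 1 are returned directly as offsets 0 or 1.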
67
src/SharpCompress/Compressors/Arj/RingBuffer.cs
Normal file
67
src/SharpCompress/Compressors/Arj/RingBuffer.cs
Normal file
@@ -0,0 +1,67 @@
using System;
using System.Collections;
using System.Collections.Generic;

namespace SharpCompress.Compressors.Arj
{
/// <summary>
/// A fixed-size ring buffer where N must be a power of two.
/// </summary>
public class RingBuffer : IRingBuffer
{
private readonly byte[] _buffer;
private int _cursor;

public int BufferSize { get; }

public int Cursor => _cursor;

private readonly int _mask;

public RingBuffer(int size)
{
if ((size & (size - 1)) != 0)
{
throw new ArgumentException("RingArrayBuffer size must be a power of two");
}

BufferSize = size;
_buffer = new byte[size];
_cursor = 0;
_mask = size - 1;

// Fill with spaces
for (int i = 0; i < size; i++)
{
_buffer[i] = (byte)' ';
}
}

public void SetCursor(int pos)
{
_cursor = pos & _mask;
}

public void Push(byte value)
{
int index = _cursor;
_buffer[index & _mask] = value;
_cursor = (index + 1) & _mask;
}

public byte this[int index] => _buffer[index & _mask];

public HistoryIterator IterFromOffset(int offset)
{
int masked = (offset & _mask) + 1;
int startIndex = _cursor + BufferSize - masked;
return new HistoryIterator(this, startIndex);
}

public HistoryIterator IterFromPos(int pos)
{
int startIndex = pos & _mask;
return new HistoryIterator(this, startIndex);
}
}
}
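A brief usage sketch of the ring buffer above (illustrative only; the size and values are assumptions, not part of the diff). Because the size is a power of two, masking with size - 1 replaces a modulo for both reads and writes:

// Illustrative sketch: a 16-byte buffer (power of two) wraps its cursor via the mask.
var ring = new RingBuffer(16);
ring.Push((byte)'a');   // writes at index 0, cursor advances to 1
ring.SetCursor(17);     // 17 & 15 == 1, so the cursor wraps back to 1
byte b = ring[16];      // 16 & 15 == 0, reads the 'a' written above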
@@ -1,5 +1,7 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.IO;

namespace SharpCompress.Compressors.BZip2;
@@ -96,13 +98,37 @@ public sealed class BZip2Stream : Stream, IStreamStack

public override void SetLength(long value) => stream.SetLength(value);

#if !NETFRAMEWORK&& !NETSTANDARD2_0
#if !NETFRAMEWORK && !NETSTANDARD2_0

public override int Read(Span<byte> buffer) => stream.Read(buffer);

public override void Write(ReadOnlySpan<byte> buffer) => stream.Write(buffer);

public override async ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default
) => await stream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false);

public override async ValueTask WriteAsync(
ReadOnlyMemory<byte> buffer,
CancellationToken cancellationToken = default
) => await stream.WriteAsync(buffer, cancellationToken).ConfigureAwait(false);
#endif

public override async Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken = default
) => await stream.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false);

public override async Task WriteAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken = default
) => await stream.WriteAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false);

public override void Write(byte[] buffer, int offset, int count) =>
stream.Write(buffer, offset, count);

@@ -2,6 +2,8 @@

using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.IO;

/*
@@ -1127,6 +1129,28 @@ internal class CBZip2InputStream : Stream, IStreamStack
return k;
}

public override Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken = default
)
{
var c = -1;
int k;
for (k = 0; k < count; ++k)
{
cancellationToken.ThrowIfCancellationRequested();
c = ReadByte();
if (c == -1)
{
break;
}
buffer[k + offset] = (byte)c;
}
return Task.FromResult(k);
}

public override long Seek(long offset, SeekOrigin origin) => 0;

public override void SetLength(long value) { }

@@ -1,5 +1,7 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.IO;

/*
@@ -542,6 +544,12 @@ internal sealed class CBZip2OutputStream : Stream, IStreamStack

private void EndBlock()
{
// Skip block processing for empty input (no data written)
if (last < 0)
{
return;
}

blockCRC = mCrc.GetFinalCRC();
combinedCRC = (combinedCRC << 1) | (int)(((uint)combinedCRC) >> 31);
combinedCRC ^= blockCRC;
@@ -2022,6 +2030,21 @@ internal sealed class CBZip2OutputStream : Stream, IStreamStack
}
}

public override Task WriteAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken = default
)
{
for (var k = 0; k < count; ++k)
{
cancellationToken.ThrowIfCancellationRequested();
WriteByte(buffer[k + offset]);
}
return Task.CompletedTask;
}

public override bool CanRead => false;

public override bool CanSeek => false;

@@ -2,6 +2,8 @@ using System;
using System.IO;
using System.Security.Cryptography;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Compressors.LZMA.Utilites;
using SharpCompress.IO;

@@ -283,5 +285,70 @@ internal sealed class AesDecoderStream : DecoderStream2, IStreamStack
return count;
}

public override async Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken = default
)
{
if (count == 0 || mWritten == mLimit)
{
return 0;
}

if (mUnderflow > 0)
{
return HandleUnderflow(buffer, offset, count);
}

// Need at least 16 bytes to proceed.
if (mEnding - mOffset < 16)
{
Buffer.BlockCopy(mBuffer, mOffset, mBuffer, 0, mEnding - mOffset);
mEnding -= mOffset;
mOffset = 0;

do
{
cancellationToken.ThrowIfCancellationRequested();
var read = await mStream
.ReadAsync(mBuffer, mEnding, mBuffer.Length - mEnding, cancellationToken)
.ConfigureAwait(false);
if (read == 0)
{
// We are not done decoding and have less than 16 bytes.
throw new EndOfStreamException();
}

mEnding += read;
} while (mEnding - mOffset < 16);
}

// We shouldn't return more data than we are limited to.
if (count > mLimit - mWritten)
{
count = (int)(mLimit - mWritten);
}

// We cannot transform less than 16 bytes into the target buffer,
// but we also cannot return zero, so we need to handle this.
if (count < 16)
{
return HandleUnderflow(buffer, offset, count);
}

if (count > mEnding - mOffset)
{
count = mEnding - mOffset;
}

// Otherwise we transform directly into the target buffer.
var processed = mDecoder.TransformBlock(mBuffer, mOffset, count & ~15, buffer, offset);
mOffset += processed;
mWritten += processed;
return processed;
}

#endregion
}
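The count & ~15 in the transform call above rounds the length down to a whole number of AES blocks. A small sketch of that arithmetic (the values are illustrative, not from the diff):

// AES operates on 16-byte blocks, so the transform length is rounded down to a multiple of 16.
int count = 37;
int aligned = count & ~15;      // 37 -> 32, i.e. two whole AES blocks
int leftover = count - aligned; // 5 bytes deferred to the underflow path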
@@ -1,6 +1,8 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.IO;

namespace SharpCompress.Compressors.LZMA;
@@ -191,6 +193,18 @@ internal class Bcj2DecoderStream : DecoderStream2, IStreamStack
return count;
}

public override Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken = default
)
{
cancellationToken.ThrowIfCancellationRequested();
// Bcj2DecoderStream uses complex state machine with multiple streams
return Task.FromResult(Read(buffer, offset, count));
}

public override int ReadByte()
{
if (_mFinished)

@@ -3,6 +3,8 @@

using System;
using System.Buffers;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

namespace SharpCompress.Compressors.LZMA.LZ;

@@ -85,6 +87,12 @@ internal class OutWindow : IDisposable
_stream = null;
}

public async Task ReleaseStreamAsync(CancellationToken cancellationToken = default)
{
await FlushAsync(cancellationToken).ConfigureAwait(false);
_stream = null;
}

private void Flush()
{
if (_stream is null)
@@ -104,6 +112,27 @@ internal class OutWindow : IDisposable
_streamPos = _pos;
}

private async Task FlushAsync(CancellationToken cancellationToken = default)
{
if (_stream is null)
{
return;
}
var size = _pos - _streamPos;
if (size == 0)
{
return;
}
await _stream
.WriteAsync(_buffer, _streamPos, size, cancellationToken)
.ConfigureAwait(false);
if (_pos >= _windowSize)
{
_pos = 0;
}
_streamPos = _pos;
}

public void CopyPending()
{
if (_pendingLen < 1)
@@ -124,6 +153,26 @@ internal class OutWindow : IDisposable
_pendingLen = rem;
}

public async Task CopyPendingAsync(CancellationToken cancellationToken = default)
{
if (_pendingLen < 1)
{
return;
}
var rem = _pendingLen;
var pos = (_pendingDist < _pos ? _pos : _pos + _windowSize) - _pendingDist - 1;
while (rem > 0 && HasSpace)
{
if (pos >= _windowSize)
{
pos = 0;
}
await PutByteAsync(_buffer[pos++], cancellationToken).ConfigureAwait(false);
rem--;
}
_pendingLen = rem;
}

public void CopyBlock(int distance, int len)
{
var rem = len;
@@ -157,6 +206,43 @@ internal class OutWindow : IDisposable
_pendingDist = distance;
}

public async Task CopyBlockAsync(
int distance,
int len,
CancellationToken cancellationToken = default
)
{
var rem = len;
var pos = (distance < _pos ? _pos : _pos + _windowSize) - distance - 1;
var targetSize = HasSpace ? (int)Math.Min(rem, _limit - _total) : 0;
var sizeUntilWindowEnd = Math.Min(_windowSize - _pos, _windowSize - pos);
var sizeUntilOverlap = Math.Abs(pos - _pos);
var fastSize = Math.Min(Math.Min(sizeUntilWindowEnd, sizeUntilOverlap), targetSize);
if (fastSize >= 2)
{
_buffer.AsSpan(pos, fastSize).CopyTo(_buffer.AsSpan(_pos, fastSize));
_pos += fastSize;
pos += fastSize;
_total += fastSize;
if (_pos >= _windowSize)
{
await FlushAsync(cancellationToken).ConfigureAwait(false);
}
rem -= fastSize;
}
while (rem > 0 && HasSpace)
{
if (pos >= _windowSize)
{
pos = 0;
}
await PutByteAsync(_buffer[pos++], cancellationToken).ConfigureAwait(false);
rem--;
}
_pendingLen = rem;
_pendingDist = distance;
}

public void PutByte(byte b)
{
_buffer[_pos++] = b;
@@ -167,6 +253,16 @@ internal class OutWindow : IDisposable
}
}

public async Task PutByteAsync(byte b, CancellationToken cancellationToken = default)
{
_buffer[_pos++] = b;
_total++;
if (_pos >= _windowSize)
{
await FlushAsync(cancellationToken).ConfigureAwait(false);
}
}

public byte GetByte(int distance)
{
var pos = _pos - distance - 1;
@@ -207,6 +303,44 @@ internal class OutWindow : IDisposable
return len - size;
}

public async Task<int> CopyStreamAsync(
Stream stream,
int len,
CancellationToken cancellationToken = default
)
{
var size = len;
while (size > 0 && _pos < _windowSize && _total < _limit)
{
cancellationToken.ThrowIfCancellationRequested();

var curSize = _windowSize - _pos;
if (curSize > _limit - _total)
{
curSize = (int)(_limit - _total);
}
if (curSize > size)
{
curSize = size;
}
var numReadBytes = await stream
.ReadAsync(_buffer, _pos, curSize, cancellationToken)
.ConfigureAwait(false);
if (numReadBytes == 0)
{
throw new DataErrorException();
}
size -= numReadBytes;
_pos += numReadBytes;
_total += numReadBytes;
if (_pos >= _windowSize)
{
await FlushAsync(cancellationToken).ConfigureAwait(false);
}
}
return len - size;
}

public void SetLimit(long size) => _limit = _total + size;

public bool HasSpace => _pos < _windowSize && _total < _limit;
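CopyBlockAsync above splits a match copy into a fast span copy (up to the window end or the point where source and destination would overlap) and a byte-at-a-time tail. The byte loop matters because LZ77 matches may overlap the write position, so freshly written bytes become part of the source. A minimal sketch of that rule, with assumed names and no wraparound handling:

// Sketch only: distance 0 repeats the last byte, which a bulk memcpy would get wrong.
static void CopyOverlapped(byte[] window, int pos, int distance, int len)
{
    int src = pos - distance - 1;
    for (int i = 0; i < len; i++)
    {
        window[pos + i] = window[src + i]; // byte-at-a-time preserves overlap semantics
    }
}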
@@ -1,6 +1,8 @@
using System;
using System.Buffers.Binary;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Common;
using SharpCompress.Crypto;
using SharpCompress.IO;
@@ -157,6 +159,11 @@ public sealed class LZipStream : Stream, IStreamStack

#if !NETFRAMEWORK && !NETSTANDARD2_0

public override ValueTask<int> ReadAsync(
Memory<byte> buffer,
CancellationToken cancellationToken = default
) => _stream.ReadAsync(buffer, cancellationToken);

public override int Read(Span<byte> buffer) => _stream.Read(buffer);

public override void Write(ReadOnlySpan<byte> buffer)
@@ -179,6 +186,25 @@ public sealed class LZipStream : Stream, IStreamStack
++_writeCount;
}

public override Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken = default
) => _stream.ReadAsync(buffer, offset, count, cancellationToken);

public override async Task WriteAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken
)
{
cancellationToken.ThrowIfCancellationRequested();
await _stream.WriteAsync(buffer, offset, count, cancellationToken);
_writeCount += count;
}

#endregion

/// <summary>

@@ -1,6 +1,7 @@
#nullable disable

using System;
using System.Diagnostics.CodeAnalysis;
using System.IO;
using SharpCompress.Compressors.LZMA.LZ;
using SharpCompress.Compressors.LZMA.RangeCoder;
@@ -199,6 +200,9 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
}
}

#if !NETFRAMEWORK && !NETSTANDARD2_0
[MemberNotNull(nameof(_outWindow))]
#endif
private void CreateDictionary()
{
if (_dictionarySize < 0)
@@ -309,6 +313,42 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
_outWindow = null;
}

public async System.Threading.Tasks.Task CodeAsync(
Stream inStream,
Stream outStream,
long inSize,
long outSize,
ICodeProgress progress,
System.Threading.CancellationToken cancellationToken = default
)
{
if (_outWindow is null)
{
CreateDictionary();
}
_outWindow.Init(outStream);
if (outSize > 0)
{
_outWindow.SetLimit(outSize);
}
else
{
_outWindow.SetLimit(long.MaxValue - _outWindow.Total);
}

var rangeDecoder = new RangeCoder.Decoder();
rangeDecoder.Init(inStream);

await CodeAsync(_dictionarySize, _outWindow, rangeDecoder, cancellationToken)
.ConfigureAwait(false);

await _outWindow.ReleaseStreamAsync(cancellationToken).ConfigureAwait(false);
rangeDecoder.ReleaseStream();

_outWindow.Dispose();
_outWindow = null;
}

internal bool Code(int dictionarySize, OutWindow outWindow, RangeCoder.Decoder rangeDecoder)
{
var dictionarySizeCheck = Math.Max(dictionarySize, 1);
@@ -435,6 +475,143 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
return false;
}

internal async System.Threading.Tasks.Task<bool> CodeAsync(
int dictionarySize,
OutWindow outWindow,
RangeCoder.Decoder rangeDecoder,
System.Threading.CancellationToken cancellationToken = default
)
{
var dictionarySizeCheck = Math.Max(dictionarySize, 1);

await outWindow.CopyPendingAsync(cancellationToken).ConfigureAwait(false);

while (outWindow.HasSpace)
{
cancellationToken.ThrowIfCancellationRequested();

var posState = (uint)outWindow.Total & _posStateMask;
if (
_isMatchDecoders[(_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState]
.Decode(rangeDecoder) == 0
)
{
byte b;
var prevByte = outWindow.GetByte(0);
if (!_state.IsCharState())
{
b = _literalDecoder.DecodeWithMatchByte(
rangeDecoder,
(uint)outWindow.Total,
prevByte,
outWindow.GetByte((int)_rep0)
);
}
else
{
b = _literalDecoder.DecodeNormal(rangeDecoder, (uint)outWindow.Total, prevByte);
}
await outWindow.PutByteAsync(b, cancellationToken).ConfigureAwait(false);
_state.UpdateChar();
}
else
{
uint len;
if (_isRepDecoders[_state._index].Decode(rangeDecoder) == 1)
{
if (_isRepG0Decoders[_state._index].Decode(rangeDecoder) == 0)
{
if (
_isRep0LongDecoders[
(_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState
]
.Decode(rangeDecoder) == 0
)
{
_state.UpdateShortRep();
await outWindow
.PutByteAsync(outWindow.GetByte((int)_rep0), cancellationToken)
.ConfigureAwait(false);
continue;
}
}
else
{
uint distance;
if (_isRepG1Decoders[_state._index].Decode(rangeDecoder) == 0)
{
distance = _rep1;
}
else
{
if (_isRepG2Decoders[_state._index].Decode(rangeDecoder) == 0)
{
distance = _rep2;
}
else
{
distance = _rep3;
_rep3 = _rep2;
}
_rep2 = _rep1;
}
_rep1 = _rep0;
_rep0 = distance;
}
len = _repLenDecoder.Decode(rangeDecoder, posState) + Base.K_MATCH_MIN_LEN;
_state.UpdateRep();
}
else
{
_rep3 = _rep2;
_rep2 = _rep1;
_rep1 = _rep0;
len = Base.K_MATCH_MIN_LEN + _lenDecoder.Decode(rangeDecoder, posState);
_state.UpdateMatch();
var posSlot = _posSlotDecoder[Base.GetLenToPosState(len)].Decode(rangeDecoder);
if (posSlot >= Base.K_START_POS_MODEL_INDEX)
{
var numDirectBits = (int)((posSlot >> 1) - 1);
_rep0 = ((2 | (posSlot & 1)) << numDirectBits);
if (posSlot < Base.K_END_POS_MODEL_INDEX)
{
_rep0 += BitTreeDecoder.ReverseDecode(
_posDecoders,
_rep0 - posSlot - 1,
rangeDecoder,
numDirectBits
);
}
else
{
_rep0 += (
rangeDecoder.DecodeDirectBits(numDirectBits - Base.K_NUM_ALIGN_BITS)
<< Base.K_NUM_ALIGN_BITS
);
_rep0 += _posAlignDecoder.ReverseDecode(rangeDecoder);
}
}
else
{
_rep0 = posSlot;
}
}
if (_rep0 >= outWindow.Total || _rep0 >= dictionarySizeCheck)
{
if (_rep0 == 0xFFFFFFFF)
{
return true;
}
throw new DataErrorException();
}
await outWindow
.CopyBlockAsync((int)_rep0, (int)len, cancellationToken)
.ConfigureAwait(false);
}
}
return false;
}

public void SetDecoderProperties(byte[] properties)
{
if (properties.Length < 1)
@@ -470,29 +647,4 @@ public class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream
}
_outWindow.Train(stream);
}

/*
public override bool CanRead { get { return true; }}
public override bool CanWrite { get { return true; }}
public override bool CanSeek { get { return true; }}
public override long Length { get { return 0; }}
public override long Position
{
get { return 0; }
set { }
}
public override void Flush() { }
public override int Read(byte[] buffer, int offset, int count)
{
return 0;
}
public override void Write(byte[] buffer, int offset, int count)
{
}
public override long Seek(long offset, System.IO.SeekOrigin origin)
{
return 0;
}
public override void SetLength(long value) {}
*/
}

@@ -3,6 +3,8 @@

using System;
using System.Buffers.Binary;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Compressors.LZMA.LZ;
using SharpCompress.IO;

@@ -423,6 +425,90 @@ public class LzmaStream : Stream, IStreamStack
}
}

private async Task DecodeChunkHeaderAsync(CancellationToken cancellationToken = default)
{
var controlBuffer = new byte[1];
await _inputStream
.ReadExactlyAsync(controlBuffer, 0, 1, cancellationToken)
.ConfigureAwait(false);
var control = controlBuffer[0];
_inputPosition++;

if (control == 0x00)
{
_endReached = true;
return;
}

if (control >= 0xE0 || control == 0x01)
{
_needProps = true;
_needDictReset = false;
_outWindow.Reset();
}
else if (_needDictReset)
{
throw new DataErrorException();
}

if (control >= 0x80)
{
_uncompressedChunk = false;

_availableBytes = (control & 0x1F) << 16;
var buffer = new byte[2];
await _inputStream
.ReadExactlyAsync(buffer, 0, 2, cancellationToken)
.ConfigureAwait(false);
_availableBytes += (buffer[0] << 8) + buffer[1] + 1;
_inputPosition += 2;

await _inputStream
.ReadExactlyAsync(buffer, 0, 2, cancellationToken)
.ConfigureAwait(false);
_rangeDecoderLimit = (buffer[0] << 8) + buffer[1] + 1;
_inputPosition += 2;

if (control >= 0xC0)
{
_needProps = false;
await _inputStream
.ReadExactlyAsync(controlBuffer, 0, 1, cancellationToken)
.ConfigureAwait(false);
Properties[0] = controlBuffer[0];
_inputPosition++;

_decoder = new Decoder();
_decoder.SetDecoderProperties(Properties);
}
else if (_needProps)
{
throw new DataErrorException();
}
else if (control >= 0xA0)
{
_decoder = new Decoder();
_decoder.SetDecoderProperties(Properties);
}

_rangeDecoder.Init(_inputStream);
}
else if (control > 0x02)
{
throw new DataErrorException();
}
else
{
_uncompressedChunk = true;
var buffer = new byte[2];
await _inputStream
.ReadExactlyAsync(buffer, 0, 2, cancellationToken)
.ConfigureAwait(false);
_availableBytes = (buffer[0] << 8) + buffer[1] + 1;
_inputPosition += 2;
}
}

public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();

public override void SetLength(long value) => throw new NotSupportedException();
@@ -435,5 +521,128 @@ public class LzmaStream : Stream, IStreamStack
}
}

public override async Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken
)
{
if (_endReached)
{
return 0;
}

var total = 0;
while (total < count)
{
cancellationToken.ThrowIfCancellationRequested();

if (_availableBytes == 0)
{
if (_isLzma2)
{
await DecodeChunkHeaderAsync(cancellationToken).ConfigureAwait(false);
}
else
{
_endReached = true;
}
if (_endReached)
{
break;
}
}

var toProcess = count - total;
if (toProcess > _availableBytes)
{
toProcess = (int)_availableBytes;
}

_outWindow.SetLimit(toProcess);
if (_uncompressedChunk)
{
_inputPosition += await _outWindow
.CopyStreamAsync(_inputStream, toProcess, cancellationToken)
.ConfigureAwait(false);
}
else if (
await _decoder
.CodeAsync(_dictionarySize, _outWindow, _rangeDecoder, cancellationToken)
.ConfigureAwait(false)
&& _outputSize < 0
)
{
_availableBytes = _outWindow.AvailableBytes;
}

var read = _outWindow.Read(buffer, offset, toProcess);
total += read;
offset += read;
_position += read;
_availableBytes -= read;

if (_availableBytes == 0 && !_uncompressedChunk)
{
if (
!_rangeDecoder.IsFinished
|| (_rangeDecoderLimit >= 0 && _rangeDecoder._total != _rangeDecoderLimit)
)
{
_outWindow.SetLimit(toProcess + 1);
if (
!await _decoder
.CodeAsync(
_dictionarySize,
_outWindow,
_rangeDecoder,
cancellationToken
)
.ConfigureAwait(false)
)
{
_rangeDecoder.ReleaseStream();
throw new DataErrorException();
}
}

_rangeDecoder.ReleaseStream();

_inputPosition += _rangeDecoder._total;
if (_outWindow.HasPending)
{
throw new DataErrorException();
}
}
}

if (_endReached)
{
if (_inputSize >= 0 && _inputPosition != _inputSize)
{
throw new DataErrorException();
}
if (_outputSize >= 0 && _position != _outputSize)
{
throw new DataErrorException();
}
}

return total;
}

public override Task WriteAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken
)
{
cancellationToken.ThrowIfCancellationRequested();
Write(buffer, offset, count);
return Task.CompletedTask;
}

public byte[] Properties { get; } = new byte[5];
}
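For reference, the control-byte cases handled by DecodeChunkHeaderAsync above can be summarized as follows (derived directly from the branches in the code, not an addition to the diff):

// control == 0x00          end of LZMA2 stream
// control == 0x01          uncompressed chunk, dictionary reset
// control == 0x02          uncompressed chunk, no reset
// 0x80 <= control < 0xA0   LZMA chunk, reuse decoder state and properties
// 0xA0 <= control < 0xC0   LZMA chunk, reset decoder state
// 0xC0 <= control < 0xE0   LZMA chunk, new properties byte follows
// control >= 0xE0          LZMA chunk, new properties plus dictionary reset
// anything else (0x03..0x7F) is rejected with DataErrorException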
@@ -7,7 +7,7 @@ using SharpCompress.Compressors.Deflate;
using SharpCompress.Compressors.Filters;
using SharpCompress.Compressors.LZMA.Utilites;
using SharpCompress.Compressors.PPMd;
using ZstdSharp;
using SharpCompress.Compressors.ZStandard;

namespace SharpCompress.Compressors.LZMA;

@@ -1,5 +1,7 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.IO;

namespace SharpCompress.Compressors.LZMA.Utilites;
@@ -101,4 +103,22 @@ internal class CrcBuilderStream : Stream, IStreamStack
_mCrc = Crc.Update(_mCrc, buffer, offset, count);
_mTarget.Write(buffer, offset, count);
}

public override async Task WriteAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken = default
)
{
cancellationToken.ThrowIfCancellationRequested();
if (_mFinished)
{
throw new InvalidOperationException("CRC calculation has been finished.");
}

Processed += count;
_mCrc = Crc.Update(_mCrc, buffer, offset, count);
await _mTarget.WriteAsync(buffer, offset, count, cancellationToken);
}
}

@@ -1,6 +1,9 @@
using System;
using System.Buffers;
using System.Diagnostics;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

namespace SharpCompress.Compressors.LZMA.Utilites;

@@ -11,7 +14,7 @@ public class CrcCheckStream : Stream
private uint _mCurrentCrc;
private bool _mClosed;

private readonly long[] _mBytes = new long[256];
private readonly long[] _mBytes = ArrayPool<long>.Shared.Rent(256);
private long _mLength;

public CrcCheckStream(uint crc)
@@ -65,6 +68,7 @@ public class CrcCheckStream : Stream
finally
{
base.Dispose(disposing);
ArrayPool<long>.Shared.Return(_mBytes);
}
}

@@ -101,4 +105,16 @@ public class CrcCheckStream : Stream

_mCurrentCrc = Crc.Update(_mCurrentCrc, buffer, offset, count);
}

public override Task WriteAsync(
byte[] buffer,
int offset,
int count,
CancellationToken cancellationToken
)
{
cancellationToken.ThrowIfCancellationRequested();
Write(buffer, offset, count);
return Task.CompletedTask;
}
}

@@ -1,13 +1,13 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using SharpCompress.IO;

namespace SharpCompress.Compressors.RLE90
{
/// <summary>
/// Real-time streaming RLE90 decompression stream.
/// Decompresses bytes on demand without buffering the entire file in memory.
/// </summary>
public class RunLength90Stream : Stream, IStreamStack
{
#if DEBUG_STREAMS
@@ -31,13 +31,19 @@ namespace SharpCompress.Compressors.RLE90
void IStreamStack.SetPosition(long position) { }

private readonly Stream _stream;
private readonly int _compressedSize;
private int _bytesReadFromSource;

private const byte DLE = 0x90;
private int _compressedSize;
private bool _processed = false;
private bool _inDleMode;
private byte _lastByte;
private int _repeatCount;

private bool _endOfCompressedData;

public RunLength90Stream(Stream stream, int compressedSize)
{
_stream = stream;
_stream = stream ?? throw new ArgumentNullException(nameof(stream));
_compressedSize = compressedSize;
#if DEBUG_STREAMS
this.DebugConstruct(typeof(RunLength90Stream));
@@ -53,44 +59,93 @@ namespace SharpCompress.Compressors.RLE90
}

public override bool CanRead => true;

public override bool CanSeek => false;

public override bool CanWrite => false;

public override long Length => throw new NotImplementedException();

public override long Length => throw new NotSupportedException();
public override long Position
{
get => _stream.Position;
set => throw new NotImplementedException();
get => throw new NotSupportedException();
set => throw new NotSupportedException();
}

public override void Flush() => throw new NotImplementedException();
public override void Flush() => throw new NotSupportedException();

public override long Seek(long offset, SeekOrigin origin) =>
throw new NotSupportedException();

public override void SetLength(long value) => throw new NotSupportedException();

public override void Write(byte[] buffer, int offset, int count) =>
throw new NotSupportedException();

public override int Read(byte[] buffer, int offset, int count)
{
if (_processed)
if (buffer == null)
throw new ArgumentNullException(nameof(buffer));
if (offset < 0 || count < 0 || offset + count > buffer.Length)
throw new ArgumentOutOfRangeException();

int bytesWritten = 0;

while (bytesWritten < count && !_endOfCompressedData)
{
return 0;
// Handle pending repeat bytes first
if (_repeatCount > 0)
{
int toWrite = Math.Min(_repeatCount, count - bytesWritten);
for (int i = 0; i < toWrite; i++)
{
buffer[offset + bytesWritten++] = _lastByte;
}
_repeatCount -= toWrite;
continue;
}

// Try to read the next byte from compressed data
if (_bytesReadFromSource >= _compressedSize)
{
_endOfCompressedData = true;
break;
}

int next = _stream.ReadByte();
if (next == -1)
{
_endOfCompressedData = true;
break;
}

_bytesReadFromSource++;
byte c = (byte)next;

if (_inDleMode)
{
_inDleMode = false;

if (c == 0)
{
buffer[offset + bytesWritten++] = DLE;
_lastByte = DLE;
}
else
{
_repeatCount = c - 1;
// We'll handle these repeats in next loop iteration.
}
}
else if (c == DLE)
{
_inDleMode = true;
}
else
{
buffer[offset + bytesWritten++] = c;
_lastByte = c;
}
}
_processed = true;

using var binaryReader = new BinaryReader(_stream);
byte[] compressedBuffer = binaryReader.ReadBytes(_compressedSize);

var unpacked = RLE.UnpackRLE(compressedBuffer);
unpacked.CopyTo(buffer);

return unpacked.Count;
return bytesWritten;
}

public override long Seek(long offset, SeekOrigin origin) =>
throw new NotImplementedException();

public override void SetLength(long value) => throw new NotImplementedException();

public override void Write(byte[] buffer, int offset, int count) =>
throw new NotImplementedException();
}
}
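The wire format the rewritten Read loop decodes is the classic RLE90 scheme, summarized here from the branches in the code (the example bytes are illustrative):

// 0x41 0x90 0x05  ->  0x41 repeated 5 times total ('A' once, then repeatCount = 4 more)
// 0x90 0x00       ->  a single literal 0x90 byte
// any other byte  ->  copied through unchanged and remembered as _lastByte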
@@ -1,4 +1,6 @@
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Common.Rar.Headers;

namespace SharpCompress.Compressors.Rar;
@@ -8,6 +10,14 @@ internal interface IRarUnpack
void DoUnpack(FileHeader fileHeader, Stream readStream, Stream writeStream);
void DoUnpack();

Task DoUnpackAsync(
FileHeader fileHeader,
Stream readStream,
Stream writeStream,
CancellationToken cancellationToken
);
Task DoUnpackAsync(CancellationToken cancellationToken);

// eg u/i pause/resume button
bool Suspended { get; set; }

@@ -37,18 +37,8 @@ internal sealed class MultiVolumeReadOnlyStream : Stream, IStreamStack
private IEnumerator<RarFilePart> filePartEnumerator;
private Stream currentStream;

private readonly IExtractionListener streamListener;

private long currentPartTotalReadBytes;
private long currentEntryTotalReadBytes;

internal MultiVolumeReadOnlyStream(
IEnumerable<RarFilePart> parts,
IExtractionListener streamListener
)
internal MultiVolumeReadOnlyStream(IEnumerable<RarFilePart> parts)
{
this.streamListener = streamListener;

filePartEnumerator = parts.GetEnumerator();
filePartEnumerator.MoveNext();
InitializeNextFilePart();
@@ -81,15 +71,7 @@ internal sealed class MultiVolumeReadOnlyStream : Stream, IStreamStack
currentPosition = 0;
currentStream = filePartEnumerator.Current.GetCompressedStream();

currentPartTotalReadBytes = 0;

CurrentCrc = filePartEnumerator.Current.FileHeader.FileCrc;

streamListener.FireFilePartExtractionBegin(
filePartEnumerator.Current.FilePartName,
filePartEnumerator.Current.FileHeader.CompressedSize,
filePartEnumerator.Current.FileHeader.UncompressedSize
);
}

public override int Read(byte[] buffer, int offset, int count)
@@ -141,15 +123,127 @@ internal sealed class MultiVolumeReadOnlyStream : Stream, IStreamStack
break;
}
}
currentPartTotalReadBytes += totalRead;
currentEntryTotalReadBytes += totalRead;
streamListener.FireCompressedBytesRead(
currentPartTotalReadBytes,
currentEntryTotalReadBytes
);
return totalRead;
}

public override async System.Threading.Tasks.Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
System.Threading.CancellationToken cancellationToken
)
{
var totalRead = 0;
var currentOffset = offset;
var currentCount = count;
while (currentCount > 0)
{
var readSize = currentCount;
if (currentCount > maxPosition - currentPosition)
{
readSize = (int)(maxPosition - currentPosition);
}

var read = await currentStream
.ReadAsync(buffer, currentOffset, readSize, cancellationToken)
.ConfigureAwait(false);
if (read < 0)
{
throw new EndOfStreamException();
}

currentPosition += read;
currentOffset += read;
currentCount -= read;
totalRead += read;
if (
((maxPosition - currentPosition) == 0)
&& filePartEnumerator.Current.FileHeader.IsSplitAfter
)
{
if (filePartEnumerator.Current.FileHeader.R4Salt != null)
{
throw new InvalidFormatException(
"Sharpcompress currently does not support multi-volume decryption."
);
}
var fileName = filePartEnumerator.Current.FileHeader.FileName;
if (!filePartEnumerator.MoveNext())
{
throw new InvalidFormatException(
"Multi-part rar file is incomplete. Entry expects a new volume: "
+ fileName
);
}
InitializeNextFilePart();
}
else
{
break;
}
}
return totalRead;
}

#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
public override async System.Threading.Tasks.ValueTask<int> ReadAsync(
Memory<byte> buffer,
System.Threading.CancellationToken cancellationToken = default
)
{
var totalRead = 0;
var currentOffset = 0;
var currentCount = buffer.Length;
while (currentCount > 0)
{
var readSize = currentCount;
if (currentCount > maxPosition - currentPosition)
{
readSize = (int)(maxPosition - currentPosition);
}

var read = await currentStream
.ReadAsync(buffer.Slice(currentOffset, readSize), cancellationToken)
.ConfigureAwait(false);
if (read < 0)
{
throw new EndOfStreamException();
}

currentPosition += read;
currentOffset += read;
currentCount -= read;
totalRead += read;
if (
((maxPosition - currentPosition) == 0)
&& filePartEnumerator.Current.FileHeader.IsSplitAfter
)
{
if (filePartEnumerator.Current.FileHeader.R4Salt != null)
{
throw new InvalidFormatException(
"Sharpcompress currently does not support multi-volume decryption."
);
}
var fileName = filePartEnumerator.Current.FileHeader.FileName;
if (!filePartEnumerator.MoveNext())
{
throw new InvalidFormatException(
"Multi-part rar file is incomplete. Entry expects a new volume: "
+ fileName
);
}
InitializeNextFilePart();
}
else
{
break;
}
}
return totalRead;
}
#endif

public override bool CanRead => true;

public override bool CanSeek => false;

@@ -1,6 +1,8 @@
using System;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Common;
using SharpCompress.Common.Rar.Headers;
using SharpCompress.IO;
@@ -103,7 +105,7 @@ internal class RarBLAKE2spStream : RarStream, IStreamStack

byte[] _hash = { };

public RarBLAKE2spStream(
private RarBLAKE2spStream(
IRarUnpack unpack,
FileHeader fileHeader,
MultiVolumeReadOnlyStream readStream
@@ -121,6 +123,29 @@ internal class RarBLAKE2spStream : RarStream, IStreamStack
ResetCrc();
}

public static RarBLAKE2spStream Create(
IRarUnpack unpack,
FileHeader fileHeader,
MultiVolumeReadOnlyStream readStream
)
{
var stream = new RarBLAKE2spStream(unpack, fileHeader, readStream);
stream.Initialize();
return stream;
}

public static async Task<RarBLAKE2spStream> CreateAsync(
IRarUnpack unpack,
FileHeader fileHeader,
MultiVolumeReadOnlyStream readStream,
CancellationToken cancellationToken = default
)
{
var stream = new RarBLAKE2spStream(unpack, fileHeader, readStream);
await stream.InitializeAsync(cancellationToken);
return stream;
}

protected override void Dispose(bool disposing)
{
#if DEBUG_STREAMS
@@ -333,4 +358,59 @@ internal class RarBLAKE2spStream : RarStream, IStreamStack

return result;
}

public override async System.Threading.Tasks.Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
System.Threading.CancellationToken cancellationToken
)
{
var result = await base.ReadAsync(buffer, offset, count, cancellationToken)
.ConfigureAwait(false);
if (result != 0)
{
Update(_blake2sp, new ReadOnlySpan<byte>(buffer, offset, result), result);
}
else
{
_hash = Final(_blake2sp);
if (!disableCRCCheck && !(GetCrc().SequenceEqual(readStream.CurrentCrc)) && count != 0)
{
// NOTE: we use the last FileHeader in a multipart volume to check CRC
throw new InvalidFormatException("file crc mismatch");
}
}

return result;
}

#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
public override async System.Threading.Tasks.ValueTask<int> ReadAsync(
Memory<byte> buffer,
System.Threading.CancellationToken cancellationToken = default
)
{
var result = await base.ReadAsync(buffer, cancellationToken).ConfigureAwait(false);
if (result != 0)
{
Update(_blake2sp, buffer.Span.Slice(0, result), result);
}
else
{
_hash = Final(_blake2sp);
if (
!disableCRCCheck
&& !(GetCrc().SequenceEqual(readStream.CurrentCrc))
&& buffer.Length != 0
)
{
// NOTE: we use the last FileHeader in a multipart volume to check CRC
throw new InvalidFormatException("file crc mismatch");
}
}

return result;
}
#endif
}

@@ -1,5 +1,7 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using SharpCompress.Common;
using SharpCompress.Common.Rar.Headers;
using SharpCompress.IO;
@@ -31,7 +33,7 @@ internal class RarCrcStream : RarStream, IStreamStack
private uint currentCrc;
private readonly bool disableCRC;

public RarCrcStream(
private RarCrcStream(
IRarUnpack unpack,
FileHeader fileHeader,
MultiVolumeReadOnlyStream readStream
@@ -46,6 +48,29 @@ internal class RarCrcStream : RarStream, IStreamStack
ResetCrc();
}

public static RarCrcStream Create(
IRarUnpack unpack,
FileHeader fileHeader,
MultiVolumeReadOnlyStream readStream
)
{
var stream = new RarCrcStream(unpack, fileHeader, readStream);
stream.Initialize();
return stream;
}

public static async Task<RarCrcStream> CreateAsync(
IRarUnpack unpack,
FileHeader fileHeader,
MultiVolumeReadOnlyStream readStream,
CancellationToken cancellationToken = default
)
{
var stream = new RarCrcStream(unpack, fileHeader, readStream);
await stream.InitializeAsync(cancellationToken);
return stream;
}

protected override void Dispose(bool disposing)
{
#if DEBUG_STREAMS
@@ -77,4 +102,56 @@ internal class RarCrcStream : RarStream, IStreamStack

return result;
}

public override async System.Threading.Tasks.Task<int> ReadAsync(
byte[] buffer,
int offset,
int count,
System.Threading.CancellationToken cancellationToken
)
{
var result = await base.ReadAsync(buffer, offset, count, cancellationToken)
.ConfigureAwait(false);
if (result != 0)
{
currentCrc = RarCRC.CheckCrc(currentCrc, buffer, offset, result);
}
else if (
!disableCRC
&& GetCrc() != BitConverter.ToUInt32(readStream.CurrentCrc, 0)
&& count != 0
)
{
// NOTE: we use the last FileHeader in a multipart volume to check CRC
throw new InvalidFormatException("file crc mismatch");
}

return result;
}

#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
public override async System.Threading.Tasks.ValueTask<int> ReadAsync(
Memory<byte> buffer,
System.Threading.CancellationToken cancellationToken = default
)
{
cancellationToken.ThrowIfCancellationRequested();
var result = await base.ReadAsync(buffer, cancellationToken).ConfigureAwait(false);
if (result != 0)
{
currentCrc = RarCRC.CheckCrc(currentCrc, buffer.Span, 0, result);
}
else if (
!disableCRC
&& GetCrc() != BitConverter.ToUInt32(readStream.CurrentCrc, 0)
&& buffer.Length != 0
)
{
// NOTE: we use the last FileHeader in a multipart volume to check CRC
throw new InvalidFormatException("file crc mismatch");
}

return result;
}
#endif
}
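The private constructors plus static Create/CreateAsync factories introduced above exist because the first unpack pass now needs to run either synchronously or awaited, and a constructor cannot await. A generic sketch of the shape of this pattern (all names here are assumed for illustration, not taken from the diff):

using System.Threading;
using System.Threading.Tasks;

public sealed class Example
{
    private Example() { }                 // no I/O in the constructor

    public static Example Create()
    {
        var e = new Example();
        e.Initialize();                   // synchronous first pass
        return e;
    }

    public static async Task<Example> CreateAsync(CancellationToken ct = default)
    {
        var e = new Example();
        await e.InitializeAsync(ct);      // awaited; impossible inside a ctor
        return e;
    }

    private void Initialize() { }
    private Task InitializeAsync(CancellationToken ct) => Task.CompletedTask;
}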
@@ -3,6 +3,8 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using SharpCompress.Common.Rar.Headers;
|
||||
using SharpCompress.IO;
|
||||
|
||||
@@ -56,13 +58,24 @@ internal class RarStream : Stream, IStreamStack
|
||||
#if DEBUG_STREAMS
|
||||
this.DebugConstruct(typeof(RarStream));
|
||||
#endif
|
||||
}
|
||||
|
||||
public void Initialize()
|
||||
{
|
||||
fetch = true;
|
||||
unpack.DoUnpack(fileHeader, readStream, this);
|
||||
fetch = false;
|
||||
_position = 0;
|
||||
}
|
||||
|
||||
public async Task InitializeAsync(CancellationToken cancellationToken = default)
|
||||
{
|
||||
fetch = true;
|
||||
await unpack.DoUnpackAsync(fileHeader, readStream, this, cancellationToken);
|
||||
fetch = false;
|
||||
_position = 0;
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (!isDisposed)
|
||||
@@ -131,6 +144,73 @@ internal class RarStream : Stream, IStreamStack
|
||||
return outTotal;
|
||||
}
|
||||
|
||||
public override async System.Threading.Tasks.Task<int> ReadAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
System.Threading.CancellationToken cancellationToken
|
||||
) => await ReadImplAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false);
|
||||
|
||||
private async System.Threading.Tasks.Task<int> ReadImplAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
System.Threading.CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
outTotal = 0;
|
||||
if (tmpCount > 0)
|
||||
{
|
||||
var toCopy = tmpCount < count ? tmpCount : count;
|
||||
Buffer.BlockCopy(tmpBuffer, tmpOffset, buffer, offset, toCopy);
|
||||
tmpOffset += toCopy;
|
||||
tmpCount -= toCopy;
|
||||
offset += toCopy;
|
||||
count -= toCopy;
|
||||
outTotal += toCopy;
|
||||
}
|
||||
if (count > 0 && unpack.DestSize > 0)
|
||||
{
|
||||
outBuffer = buffer;
|
||||
outOffset = offset;
|
||||
outCount = count;
|
||||
fetch = true;
|
||||
await unpack.DoUnpackAsync(cancellationToken).ConfigureAwait(false);
|
||||
fetch = false;
|
||||
}
|
||||
_position += outTotal;
|
||||
if (count > 0 && outTotal == 0 && _position != Length)
|
||||
{
|
||||
// sanity check, eg if we try to decompress a redir entry
|
||||
throw new InvalidOperationException(
|
||||
$"unpacked file size does not match header: expected {Length} found {_position}"
|
||||
);
|
||||
}
|
||||
return outTotal;
|
||||
}
|
||||
|
||||
#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
|
||||
public override async System.Threading.Tasks.ValueTask<int> ReadAsync(
|
||||
Memory<byte> buffer,
|
||||
System.Threading.CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
var array = System.Buffers.ArrayPool<byte>.Shared.Rent(buffer.Length);
|
||||
try
|
||||
{
|
||||
var bytesRead = await ReadImplAsync(array, 0, buffer.Length, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
new ReadOnlySpan<byte>(array, 0, bytesRead).CopyTo(buffer.Span);
|
||||
return bytesRead;
|
||||
}
|
||||
finally
|
||||
{
|
||||
System.Buffers.ArrayPool<byte>.Shared.Return(array);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
|
||||
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
@@ -165,6 +245,18 @@ internal class RarStream : Stream, IStreamStack
|
||||
}
|
||||
}
|
||||
|
||||
public override System.Threading.Tasks.Task WriteAsync(
|
||||
byte[] buffer,
|
||||
int offset,
|
||||
int count,
|
||||
System.Threading.CancellationToken cancellationToken
|
||||
)
|
||||
{
|
||||
cancellationToken.ThrowIfCancellationRequested();
|
||||
Write(buffer, offset, count);
|
||||
return System.Threading.Tasks.Task.CompletedTask;
|
||||
}
|
||||
|
||||
private void EnsureBufferCapacity(int count)
|
||||
{
|
||||
if (this.tmpBuffer.Length < this.tmpCount + count)
|
||||
|
||||
@@ -27,7 +27,7 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
if (!disposed)
|
||||
{
|
||||
base.Dispose();
|
||||
if (!externalWindow)
|
||||
if (!externalWindow && window is not null)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(window);
|
||||
window = null;
|
||||
@@ -155,6 +155,25 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
DoUnpack();
|
||||
}
|
||||
|
||||
public async System.Threading.Tasks.Task DoUnpackAsync(
|
||||
FileHeader fileHeader,
|
||||
Stream readStream,
|
||||
Stream writeStream,
|
||||
System.Threading.CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
destUnpSize = fileHeader.UncompressedSize;
|
||||
this.fileHeader = fileHeader;
|
||||
this.readStream = readStream;
|
||||
this.writeStream = writeStream;
|
||||
if (!fileHeader.IsSolid)
|
||||
{
|
||||
Init(null);
|
||||
}
|
||||
suspended = false;
|
||||
await DoUnpackAsync(cancellationToken).ConfigureAwait(false);
|
||||
}
|
||||
|
||||
public void DoUnpack()
|
||||
{
|
||||
if (fileHeader.CompressionMethod == 0)
|
||||
@@ -189,6 +208,42 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
}
|
||||
}
|
||||
|
||||
public async System.Threading.Tasks.Task DoUnpackAsync(
|
||||
System.Threading.CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
if (fileHeader.CompressionMethod == 0)
|
||||
{
|
||||
await UnstoreFileAsync(cancellationToken).ConfigureAwait(false);
|
||||
return;
|
||||
}
|
||||
switch (fileHeader.CompressionAlgorithm)
|
||||
{
|
||||
case 15: // rar 1.5 compression
|
||||
await unpack15Async(fileHeader.IsSolid, cancellationToken).ConfigureAwait(false);
|
||||
break;
|
||||
|
||||
case 20: // rar 2.x compression
|
||||
case 26: // files larger than 2GB
|
||||
await unpack20Async(fileHeader.IsSolid, cancellationToken).ConfigureAwait(false);
|
||||
break;
|
||||
|
||||
case 29: // rar 3.x compression
|
||||
case 36: // alternative hash
|
||||
await Unpack29Async(fileHeader.IsSolid, cancellationToken).ConfigureAwait(false);
|
||||
break;
|
||||
|
||||
case 50: // rar 5.x compression
|
||||
await Unpack5Async(fileHeader.IsSolid, cancellationToken).ConfigureAwait(false);
|
||||
break;
|
||||
|
||||
default:
|
||||
throw new InvalidFormatException(
|
||||
"unknown rar compression version " + fileHeader.CompressionAlgorithm
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private void UnstoreFile()
|
||||
{
|
||||
Span<byte> buffer = stackalloc byte[(int)Math.Min(0x10000, destUnpSize)];
|
||||
@@ -205,6 +260,26 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
} while (!suspended && destUnpSize > 0);
|
||||
}
|
||||
|
||||
private async System.Threading.Tasks.Task UnstoreFileAsync(
|
||||
System.Threading.CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
var buffer = new byte[(int)Math.Min(0x10000, destUnpSize)];
|
||||
do
|
||||
{
|
||||
var code = await readStream
|
||||
.ReadAsync(buffer, 0, buffer.Length, cancellationToken)
|
||||
.ConfigureAwait(false);
|
||||
if (code == 0 || code == -1)
|
||||
{
|
||||
break;
|
||||
}
|
||||
code = code < destUnpSize ? code : (int)destUnpSize;
|
||||
await writeStream.WriteAsync(buffer, 0, code, cancellationToken).ConfigureAwait(false);
|
||||
destUnpSize -= code;
|
||||
} while (!suspended && destUnpSize > 0);
|
||||
}
|
||||
|
||||
private void Unpack29(bool solid)
|
||||
{
|
||||
Span<int> DDecode = stackalloc int[PackDef.DC];
|
||||
@@ -483,6 +558,281 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
|
||||
UnpWriteBuf();
|
||||
}
    private async System.Threading.Tasks.Task Unpack29Async(
        bool solid,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        int[] DDecode = new int[PackDef.DC];
        byte[] DBits = new byte[PackDef.DC];

        int Bits;

        if (DDecode[1] == 0)
        {
            int Dist = 0,
                BitLength = 0,
                Slot = 0;
            for (var I = 0; I < DBitLengthCounts.Length; I++, BitLength++)
            {
                var count = DBitLengthCounts[I];
                for (var J = 0; J < count; J++, Slot++, Dist += (1 << BitLength))
                {
                    DDecode[Slot] = Dist;
                    DBits[Slot] = (byte)BitLength;
                }
            }
        }

        FileExtracted = true;

        if (!suspended)
        {
            UnpInitData(solid);
            if (!await unpReadBufAsync(cancellationToken).ConfigureAwait(false))
            {
                return;
            }
            if ((!solid || !tablesRead) && !ReadTables())
            {
                return;
            }
        }

        if (ppmError)
        {
            return;
        }

        while (true)
        {
            unpPtr &= PackDef.MAXWINMASK;

            if (inAddr > readBorder)
            {
                if (!await unpReadBufAsync(cancellationToken).ConfigureAwait(false))
                {
                    break;
                }
            }

            if (((wrPtr - unpPtr) & PackDef.MAXWINMASK) < 260 && wrPtr != unpPtr)
            {
                await UnpWriteBufAsync(cancellationToken).ConfigureAwait(false);
                if (destUnpSize < 0)
                {
                    return;
                }
                if (suspended)
                {
                    FileExtracted = false;
                    return;
                }
            }
            if (unpBlockType == BlockTypes.BLOCK_PPM)
            {
                var ch = ppm.DecodeChar();
                if (ch == -1)
                {
                    ppmError = true;
                    break;
                }
                if (ch == PpmEscChar)
                {
                    var nextCh = ppm.DecodeChar();
                    if (nextCh == 0)
                    {
                        if (!ReadTables())
                        {
                            break;
                        }
                        continue;
                    }
                    if (nextCh == 2 || nextCh == -1)
                    {
                        break;
                    }
                    if (nextCh == 3)
                    {
                        if (!ReadVMCode())
                        {
                            break;
                        }
                        continue;
                    }
                    if (nextCh == 4)
                    {
                        uint Distance = 0,
                            Length = 0;
                        var failed = false;
                        for (var I = 0; I < 4 && !failed; I++)
                        {
                            var ch2 = ppm.DecodeChar();
                            if (ch2 == -1)
                            {
                                failed = true;
                            }
                            else if (I == 3)
                            {
                                Length = (uint)ch2;
                            }
                            else
                            {
                                Distance = (Distance << 8) + (uint)ch2;
                            }
                        }
                        if (failed)
                        {
                            break;
                        }

                        CopyString(Length + 32, Distance + 2);
                        continue;
                    }
                    if (nextCh == 5)
                    {
                        var length = ppm.DecodeChar();
                        if (length == -1)
                        {
                            break;
                        }
                        CopyString((uint)(length + 4), 1);
                        continue;
                    }
                }
                window[unpPtr++] = (byte)ch;
                continue;
            }

            var Number = this.decodeNumber(LD);
            if (Number < 256)
            {
                window[unpPtr++] = (byte)Number;
                continue;
            }
            if (Number >= 271)
            {
                var Length = LDecode[Number -= 271] + 3;
                if ((Bits = LBits[Number]) > 0)
                {
                    Length += GetBits() >> (16 - Bits);
                    AddBits(Bits);
                }

                var DistNumber = this.decodeNumber(DD);
                var Distance = DDecode[DistNumber] + 1;
                if ((Bits = DBits[DistNumber]) > 0)
                {
                    if (DistNumber > 9)
                    {
                        if (Bits > 4)
                        {
                            Distance += (GetBits() >> (20 - Bits)) << 4;
                            AddBits(Bits - 4);
                        }
                        if (lowDistRepCount > 0)
                        {
                            lowDistRepCount--;
                            Distance += prevLowDist;
                        }
                        else
                        {
                            var LowDist = this.decodeNumber(LDD);
                            if (LowDist == 16)
                            {
                                lowDistRepCount = PackDef.LOW_DIST_REP_COUNT - 1;
                                Distance += prevLowDist;
                            }
                            else
                            {
                                Distance += LowDist;
                                prevLowDist = (int)LowDist;
                            }
                        }
                    }
                    else
                    {
                        Distance += GetBits() >> (16 - Bits);
                        AddBits(Bits);
                    }
                }

                if (Distance >= 0x2000)
                {
                    Length++;
                    if (Distance >= 0x40000)
                    {
                        Length++;
                    }
                }

                InsertOldDist(Distance);
                lastLength = Length;
                CopyString(Length, Distance);
                continue;
            }
            if (Number == 256)
            {
                if (!ReadEndOfBlock())
                {
                    break;
                }
                continue;
            }
            if (Number == 257)
            {
                if (!ReadVMCode())
                {
                    break;
                }
                continue;
            }
            if (Number == 258)
            {
                if (lastLength != 0)
                {
                    CopyString(lastLength, oldDist[0]);
                }

                continue;
            }
            if (Number < 263)
            {
                var DistNum = Number - 259;
                var Distance = (uint)oldDist[DistNum];
                for (var I = DistNum; I > 0; I--)
                {
                    oldDist[I] = oldDist[I - 1];
                }
                oldDist[0] = (int)Distance;

                var LengthNumber = this.decodeNumber(RD);
                var Length = LDecode[LengthNumber] + 2;
                if ((Bits = LBits[LengthNumber]) > 0)
                {
                    Length += GetBits() >> (16 - Bits);
                    AddBits(Bits);
                }
                lastLength = Length;
                CopyString((uint)Length, Distance);
                continue;
            }
            if (Number < 272)
            {
                var Distance = SDDecode[Number -= 263] + 1;
                if ((Bits = SDBits[Number]) > 0)
                {
                    Distance += GetBits() >> (16 - Bits);
                    AddBits(Bits);
                }
                InsertOldDist((uint)Distance);
                lastLength = 2;
                CopyString(2, (uint)Distance);
            }
        }
        await UnpWriteBufAsync(cancellationToken).ConfigureAwait(false);
    }

    private void UnpWriteBuf()
    {
        var WrittenBorder = wrPtr;
@@ -1339,6 +1689,256 @@ internal sealed partial class Unpack : BitInput, IRarUnpack
        }
    }

    private async System.Threading.Tasks.Task UnpWriteBufAsync(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        var WrittenBorder = wrPtr;
        var WriteSize = (unpPtr - WrittenBorder) & PackDef.MAXWINMASK;
        for (var I = 0; I < prgStack.Count; I++)
        {
            var flt = prgStack[I];
            if (flt is null)
            {
                continue;
            }
            if (flt.NextWindow)
            {
                flt.NextWindow = false;
                continue;
            }
            var BlockStart = flt.BlockStart;
            var BlockLength = flt.BlockLength;
            if (((BlockStart - WrittenBorder) & PackDef.MAXWINMASK) < WriteSize)
            {
                if (WrittenBorder != BlockStart)
                {
                    await UnpWriteAreaAsync(WrittenBorder, BlockStart, cancellationToken)
                        .ConfigureAwait(false);
                    WrittenBorder = BlockStart;
                    WriteSize = (unpPtr - WrittenBorder) & PackDef.MAXWINMASK;
                }
                if (BlockLength <= WriteSize)
                {
                    var BlockEnd = (BlockStart + BlockLength) & PackDef.MAXWINMASK;
                    if (BlockStart < BlockEnd || BlockEnd == 0)
                    {
                        rarVM.setMemory(0, window, BlockStart, BlockLength);
                    }
                    else
                    {
                        var FirstPartLength = PackDef.MAXWINSIZE - BlockStart;
                        rarVM.setMemory(0, window, BlockStart, FirstPartLength);
                        rarVM.setMemory(FirstPartLength, window, 0, BlockEnd);
                    }

                    var ParentPrg = filters[flt.ParentFilter].Program;
                    var Prg = flt.Program;

                    if (ParentPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
                    {
                        Prg.GlobalData.Clear();
                        for (
                            var i = 0;
                            i < ParentPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
                            i++
                        )
                        {
                            Prg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] = ParentPrg.GlobalData[
                                RarVM.VM_FIXEDGLOBALSIZE + i
                            ];
                        }
                    }

                    ExecuteCode(Prg);

                    if (Prg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
                    {
                        if (ParentPrg.GlobalData.Count < Prg.GlobalData.Count)
                        {
                            ParentPrg.GlobalData.SetSize(Prg.GlobalData.Count);
                        }

                        for (var i = 0; i < Prg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE; i++)
                        {
                            ParentPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] = Prg.GlobalData[
                                RarVM.VM_FIXEDGLOBALSIZE + i
                            ];
                        }
                    }
                    else
                    {
                        ParentPrg.GlobalData.Clear();
                    }

                    var FilteredDataOffset = Prg.FilteredDataOffset;
                    var FilteredDataSize = Prg.FilteredDataSize;
                    var FilteredData = ArrayPool<byte>.Shared.Rent(FilteredDataSize);
                    try
                    {
                        Array.Copy(
                            rarVM.Mem,
                            FilteredDataOffset,
                            FilteredData,
                            0,
                            FilteredDataSize
                        );

                        prgStack[I] = null;
                        while (I + 1 < prgStack.Count)
                        {
                            var NextFilter = prgStack[I + 1];
                            if (
                                NextFilter is null
                                || NextFilter.BlockStart != BlockStart
                                || NextFilter.BlockLength != FilteredDataSize
                                || NextFilter.NextWindow
                            )
                            {
                                break;
                            }

                            rarVM.setMemory(0, FilteredData, 0, FilteredDataSize);

                            var pPrg = filters[NextFilter.ParentFilter].Program;
                            var NextPrg = NextFilter.Program;

                            if (pPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
                            {
                                NextPrg.GlobalData.SetSize(pPrg.GlobalData.Count);

                                for (
                                    var i = 0;
                                    i < pPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
                                    i++
                                )
                                {
                                    NextPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] =
                                        pPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i];
                                }
                            }

                            ExecuteCode(NextPrg);

                            if (NextPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE)
                            {
                                if (pPrg.GlobalData.Count < NextPrg.GlobalData.Count)
                                {
                                    pPrg.GlobalData.SetSize(NextPrg.GlobalData.Count);
                                }

                                for (
                                    var i = 0;
                                    i < NextPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE;
                                    i++
                                )
                                {
                                    pPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] =
                                        NextPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i];
                                }
                            }
                            else
                            {
                                pPrg.GlobalData.Clear();
                            }

                            FilteredDataOffset = NextPrg.FilteredDataOffset;
                            FilteredDataSize = NextPrg.FilteredDataSize;
                            if (FilteredData.Length < FilteredDataSize)
                            {
                                ArrayPool<byte>.Shared.Return(FilteredData);
                                FilteredData = ArrayPool<byte>.Shared.Rent(FilteredDataSize);
                            }
                            for (var i = 0; i < FilteredDataSize; i++)
                            {
                                FilteredData[i] = NextPrg.GlobalData[FilteredDataOffset + i];
                            }

                            I++;
                            prgStack[I] = null;
                        }

                        await writeStream
                            .WriteAsync(FilteredData, 0, FilteredDataSize, cancellationToken)
                            .ConfigureAwait(false);
                        writtenFileSize += FilteredDataSize;
                        destUnpSize -= FilteredDataSize;
                        WrittenBorder = BlockEnd;
                        WriteSize = (unpPtr - WrittenBorder) & PackDef.MAXWINMASK;
                    }
                    finally
                    {
                        ArrayPool<byte>.Shared.Return(FilteredData);
                    }
                }
                else
                {
                    for (var J = I; J < prgStack.Count; J++)
                    {
                        var filt = prgStack[J];
                        if (filt != null && filt.NextWindow)
                        {
                            filt.NextWindow = false;
                        }
                    }
                    wrPtr = WrittenBorder;
                    return;
                }
            }
        }

        await UnpWriteAreaAsync(WrittenBorder, unpPtr, cancellationToken).ConfigureAwait(false);
        wrPtr = unpPtr;
    }
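
    // The filter path above copies each pending block into the RarVM, runs the
    // filter program(s), and writes the filtered output directly; FilteredData
    // is rented from ArrayPool<byte> and returned in 'finally' to avoid
    // per-block allocations.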

    private async System.Threading.Tasks.Task UnpWriteAreaAsync(
        int startPtr,
        int endPtr,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        if (endPtr < startPtr)
        {
            await UnpWriteDataAsync(
                    window,
                    startPtr,
                    -startPtr & PackDef.MAXWINMASK,
                    cancellationToken
                )
                .ConfigureAwait(false);
            await UnpWriteDataAsync(window, 0, endPtr, cancellationToken).ConfigureAwait(false);
        }
        else
        {
            await UnpWriteDataAsync(window, startPtr, endPtr - startPtr, cancellationToken)
                .ConfigureAwait(false);
        }
    }

    private async System.Threading.Tasks.Task UnpWriteDataAsync(
        byte[] data,
        int offset,
        int size,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        if (destUnpSize < 0)
        {
            return;
        }
        var writeSize = size;
        if (writeSize > destUnpSize)
        {
            writeSize = (int)destUnpSize;
        }
        await writeStream
            .WriteAsync(data, offset, writeSize, cancellationToken)
            .ConfigureAwait(false);
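
        // Note: the counters below advance by the full requested 'size', not the
        // clamped 'writeSize'; bytes beyond destUnpSize are dropped from the
        // output but still counted as consumed (presumably matching the
        // synchronous UnpWriteData).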
        writtenFileSize += size;
        destUnpSize -= size;
    }

    private void CleanUp()
    {
        if (ppm != null)

@@ -316,6 +316,110 @@ internal partial class Unpack
        oldUnpWriteBuf();
    }

    private async System.Threading.Tasks.Task unpack15Async(
        bool solid,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        if (suspended)
        {
            unpPtr = wrPtr;
        }
        else
        {
            UnpInitData(solid);
            oldUnpInitData(solid);
            await unpReadBufAsync(cancellationToken).ConfigureAwait(false);
            if (!solid)
            {
                initHuff();
                unpPtr = 0;
            }
            else
            {
                unpPtr = wrPtr;
            }
            --destUnpSize;
        }
        if (destUnpSize >= 0)
        {
            getFlagsBuf();
            FlagsCnt = 8;
        }

        while (destUnpSize >= 0)
        {
            unpPtr &= PackDef.MAXWINMASK;

            if (
                inAddr > readTop - 30
                && !await unpReadBufAsync(cancellationToken).ConfigureAwait(false)
            )
            {
                break;
            }
            if (((wrPtr - unpPtr) & PackDef.MAXWINMASK) < 270 && wrPtr != unpPtr)
            {
                await oldUnpWriteBufAsync(cancellationToken).ConfigureAwait(false);
                if (suspended)
                {
                    return;
                }
            }
            if (StMode != 0)
            {
                huffDecode();
                continue;
            }

            if (--FlagsCnt < 0)
            {
                getFlagsBuf();
                FlagsCnt = 7;
            }

            if ((FlagBuf & 0x80) != 0)
            {
                FlagBuf <<= 1;
                if (Nlzb > Nhfb)
                {
                    longLZ();
                }
                else
                {
                    huffDecode();
                }
            }
            else
            {
                FlagBuf <<= 1;
                if (--FlagsCnt < 0)
                {
                    getFlagsBuf();
                    FlagsCnt = 7;
                }
                if ((FlagBuf & 0x80) != 0)
                {
                    FlagBuf <<= 1;
                    if (Nlzb > Nhfb)
                    {
                        huffDecode();
                    }
                    else
                    {
                        longLZ();
                    }
                }
                else
                {
                    FlagBuf <<= 1;
                    shortLZ();
                }
            }
        }
        await oldUnpWriteBufAsync(cancellationToken).ConfigureAwait(false);
    }

    private bool unpReadBuf()
    {
        var dataSize = readTop - inAddr;
@@ -351,6 +455,40 @@ internal partial class Unpack
        return (readCode != -1);
    }

    private async System.Threading.Tasks.Task<bool> unpReadBufAsync(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        var dataSize = readTop - inAddr;
        if (dataSize < 0)
        {
            return (false);
        }
        if (inAddr > MAX_SIZE / 2)
        {
            if (dataSize > 0)
            {
                Array.Copy(InBuf, inAddr, InBuf, 0, dataSize);
            }
            inAddr = 0;
            readTop = dataSize;
        }
        else
        {
            dataSize = readTop;
        }

        var readCode = await readStream
            .ReadAsync(InBuf, dataSize, (MAX_SIZE - dataSize) & ~0xf, cancellationToken)
            .ConfigureAwait(false);
        if (readCode > 0)
        {
            readTop += readCode;
        }
        readBorder = readTop - 30;
        return (readCode != -1);
    }
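
    // The refill length is rounded down to a 16-byte multiple via '& ~0xf';
    // presumably this mirrors the synchronous unpReadBuf's read-size alignment.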

    private int getShortLen1(int pos) => pos == 1 ? Buf60 + 3 : ShortLen1[pos];

    private int getShortLen2(int pos) => pos == 3 ? Buf60 + 3 : ShortLen2[pos];
@@ -814,4 +952,26 @@ internal partial class Unpack
        }
        wrPtr = unpPtr;
    }

    private async System.Threading.Tasks.Task oldUnpWriteBufAsync(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        if (unpPtr < wrPtr)
        {
            await writeStream
                .WriteAsync(window, wrPtr, -wrPtr & PackDef.MAXWINMASK, cancellationToken)
                .ConfigureAwait(false);
            await writeStream
                .WriteAsync(window, 0, unpPtr, cancellationToken)
                .ConfigureAwait(false);
        }
        else
        {
            await writeStream
                .WriteAsync(window, wrPtr, unpPtr - wrPtr, cancellationToken)
                .ConfigureAwait(false);
        }
        wrPtr = unpPtr;
    }
}

@@ -368,6 +368,163 @@ internal partial class Unpack
        oldUnpWriteBuf();
    }

    private async System.Threading.Tasks.Task unpack20Async(
        bool solid,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        int Bits;

        if (suspended)
        {
            unpPtr = wrPtr;
        }
        else
        {
            UnpInitData(solid);
            if (!await unpReadBufAsync(cancellationToken).ConfigureAwait(false))
            {
                return;
            }
            if (!solid)
            {
                if (!await ReadTables20Async(cancellationToken).ConfigureAwait(false))
                {
                    return;
                }
            }
            --destUnpSize;
        }

        while (destUnpSize >= 0)
        {
            unpPtr &= PackDef.MAXWINMASK;

            if (inAddr > readTop - 30)
            {
                if (!await unpReadBufAsync(cancellationToken).ConfigureAwait(false))
                {
                    break;
                }
            }
            if (((wrPtr - unpPtr) & PackDef.MAXWINMASK) < 270 && wrPtr != unpPtr)
            {
                await oldUnpWriteBufAsync(cancellationToken).ConfigureAwait(false);
                if (suspended)
                {
                    return;
                }
            }
            if (UnpAudioBlock != 0)
            {
                var AudioNumber = this.decodeNumber(MD[UnpCurChannel]);

                if (AudioNumber == 256)
                {
                    if (!await ReadTables20Async(cancellationToken).ConfigureAwait(false))
                    {
                        break;
                    }
                    continue;
                }
                window[unpPtr++] = DecodeAudio(AudioNumber);
                if (++UnpCurChannel == UnpChannels)
                {
                    UnpCurChannel = 0;
                }
                --destUnpSize;
                continue;
            }

            var Number = this.decodeNumber(LD);
            if (Number < 256)
            {
                window[unpPtr++] = (byte)Number;
                --destUnpSize;
                continue;
            }
            if (Number > 269)
            {
                var Length = LDecode[Number -= 270] + 3;
                if ((Bits = LBits[Number]) > 0)
                {
                    Length += Utility.URShift(GetBits(), (16 - Bits));
                    AddBits(Bits);
                }

                var DistNumber = this.decodeNumber(DD);
                var Distance = DDecode[DistNumber] + 1;
                if ((Bits = DBits[DistNumber]) > 0)
                {
                    Distance += Utility.URShift(GetBits(), (16 - Bits));
                    AddBits(Bits);
                }

                if (Distance >= 0x2000)
                {
                    Length++;
                    if (Distance >= 0x40000L)
                    {
                        Length++;
                    }
                }

                CopyString20(Length, Distance);
                continue;
            }
            if (Number == 269)
            {
                if (!await ReadTables20Async(cancellationToken).ConfigureAwait(false))
                {
                    break;
                }
                continue;
            }
            if (Number == 256)
            {
                CopyString20(lastLength, lastDist);
                continue;
            }
            if (Number < 261)
            {
                var Distance = oldDist[(oldDistPtr - (Number - 256)) & 3];
                var LengthNumber = this.decodeNumber(RD);
                var Length = LDecode[LengthNumber] + 2;
                if ((Bits = LBits[LengthNumber]) > 0)
                {
                    Length += Utility.URShift(GetBits(), (16 - Bits));
                    AddBits(Bits);
                }
                if (Distance >= 0x101)
                {
                    Length++;
                    if (Distance >= 0x2000)
                    {
                        Length++;
                        if (Distance >= 0x40000)
                        {
                            Length++;
                        }
                    }
                }
                CopyString20(Length, Distance);
                continue;
            }
            if (Number < 270)
            {
                var Distance = SDDecode[Number -= 261] + 1;
                if ((Bits = SDBits[Number]) > 0)
                {
                    Distance += Utility.URShift(GetBits(), (16 - Bits));
                    AddBits(Bits);
                }
                CopyString20(2, Distance);
            }
        }
        ReadLastTables();
        await oldUnpWriteBufAsync(cancellationToken).ConfigureAwait(false);
    }

    private void CopyString20(int Length, int Distance)
    {
        lastDist = oldDist[oldDistPtr++ & 3] = Distance;
@@ -534,6 +691,120 @@ internal partial class Unpack
        return (true);
    }

    private async System.Threading.Tasks.Task<bool> ReadTables20Async(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        byte[] BitLength = new byte[PackDef.BC20];
        byte[] Table = new byte[PackDef.MC20 * 4];
        int TableSize,
            N,
            I;
        if (inAddr > readTop - 25)
        {
            if (!await unpReadBufAsync(cancellationToken).ConfigureAwait(false))
            {
                return (false);
            }
        }
        var BitField = GetBits();
        UnpAudioBlock = (BitField & 0x8000);

        if (0 == (BitField & 0x4000))
        {
            new Span<byte>(UnpOldTable20).Clear();
        }
        AddBits(2);

        if (UnpAudioBlock != 0)
        {
            UnpChannels = ((Utility.URShift(BitField, 12)) & 3) + 1;
            if (UnpCurChannel >= UnpChannels)
            {
                UnpCurChannel = 0;
            }
            AddBits(2);
            TableSize = PackDef.MC20 * UnpChannels;
        }
        else
        {
            TableSize = PackDef.NC20 + PackDef.DC20 + PackDef.RC20;
        }
        for (I = 0; I < PackDef.BC20; I++)
        {
            BitLength[I] = (byte)(Utility.URShift(GetBits(), 12));
            AddBits(4);
        }
        UnpackUtility.makeDecodeTables(BitLength, 0, BD, PackDef.BC20);
        I = 0;
        while (I < TableSize)
        {
            if (inAddr > readTop - 5)
            {
                if (!await unpReadBufAsync(cancellationToken).ConfigureAwait(false))
                {
                    return (false);
                }
            }
            var Number = this.decodeNumber(BD);
            if (Number < 16)
            {
                Table[I] = (byte)((Number + UnpOldTable20[I]) & 0xf);
                I++;
            }
            else if (Number == 16)
            {
                N = (Utility.URShift(GetBits(), 14)) + 3;
                AddBits(2);
                while (N-- > 0 && I < TableSize)
                {
                    Table[I] = Table[I - 1];
                    I++;
                }
            }
            else
            {
                if (Number == 17)
                {
                    N = (Utility.URShift(GetBits(), 13)) + 3;
                    AddBits(3);
                }
                else
                {
                    N = (Utility.URShift(GetBits(), 9)) + 11;
                    AddBits(7);
                }
                while (N-- > 0 && I < TableSize)
                {
                    Table[I++] = 0;
                }
            }
        }
        if (inAddr > readTop)
        {
            return (true);
        }
        if (UnpAudioBlock != 0)
        {
            for (I = 0; I < UnpChannels; I++)
            {
                UnpackUtility.makeDecodeTables(Table, I * PackDef.MC20, MD[I], PackDef.MC20);
            }
        }
        else
        {
            UnpackUtility.makeDecodeTables(Table, 0, LD, PackDef.NC20);
            UnpackUtility.makeDecodeTables(Table, PackDef.NC20, DD, PackDef.DC20);
            UnpackUtility.makeDecodeTables(Table, PackDef.NC20 + PackDef.DC20, RD, PackDef.RC20);
        }

        for (var i = 0; i < UnpOldTable20.Length; i++)
        {
            UnpOldTable20[i] = Table[i];
        }
        return (true);
    }

    private void unpInitData20(bool Solid)
    {
        if (!Solid)

@@ -479,6 +479,354 @@ internal partial class Unpack
        return ReadCode != -1;
    }

    private async System.Threading.Tasks.Task<bool> UnpReadBufAsync(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        var DataSize = ReadTop - Inp.InAddr; // Data left to process.
        if (DataSize < 0)
        {
            return false;
        }

        BlockHeader.BlockSize -= Inp.InAddr - BlockHeader.BlockStart;
        if (Inp.InAddr > MAX_SIZE / 2)
        {
            if (DataSize > 0)
            {
                Array.Copy(InBuf, inAddr, InBuf, 0, DataSize);
            }

            Inp.InAddr = 0;
            ReadTop = DataSize;
        }
        else
        {
            DataSize = ReadTop;
        }

        var ReadCode = 0;
        if (MAX_SIZE != DataSize)
        {
            ReadCode = await readStream
                .ReadAsync(InBuf, DataSize, MAX_SIZE - DataSize, cancellationToken)
                .ConfigureAwait(false);
        }

        if (ReadCode > 0) // Can be also -1.
        {
            ReadTop += ReadCode;
        }

        ReadBorder = ReadTop - 30;
        BlockHeader.BlockStart = Inp.InAddr;
        if (BlockHeader.BlockSize != -1) // '-1' means not defined yet.
        {
            ReadBorder = Math.Min(ReadBorder, BlockHeader.BlockStart + BlockHeader.BlockSize - 1);
        }
        return ReadCode != -1;
    }
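
    // Async port of the RAR5 decode loop (Unpack5). The fragmented-window
    // branches from upstream unrar remain commented out below.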

    public async System.Threading.Tasks.Task Unpack5Async(
        bool Solid,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        FileExtracted = true;

        if (!Suspended)
        {
            UnpInitData(Solid);
            if (!await UnpReadBufAsync(cancellationToken).ConfigureAwait(false))
            {
                return;
            }

            // Check TablesRead5 to be sure that we read tables at least once
            // regardless of current block header TablePresent flag.
            // So we can safely use these tables below.
            if (
                !await ReadBlockHeaderAsync(cancellationToken).ConfigureAwait(false)
                || !ReadTables()
                || !TablesRead5
            )
            {
                return;
            }
        }

        while (true)
        {
            UnpPtr &= MaxWinMask;

            if (Inp.InAddr >= ReadBorder)
            {
                var FileDone = false;

                // We use 'while', because for empty block containing only Huffman table,
                // we'll be on the block border once again just after reading the table.
                while (
                    Inp.InAddr > BlockHeader.BlockStart + BlockHeader.BlockSize - 1
                    || Inp.InAddr == BlockHeader.BlockStart + BlockHeader.BlockSize - 1
                        && Inp.InBit >= BlockHeader.BlockBitSize
                )
                {
                    if (BlockHeader.LastBlockInFile)
                    {
                        FileDone = true;
                        break;
                    }
                    if (
                        !await ReadBlockHeaderAsync(cancellationToken).ConfigureAwait(false)
                        || !ReadTables()
                    )
                    {
                        return;
                    }
                }
                if (FileDone || !await UnpReadBufAsync(cancellationToken).ConfigureAwait(false))
                {
                    break;
                }
            }

            if (
                ((WriteBorder - UnpPtr) & MaxWinMask) < PackDef.MAX_LZ_MATCH + 3
                && WriteBorder != UnpPtr
            )
            {
                UnpWriteBuf();
                if (WrittenFileSize > DestUnpSize)
                {
                    return;
                }

                if (Suspended)
                {
                    FileExtracted = false;
                    return;
                }
            }

            //uint MainSlot=DecodeNumber(Inp,LD);
            var MainSlot = this.DecodeNumber(LD);
            if (MainSlot < 256)
            {
                // if (Fragmented)
                //     FragWindow[UnpPtr++]=(byte)MainSlot;
                // else
                Window[UnpPtr++] = (byte)MainSlot;
                continue;
            }
            if (MainSlot >= 262)
            {
                var Length = SlotToLength(MainSlot - 262);

                //uint DBits,Distance=1,DistSlot=DecodeNumber(Inp,&BlockTables.DD);
                int DBits;
                uint Distance = 1,
                    DistSlot = this.DecodeNumber(DD);
                if (DistSlot < 4)
                {
                    DBits = 0;
                    Distance += DistSlot;
                }
                else
                {
                    //DBits=DistSlot/2 - 1;
                    DBits = (int)((DistSlot / 2) - 1);
                    Distance += (2 | (DistSlot & 1)) << DBits;
                }

                if (DBits > 0)
                {
                    if (DBits >= 4)
                    {
                        if (DBits > 4)
                        {
                            Distance += ((Inp.getbits() >> (36 - DBits)) << 4);
                            Inp.AddBits(DBits - 4);
                        }
                        //uint LowDist=DecodeNumber(Inp,&BlockTables.LDD);
                        var LowDist = this.DecodeNumber(LDD);
                        Distance += LowDist;
                    }
                    else
                    {
                        Distance += Inp.getbits() >> (32 - DBits);
                        Inp.AddBits(DBits);
                    }
                }

                if (Distance > 0x100)
                {
                    Length++;
                    if (Distance > 0x2000)
                    {
                        Length++;
                        if (Distance > 0x40000)
                        {
                            Length++;
                        }
                    }
                }

                InsertOldDist(Distance);
                LastLength = Length;
                // if (Fragmented)
                //     FragWindow.CopyString(Length,Distance,UnpPtr,MaxWinMask);
                // else
                CopyString(Length, Distance);
                continue;
            }
            if (MainSlot == 256)
            {
                var Filter = new UnpackFilter();
                if (
                    !await ReadFilterAsync(Filter, cancellationToken).ConfigureAwait(false)
                    || !AddFilter(Filter)
                )
                {
                    break;
                }

                continue;
            }
            if (MainSlot == 257)
            {
                if (LastLength != 0)
                // if (Fragmented)
                //     FragWindow.CopyString(LastLength,OldDist[0],UnpPtr,MaxWinMask);
                // else
                //CopyString(LastLength,OldDist[0]);
                {
                    CopyString(LastLength, OldDistN(0));
                }

                continue;
            }
            if (MainSlot < 262)
            {
                //uint DistNum=MainSlot-258;
                var DistNum = (int)(MainSlot - 258);
                //uint Distance=OldDist[DistNum];
                var Distance = OldDistN(DistNum);
                //for (uint I=DistNum;I>0;I--)
                for (var I = DistNum; I > 0; I--)
                //OldDistN[I]=OldDistN(I-1);
                {
                    SetOldDistN(I, OldDistN(I - 1));
                }

                //OldDistN[0]=Distance;
                SetOldDistN(0, Distance);

                var LengthSlot = this.DecodeNumber(RD);
                var Length = SlotToLength(LengthSlot);
                LastLength = Length;
                // if (Fragmented)
                //     FragWindow.CopyString(Length,Distance,UnpPtr,MaxWinMask);
                // else
                CopyString(Length, Distance);
                continue;
            }
        }
        UnpWriteBuf();
    }

    private async System.Threading.Tasks.Task<bool> ReadBlockHeaderAsync(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        Header.HeaderSize = 0;

        if (!Inp.ExternalBuffer && Inp.InAddr > ReadTop - 7)
        {
            if (!await UnpReadBufAsync(cancellationToken).ConfigureAwait(false))
            {
                return false;
            }
        }

        //Inp.faddbits((8-Inp.InBit)&7);
        Inp.faddbits((uint)((8 - Inp.InBit) & 7));

        var BlockFlags = (byte)(Inp.fgetbits() >> 8);
        Inp.faddbits(8);
        //uint ByteCount=((BlockFlags>>3)&3)+1; // Block size byte count.
        var ByteCount = (uint)(((BlockFlags >> 3) & 3) + 1); // Block size byte count.

        if (ByteCount == 4)
        {
            return false;
        }

        //Header.HeaderSize=2+ByteCount;
        Header.HeaderSize = (int)(2 + ByteCount);

        Header.BlockBitSize = (BlockFlags & 7) + 1;

        var SavedCheckSum = (byte)(Inp.fgetbits() >> 8);
        Inp.faddbits(8);

        var BlockSize = 0;
        //for (uint I=0;I<ByteCount;I++)
        for (var I = 0; I < ByteCount; I++)
        {
            //BlockSize+=(Inp.fgetbits()>>8)<<(I*8);
            BlockSize += (int)(Inp.fgetbits() >> 8) << (I * 8);
            Inp.AddBits(8);
        }

        Header.BlockSize = BlockSize;
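        // The header checksum seeds with 0x5a and XORs in the flags byte and the
        // three low bytes of the block size; a mismatch indicates a corrupt or
        // desynchronized block header.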
        var CheckSum = (byte)(0x5a ^ BlockFlags ^ BlockSize ^ (BlockSize >> 8) ^ (BlockSize >> 16));
        if (CheckSum != SavedCheckSum)
        {
            return false;
        }

        Header.BlockStart = Inp.InAddr;
        ReadBorder = Math.Min(ReadBorder, Header.BlockStart + Header.BlockSize - 1);

        Header.LastBlockInFile = (BlockFlags & 0x40) != 0;
        Header.TablePresent = (BlockFlags & 0x80) != 0;
        return true;
    }

    private async System.Threading.Tasks.Task<bool> ReadFilterAsync(
        UnpackFilter Filter,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        if (!Inp.ExternalBuffer && Inp.InAddr > ReadTop - 16)
        {
            if (!await UnpReadBufAsync(cancellationToken).ConfigureAwait(false))
            {
                return false;
            }
        }

        Filter.uBlockStart = ReadFilterData();
        Filter.uBlockLength = ReadFilterData();
        if (Filter.BlockLength > MAX_FILTER_BLOCK_SIZE)
        {
            Filter.BlockLength = 0;
        }

        //Filter.Type=Inp.fgetbits()>>13;
        Filter.Type = (byte)(Inp.fgetbits() >> 13);
        Inp.faddbits(3);

        if (Filter.Type == (byte)FilterType.FILTER_DELTA)
        {
            //Filter.Channels=(Inp.fgetbits()>>11)+1;
            Filter.Channels = (byte)((Inp.fgetbits() >> 11) + 1);
            Inp.faddbits(5);
        }

        return true;
    }

    //?
    // void UnpWriteBuf()
    // {
@@ -814,116 +1162,5 @@ internal partial class Unpack
        Header.TablePresent = (BlockFlags & 0x80) != 0;
        return true;
    }

    //?
    // bool ReadTables(BitInput Inp, ref UnpackBlockHeader Header, ref UnpackBlockTables Tables)
    // {
    //   if (!Header.TablePresent)
    //     return true;
    //
    //   if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-25)
    //     if (!UnpReadBuf())
    //       return false;
    //
    //   byte BitLength[BC];
    //   for (uint I=0;I<BC;I++)
    //   {
    //     uint Length=(byte)(Inp.fgetbits() >> 12);
    //     Inp.faddbits(4);
    //     if (Length==15)
    //     {
    //       uint ZeroCount=(byte)(Inp.fgetbits() >> 12);
    //       Inp.faddbits(4);
    //       if (ZeroCount==0)
    //         BitLength[I]=15;
    //       else
    //       {
    //         ZeroCount+=2;
    //         while (ZeroCount-- > 0 && I<ASIZE(BitLength))
    //           BitLength[I++]=0;
    //         I--;
    //       }
    //     }
    //     else
    //       BitLength[I]=Length;
    //   }
    //
    //   MakeDecodeTables(BitLength,&Tables.BD,BC);
    //
    //   byte Table[HUFF_TABLE_SIZE];
    //   const uint TableSize=HUFF_TABLE_SIZE;
    //   for (uint I=0;I<TableSize;)
    //   {
    //     if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-5)
    //       if (!UnpReadBuf())
    //         return false;
    //     uint Number=DecodeNumber(Inp,&Tables.BD);
    //     if (Number<16)
    //     {
    //       Table[I]=Number;
    //       I++;
    //     }
    //     else
    //       if (Number<18)
    //       {
    //         uint N;
    //         if (Number==16)
    //         {
    //           N=(Inp.fgetbits() >> 13)+3;
    //           Inp.faddbits(3);
    //         }
    //         else
    //         {
    //           N=(Inp.fgetbits() >> 9)+11;
    //           Inp.faddbits(7);
    //         }
    //         if (I==0)
    //         {
    //           // We cannot have "repeat previous" code at the first position.
    //           // Multiple such codes would shift Inp position without changing I,
    //           // which can lead to reading beyond the Inp boundary in multithreading
    //           // mode, where Inp.ExternalBuffer disables bounds checks and we just
    //           // reserve a lot of buffer space to not need such checks normally.
    //           return false;
    //         }
    //         else
    //           while (N-- > 0 && I<TableSize)
    //           {
    //             Table[I]=Table[I-1];
    //             I++;
    //           }
    //       }
    //       else
    //       {
    //         uint N;
    //         if (Number==18)
    //         {
    //           N=(Inp.fgetbits() >> 13)+3;
    //           Inp.faddbits(3);
    //         }
    //         else
    //         {
    //           N=(Inp.fgetbits() >> 9)+11;
    //           Inp.faddbits(7);
    //         }
    //         while (N-- > 0 && I<TableSize)
    //           Table[I++]=0;
    //       }
    //   }
    //   TablesRead5=true;
    //   if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop)
    //     return false;
    //   MakeDecodeTables(&Table[0],&Tables.LD,NC);
    //   MakeDecodeTables(&Table[NC],&Tables.DD,DC);
    //   MakeDecodeTables(&Table[NC+DC],&Tables.LDD,LDC);
    //   MakeDecodeTables(&Table[NC+DC+LDC],&Tables.RD,RC);
    //   return true;
    // }

    //?
    // void InitFilters()
    // {
    //     Filters.SoftReset();
    // }
}
#endif

@@ -29,9 +29,28 @@ internal partial class Unpack : IRarUnpack
        // NOTE: caller has logic to check for -1 for error we throw instead.
        readStream.Read(buf, offset, count);

    private async System.Threading.Tasks.Task<int> UnpIO_UnpReadAsync(
        byte[] buf,
        int offset,
        int count,
        System.Threading.CancellationToken cancellationToken = default
    ) =>
        // NOTE: caller has logic to check for -1 for error we throw instead.
        await readStream.ReadAsync(buf, offset, count, cancellationToken).ConfigureAwait(false);

    private void UnpIO_UnpWrite(byte[] buf, size_t offset, uint count) =>
        writeStream.Write(buf, checked((int)offset), checked((int)count));

    private async System.Threading.Tasks.Task UnpIO_UnpWriteAsync(
        byte[] buf,
        size_t offset,
        uint count,
        System.Threading.CancellationToken cancellationToken = default
    ) =>
        await writeStream
            .WriteAsync(buf, checked((int)offset), checked((int)count), cancellationToken)
            .ConfigureAwait(false);

    public void DoUnpack(FileHeader fileHeader, Stream readStream, Stream writeStream)
    {
        // as of 12/2017 .NET limits array indexing to using a signed integer
@@ -53,6 +72,25 @@ internal partial class Unpack : IRarUnpack
        DoUnpack();
    }

    public async System.Threading.Tasks.Task DoUnpackAsync(
        FileHeader fileHeader,
        Stream readStream,
        Stream writeStream,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        DestUnpSize = fileHeader.UncompressedSize;
        this.fileHeader = fileHeader;
        this.readStream = readStream;
        this.writeStream = writeStream;
        if (!fileHeader.IsStored)
        {
            Init(fileHeader.WindowSize, fileHeader.IsSolid);
        }
        Suspended = false;
        await DoUnpackAsync(cancellationToken).ConfigureAwait(false);
    }

    public void DoUnpack()
    {
        if (fileHeader.IsStored)
@@ -65,6 +103,27 @@ internal partial class Unpack : IRarUnpack
        }
    }

    public async System.Threading.Tasks.Task DoUnpackAsync(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        if (fileHeader.IsStored)
        {
            await UnstoreFileAsync(cancellationToken).ConfigureAwait(false);
        }
        else
        {
            // TODO: When compression methods are converted to async, call them here.
            // For now, this path still falls back to the synchronous decoders internally.
            await DoUnpackAsync(
                    fileHeader.CompressionAlgorithm,
                    fileHeader.IsSolid,
                    cancellationToken
                )
                .ConfigureAwait(false);
        }
    }

    private void UnstoreFile()
    {
        Span<byte> b = stackalloc byte[(int)Math.Min(0x10000, DestUnpSize)];
@@ -80,6 +139,25 @@ internal partial class Unpack : IRarUnpack
        } while (!Suspended);
    }

    private async System.Threading.Tasks.Task UnstoreFileAsync(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        var buffer = new byte[(int)Math.Min(0x10000, DestUnpSize)];
        do
        {
            var n = await readStream
                .ReadAsync(buffer, 0, buffer.Length, cancellationToken)
                .ConfigureAwait(false);
            if (n == 0)
            {
                break;
            }
            await writeStream.WriteAsync(buffer, 0, n, cancellationToken).ConfigureAwait(false);
            DestUnpSize -= n;
        } while (!Suspended);
    }
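
    // Hypothetical usage: callers that previously blocked on DoUnpack(fileHeader,
    // readStream, writeStream) can now await DoUnpackAsync(fileHeader, readStream,
    // writeStream, cancellationToken) to extract stored or compressed entries
    // without tying up a thread.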

    public bool Suspended { get; set; }

    public long DestSize => DestUnpSize;

@@ -200,6 +200,102 @@ internal partial class Unpack
        UnpWriteBuf20();
    }

    private async System.Threading.Tasks.Task Unpack15Async(
        bool Solid,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        UnpInitData(Solid);
        UnpInitData15(Solid);
        await UnpReadBufAsync(cancellationToken).ConfigureAwait(false);
        if (!Solid)
        {
            InitHuff();
            UnpPtr = 0;
        }
        else
        {
            UnpPtr = WrPtr;
        }

        --DestUnpSize;
        if (DestUnpSize >= 0)
        {
            GetFlagsBuf();
            FlagsCnt = 8;
        }

        while (DestUnpSize >= 0)
        {
            UnpPtr &= MaxWinMask;

            if (
                Inp.InAddr > ReadTop - 30
                && !await UnpReadBufAsync(cancellationToken).ConfigureAwait(false)
            )
            {
                break;
            }

            if (((WrPtr - UnpPtr) & MaxWinMask) < 270 && WrPtr != UnpPtr)
            {
                await UnpWriteBuf20Async(cancellationToken).ConfigureAwait(false);
            }

            if (StMode != 0)
            {
                HuffDecode();
                continue;
            }

            if (--FlagsCnt < 0)
            {
                GetFlagsBuf();
                FlagsCnt = 7;
            }

            if ((FlagBuf & 0x80) != 0)
            {
                FlagBuf <<= 1;
                if (Nlzb > Nhfb)
                {
                    LongLZ();
                }
                else
                {
                    HuffDecode();
                }
            }
            else
            {
                FlagBuf <<= 1;
                if (--FlagsCnt < 0)
                {
                    GetFlagsBuf();
                    FlagsCnt = 7;
                }
                if ((FlagBuf & 0x80) != 0)
                {
                    FlagBuf <<= 1;
                    if (Nlzb > Nhfb)
                    {
                        HuffDecode();
                    }
                    else
                    {
                        LongLZ();
                    }
                }
                else
                {
                    FlagBuf <<= 1;
                    ShortLZ();
                }
            }
        }
        await UnpWriteBuf20Async(cancellationToken).ConfigureAwait(false);
    }

    //#define GetShortLen1(pos) ((pos)==1 ? Buf60+3:ShortLen1[pos])
    private uint GetShortLen1(uint pos) => ((pos) == 1 ? (uint)(Buf60 + 3) : ShortLen1[pos]);

@@ -349,6 +349,170 @@ internal partial class Unpack
        UnpWriteBuf20();
    }

    private async System.Threading.Tasks.Task Unpack20Async(
        bool Solid,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        uint Bits;

        if (Suspended)
        {
            UnpPtr = WrPtr;
        }
        else
        {
            UnpInitData(Solid);
            if (!await UnpReadBufAsync(cancellationToken).ConfigureAwait(false))
            {
                return;
            }

            if (
                (!Solid || !TablesRead2)
                && !await ReadTables20Async(cancellationToken).ConfigureAwait(false)
            )
            {
                return;
            }

            --DestUnpSize;
        }

        while (DestUnpSize >= 0)
        {
            UnpPtr &= MaxWinMask;

            if (Inp.InAddr > ReadTop - 30)
            {
                if (!await UnpReadBufAsync(cancellationToken).ConfigureAwait(false))
                {
                    break;
                }
            }

            if (((WrPtr - UnpPtr) & MaxWinMask) < 270 && WrPtr != UnpPtr)
            {
                await UnpWriteBuf20Async(cancellationToken).ConfigureAwait(false);
                if (Suspended)
                {
                    return;
                }
            }
            if (UnpAudioBlock)
            {
                var AudioNumber = DecodeNumber(Inp, MD[UnpCurChannel]);

                if (AudioNumber == 256)
                {
                    if (!await ReadTables20Async(cancellationToken).ConfigureAwait(false))
                    {
                        break;
                    }

                    continue;
                }
                Window[UnpPtr++] = DecodeAudio((int)AudioNumber);
                if (++UnpCurChannel == UnpChannels)
                {
                    UnpCurChannel = 0;
                }

                --DestUnpSize;
                continue;
            }

            var Number = DecodeNumber(Inp, BlockTables.LD);
            if (Number < 256)
            {
                Window[UnpPtr++] = (byte)Number;
                --DestUnpSize;
                continue;
            }
            if (Number > 269)
            {
                var Length = (uint)(LDecode[Number -= 270] + 3);
                if ((Bits = LBits[Number]) > 0)
                {
                    Length += Inp.getbits() >> (int)(16 - Bits);
                    Inp.addbits(Bits);
                }

                var DistNumber = DecodeNumber(Inp, BlockTables.DD);
                var Distance = DDecode[DistNumber] + 1;
                if ((Bits = DBits[DistNumber]) > 0)
                {
                    Distance += Inp.getbits() >> (int)(16 - Bits);
                    Inp.addbits(Bits);
                }

                if (Distance >= 0x2000)
                {
                    Length++;
                    if (Distance >= 0x40000L)
                    {
                        Length++;
                    }
                }

                CopyString20(Length, Distance);
                continue;
            }
            if (Number == 269)
            {
                if (!await ReadTables20Async(cancellationToken).ConfigureAwait(false))
                {
                    break;
                }

                continue;
            }
            if (Number == 256)
            {
                CopyString20(LastLength, LastDist);
                continue;
            }
            if (Number < 261)
            {
                var Distance = OldDist[(OldDistPtr - (Number - 256)) & 3];
                var LengthNumber = DecodeNumber(Inp, BlockTables.RD);
                var Length = (uint)(LDecode[LengthNumber] + 2);
                if ((Bits = LBits[LengthNumber]) > 0)
                {
                    Length += Inp.getbits() >> (int)(16 - Bits);
                    Inp.addbits(Bits);
                }
                if (Distance >= 0x101)
                {
                    Length++;
                    if (Distance >= 0x2000)
                    {
                        Length++;
                        if (Distance >= 0x40000)
                        {
                            Length++;
                        }
                    }
                }
                CopyString20(Length, Distance);
                continue;
            }
            if (Number < 270)
            {
                var Distance = (uint)(SDDecode[Number -= 261] + 1);
                if ((Bits = SDBits[Number]) > 0)
                {
                    Distance += Inp.getbits() >> (int)(16 - Bits);
                    Inp.addbits(Bits);
                }
                CopyString20(2, Distance);
                continue;
            }
        }
        ReadLastTables();
        await UnpWriteBuf20Async(cancellationToken).ConfigureAwait(false);
    }

    private void UnpWriteBuf20()
    {
        if (UnpPtr != WrPtr)
@@ -370,6 +534,36 @@ internal partial class Unpack
        WrPtr = UnpPtr;
    }

    private async System.Threading.Tasks.Task UnpWriteBuf20Async(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        if (UnpPtr != WrPtr)
        {
            UnpSomeRead = true;
        }

        if (UnpPtr < WrPtr)
        {
            await UnpIO_UnpWriteAsync(
                    Window,
                    WrPtr,
                    (uint)(-(int)WrPtr & MaxWinMask),
                    cancellationToken
                )
                .ConfigureAwait(false);
            await UnpIO_UnpWriteAsync(Window, 0, UnpPtr, cancellationToken).ConfigureAwait(false);
            UnpAllBuf = true;
        }
        else
        {
            await UnpIO_UnpWriteAsync(Window, WrPtr, UnpPtr - WrPtr, cancellationToken)
                .ConfigureAwait(false);
        }

        WrPtr = UnpPtr;
    }
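
    // When UnpPtr has wrapped behind WrPtr, the window is flushed in two pieces:
    // the tail from WrPtr to the end of the circular window, then the head from
    // offset 0 up to UnpPtr.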
|
||||
|
||||
private bool ReadTables20()
|
||||
{
|
||||
Span<byte> BitLength = stackalloc byte[checked((int)BC20)];
|
||||
@@ -490,6 +684,130 @@ internal partial class Unpack
|
||||
return true;
|
||||
}
|
||||
|
||||
private async System.Threading.Tasks.Task<bool> ReadTables20Async(
|
||||
System.Threading.CancellationToken cancellationToken = default
|
||||
)
|
||||
{
|
||||
byte[] BitLength = new byte[checked((int)BC20)];
|
||||
byte[] Table = new byte[checked((int)MC20 * 4)];
|
||||
if (Inp.InAddr > ReadTop - 25)
|
||||
{
|
||||
if (!await UnpReadBufAsync(cancellationToken).ConfigureAwait(false))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
var BitField = Inp.getbits();
|
||||
UnpAudioBlock = (BitField & 0x8000) != 0;
|
||||
|
||||
if ((BitField & 0x4000) != 0)
|
||||
{
|
||||
Array.Clear(UnpOldTable20, 0, UnpOldTable20.Length);
|
||||
}
|
||||
|
||||
Inp.addbits(2);
|
||||
|
||||
uint TableSize;
|
||||
if (UnpAudioBlock)
|
||||
{
|
||||
UnpChannels = ((BitField >> 12) & 3) + 1;
|
||||
if (UnpCurChannel >= UnpChannels)
|
||||
{
|
||||
UnpCurChannel = 0;
|
||||
}
|
||||
|
||||
Inp.addbits(2);
|
||||
TableSize = MC20 * UnpChannels;
|
||||
}
|
||||
else
|
||||
{
|
||||
TableSize = NC20 + DC20 + RC20;
|
||||
}
|
||||
|
||||
for (int I = 0; I < checked((int)BC20); I++)
|
||||
{
|
||||
BitLength[I] = (byte)(Inp.getbits() >> 12);
|
||||
Inp.addbits(4);
|
||||
}
|
||||
MakeDecodeTables(BitLength, 0, BlockTables.BD, BC20);
|
||||
for (int I = 0; I < checked((int)TableSize); )
|
||||
{
|
||||
if (Inp.InAddr > ReadTop - 5)
|
||||
{
|
||||
if (!await UnpReadBufAsync(cancellationToken).ConfigureAwait(false))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
var Number = DecodeNumber(Inp, BlockTables.BD);
|
||||
if (Number < 16)
|
||||
{
|
||||
Table[I] = (byte)((Number + UnpOldTable20[I]) & 0xF);
|
||||
I++;
|
||||
}
|
||||
else if (Number < 18)
|
||||
{
|
||||
uint N;
|
||||
if (Number == 16)
|
||||
{
|
||||
N = (Inp.getbits() >> 14) + 3;
|
||||
Inp.addbits(2);
|
||||
}
|
||||
else
|
||||
{
|
||||
N = (Inp.getbits() >> 13) + 11;
|
||||
Inp.addbits(3);
|
||||
}
|
||||
if (I == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
while (N-- > 0 && I < checked((int)TableSize))
|
||||
{
|
||||
Table[I] = Table[I - 1];
|
||||
I++;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
uint N;
|
||||
if (Number == 18)
|
||||
{
|
||||
N = (Inp.getbits() >> 13) + 3;
|
||||
Inp.addbits(3);
|
||||
}
|
||||
else
|
||||
{
|
||||
N = (Inp.getbits() >> 9) + 11;
|
||||
Inp.addbits(7);
|
||||
}
|
||||
|
||||
while (N-- > 0 && I < checked((int)TableSize))
|
||||
{
|
||||
Table[I++] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (UnpAudioBlock)
|
||||
{
|
||||
for (int I = 0; I < UnpChannels; I++)
|
||||
{
|
||||
MakeDecodeTables(Table, (int)(I * MC20), MD[I], MC20);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
MakeDecodeTables(Table, 0, BlockTables.LD, NC20);
|
||||
MakeDecodeTables(Table, (int)NC20, BlockTables.DD, DC20);
|
||||
MakeDecodeTables(Table, (int)(NC20 + DC20), BlockTables.RD, RC20);
|
||||
}
|
||||
Array.Copy(Table, 0, this.UnpOldTable20, 0, UnpOldTable20.Length);
|
||||
return true;
|
||||
}
|
||||
|
||||
private void ReadLastTables()
|
||||
{
|
||||
if (ReadTop >= Inp.InAddr + 5)
|
||||
|
||||
@@ -1,793 +0,0 @@
|
||||
#if !Rar2017_64bit
|
||||
#else
|
||||
using nint = System.Int64;
|
||||
using nuint = System.UInt64;
|
||||
using size_t = System.UInt64;
|
||||
#endif
|
||||
|
||||
//using static SharpCompress.Compressors.Rar.UnpackV2017.Unpack.Unpack30Local;
|
||||
/*
|
||||
namespace SharpCompress.Compressors.Rar.UnpackV2017
|
||||
{
|
||||
internal partial class Unpack
|
||||
{
|
||||
|
||||
#if !RarV2017_RAR5ONLY
|
||||
// We use it instead of direct PPM.DecodeChar call to be sure that
|
||||
// we reset PPM structures in case of corrupt data. It is important,
|
||||
// because these structures can be invalid after PPM.DecodeChar returned -1.
|
||||
int SafePPMDecodeChar()
|
||||
{
|
||||
int Ch=PPM.DecodeChar();
|
||||
if (Ch==-1) // Corrupt PPM data found.
|
||||
{
|
||||
PPM.CleanUp(); // Reset possibly corrupt PPM data structures.
|
||||
UnpBlockType=BLOCK_LZ; // Set faster and more fail proof LZ mode.
|
||||
}
|
||||
return(Ch);
|
||||
}
|
||||
|
||||
internal static class Unpack30Local {
|
||||
public static readonly byte[] LDecode={0,1,2,3,4,5,6,7,8,10,12,14,16,20,24,28,32,40,48,56,64,80,96,112,128,160,192,224};
|
||||
public static readonly byte[] LBits= {0,0,0,0,0,0,0,0,1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5};
|
||||
public static readonly int[] DDecode = new int[DC];
|
||||
public static readonly byte[] DBits = new byte[DC];
|
||||
public static readonly int[] DBitLengthCounts= {4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,14,0,12};
|
||||
public static readonly byte[] SDDecode={0,4,8,16,32,64,128,192};
|
||||
public static readonly byte[] SDBits= {2,2,3, 4, 5, 6, 6, 6};
|
||||
}
|
||||
void Unpack29(bool Solid)
|
||||
{
|
||||
uint Bits;
|
||||
|
||||
if (DDecode[1]==0)
|
||||
{
|
||||
int Dist=0,BitLength=0,Slot=0;
|
||||
for (int I=0;I<DBitLengthCounts.Length;I++,BitLength++)
|
||||
for (int J=0;J<DBitLengthCounts[I];J++,Slot++,Dist+=(1<<BitLength))
|
||||
{
|
||||
DDecode[Slot]=Dist;
|
||||
DBits[Slot]=(byte)BitLength;
|
||||
}
|
||||
}
|
||||
|
||||
FileExtracted=true;
|
||||
|
||||
if (!Suspended)
|
||||
{
|
||||
UnpInitData(Solid);
|
||||
if (!UnpReadBuf30())
|
||||
return;
|
||||
if ((!Solid || !TablesRead3) && !ReadTables30())
|
||||
return;
|
||||
}
|
||||
|
||||
while (true)
|
||||
{
|
||||
UnpPtr&=MaxWinMask;
|
||||
|
||||
if (Inp.InAddr>ReadBorder)
|
||||
{
|
||||
if (!UnpReadBuf30())
|
||||
break;
|
||||
}
|
||||
if (((WrPtr-UnpPtr) & MaxWinMask)<260 && WrPtr!=UnpPtr)
|
||||
{
|
||||
UnpWriteBuf30();
|
||||
if (WrittenFileSize>DestUnpSize)
|
||||
return;
|
||||
if (Suspended)
|
||||
{
|
||||
FileExtracted=false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (UnpBlockType==BLOCK_PPM)
|
||||
{
|
||||
// Here speed is critical, so we do not use SafePPMDecodeChar,
|
||||
// because sometimes even the inline function can introduce
|
||||
// some additional penalty.
|
||||
int Ch=PPM.DecodeChar();
|
||||
if (Ch==-1) // Corrupt PPM data found.
|
||||
{
|
||||
PPM.CleanUp(); // Reset possibly corrupt PPM data structures.
|
||||
UnpBlockType=BLOCK_LZ; // Set faster and more fail proof LZ mode.
|
||||
break;
|
||||
}
|
||||
if (Ch==PPMEscChar)
|
||||
{
|
||||
int NextCh=SafePPMDecodeChar();
|
||||
if (NextCh==0) // End of PPM encoding.
|
||||
{
|
||||
if (!ReadTables30())
|
||||
break;
|
||||
continue;
|
||||
}
|
||||
if (NextCh==-1) // Corrupt PPM data found.
|
||||
break;
|
||||
if (NextCh==2) // End of file in PPM mode.
|
||||
break;
|
||||
if (NextCh==3) // Read VM code.
|
||||
{
|
||||
if (!ReadVMCodePPM())
|
||||
break;
|
||||
continue;
|
||||
}
|
||||
if (NextCh==4) // LZ inside of PPM.
|
||||
{
|
||||
uint Distance=0,Length;
|
||||
bool Failed=false;
|
||||
for (int I=0;I<4 && !Failed;I++)
|
||||
{
|
||||
int _Ch=SafePPMDecodeChar();
|
||||
if (_Ch==-1)
|
||||
Failed=true;
|
||||
else
|
||||
if (I==3)
|
||||
Length=(byte)_Ch;
|
||||
else
|
||||
Distance=(Distance<<8)+(byte)_Ch;
|
||||
}
|
||||
if (Failed)
|
||||
break;
|
||||
|
||||
CopyString(Length+32,Distance+2);
|
||||
continue;
|
||||
}
|
||||
if (NextCh==5) // One byte distance match (RLE) inside of PPM.
|
||||
{
|
||||
int Length=SafePPMDecodeChar();
|
||||
if (Length==-1)
|
||||
break;
|
||||
CopyString((uint)(Length+4),1);
|
||||
continue;
|
||||
}
|
||||
// If we are here, NextCh must be 1, what means that current byte
|
||||
// is equal to our 'escape' byte, so we just store it to Window.
|
||||
}
|
||||
Window[UnpPtr++]=(byte)Ch;
|
||||
continue;
|
||||
}
|
||||
|
||||
uint Number=DecodeNumber(Inp,BlockTables.LD);
|
||||
if (Number<256)
|
||||
{
|
||||
Window[UnpPtr++]=(byte)Number;
|
||||
continue;
|
||||
}
|
||||
if (Number>=271)
|
||||
{
|
||||
uint Length=(uint)(LDecode[Number-=271]+3);
|
||||
if ((Bits=LBits[Number])>0)
|
||||
{
|
||||
Length+=Inp.getbits()>>(int)(16-Bits);
|
||||
Inp.addbits(Bits);
|
||||
}
|
||||
|
||||
uint DistNumber=DecodeNumber(Inp,BlockTables.DD);
|
||||
uint Distance=(uint)(DDecode[DistNumber]+1);
|
||||
if ((Bits=DBits[DistNumber])>0)
|
||||
{
|
||||
if (DistNumber>9)
|
||||
{
|
||||
if (Bits>4)
|
||||
{
|
||||
Distance+=((Inp.getbits()>>(int)(20-Bits))<<4);
|
||||
Inp.addbits(Bits-4);
|
||||
}
|
||||
if (LowDistRepCount>0)
|
||||
{
|
||||
LowDistRepCount--;
|
||||
Distance+=(uint)PrevLowDist;
|
||||
}
|
||||
else
|
||||
{
|
||||
uint LowDist=DecodeNumber(Inp,BlockTables.LDD);
|
||||
if (LowDist==16)
|
||||
{
|
||||
LowDistRepCount=(int)(LOW_DIST_REP_COUNT-1);
|
||||
Distance+=(uint)PrevLowDist;
|
||||
}
|
||||
else
|
||||
{
|
||||
Distance+=LowDist;
|
||||
PrevLowDist=(int)LowDist;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
Distance+=Inp.getbits()>>(int)(16-Bits);
|
||||
Inp.addbits(Bits);
|
||||
}
|
||||
}
|
||||
|
||||
if (Distance>=0x2000)
|
||||
{
|
||||
Length++;
|
||||
if (Distance>=0x40000)
|
||||
Length++;
|
||||
}
|
||||
|
||||
InsertOldDist(Distance);
|
||||
LastLength=Length;
|
||||
CopyString(Length,Distance);
|
||||
continue;
|
||||
}
|
||||
if (Number==256)
|
||||
{
|
||||
if (!ReadEndOfBlock())
|
||||
break;
|
||||
continue;
|
||||
}
|
||||
if (Number==257)
|
||||
{
|
||||
if (!ReadVMCode())
|
||||
break;
|
||||
continue;
|
||||
}
|
||||
if (Number==258)
|
||||
{
|
||||
if (LastLength!=0)
|
||||
CopyString(LastLength,OldDist[0]);
|
||||
continue;
|
||||
}
|
||||
if (Number<263)
|
||||
{
|
||||
uint DistNum=Number-259;
|
||||
uint Distance=OldDist[DistNum];
|
||||
for (uint I=DistNum;I>0;I--)
|
||||
OldDist[I]=OldDist[I-1];
|
||||
OldDist[0]=Distance;
|
||||
|
||||
uint LengthNumber=DecodeNumber(Inp,BlockTables.RD);
|
||||
int Length=LDecode[LengthNumber]+2;
|
||||
if ((Bits=LBits[LengthNumber])>0)
|
||||
{
|
||||
Length+=(int)(Inp.getbits()>>(int)(16-Bits));
|
||||
Inp.addbits(Bits);
|
||||
}
|
||||
LastLength=(uint)Length;
|
||||
CopyString((uint)Length,Distance);
|
||||
continue;
|
||||
}
|
||||
if (Number<272)
|
||||
{
|
||||
uint Distance=(uint)(SDDecode[Number-=263]+1);
|
||||
if ((Bits=SDBits[Number])>0)
|
||||
{
|
||||
Distance+=Inp.getbits()>>(int)(16-Bits);
|
||||
Inp.addbits(Bits);
|
||||
}
|
||||
InsertOldDist(Distance);
|
||||
LastLength=2;
|
||||
CopyString(2,Distance);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
UnpWriteBuf30();
|
||||
}
|
||||
|
||||
|
||||
// Return 'false' to quit unpacking the current file or 'true' to continue.
|
||||
bool ReadEndOfBlock()
|
||||
{
|
||||
uint BitField=Inp.getbits();
|
||||
bool NewTable,NewFile=false;
|
||||
|
||||
// "1" - no new file, new table just here.
|
||||
// "00" - new file, no new table.
|
||||
// "01" - new file, new table (in beginning of next file).
|
||||
|
||||
if ((BitField & 0x8000)!=0)
|
||||
{
|
||||
NewTable=true;
|
||||
Inp.addbits(1);
|
||||
}
|
||||
else
|
||||
{
|
||||
NewFile=true;
|
||||
NewTable=(BitField & 0x4000)!=0;
|
||||
Inp.addbits(2);
|
||||
}
|
||||
TablesRead3=!NewTable;
|
||||
|
||||
// Quit immediately if "new file" flag is set. If "new table" flag
|
||||
// is present, we'll read the table in beginning of next file
|
||||
// based on 'TablesRead3' 'false' value.
|
||||
if (NewFile)
|
||||
return false;
|
||||
return ReadTables30(); // Quit only if we failed to read tables.
|
||||
}
|
||||
|
||||
|
||||
bool ReadVMCode()
|
||||
{
|
||||
// Entire VM code is guaranteed to fully present in block defined
|
||||
// by current Huffman table. Compressor checks that VM code does not cross
|
||||
// Huffman block boundaries.
|
||||
uint FirstByte=Inp.getbits()>>8;
|
||||
Inp.addbits(8);
|
||||
uint Length=(FirstByte & 7)+1;
|
||||
if (Length==7)
|
||||
{
|
||||
Length=(Inp.getbits()>>8)+7;
|
||||
Inp.addbits(8);
|
||||
}
|
||||
else
|
||||
if (Length==8)
|
||||
{
|
||||
Length=Inp.getbits();
|
||||
Inp.addbits(16);
|
||||
}
|
||||
if (Length==0)
|
||||
return false;
|
||||
Array<byte> VMCode(Length);
|
||||
for (uint I=0;I<Length;I++)
|
||||
{
|
||||
// Try to read the new buffer if only one byte is left.
|
||||
// But if we read all bytes except the last, one byte is enough.
|
||||
if (Inp.InAddr>=ReadTop-1 && !UnpReadBuf30() && I<Length-1)
|
||||
return false;
|
||||
VMCode[I]=Inp.getbits()>>8;
|
||||
Inp.addbits(8);
|
||||
}
|
||||
return AddVMCode(FirstByte,&VMCode[0],Length);
|
||||
}
|
||||
|
||||
|
||||
bool ReadVMCodePPM()
{
  uint FirstByte=(uint)SafePPMDecodeChar();
  if ((int)FirstByte==-1)
    return false;
  uint Length=(FirstByte & 7)+1;
  if (Length==7)
  {
    int B1=SafePPMDecodeChar();
    if (B1==-1)
      return false;
    Length=B1+7;
  }
  else
    if (Length==8)
    {
      int B1=SafePPMDecodeChar();
      if (B1==-1)
        return false;
      int B2=SafePPMDecodeChar();
      if (B2==-1)
        return false;
      Length=B1*256+B2;
    }
  if (Length==0)
    return false;
  Array<byte> VMCode(Length);
  for (uint I=0;I<Length;I++)
  {
    int Ch=SafePPMDecodeChar();
    if (Ch==-1)
      return false;
    VMCode[I]=Ch;
  }
  return AddVMCode(FirstByte,&VMCode[0],Length);
}

bool AddVMCode(uint FirstByte,byte[] Code,int CodeSize)
{
  VMCodeInp.InitBitInput();
  //x memcpy(VMCodeInp.InBuf,Code,Min(BitInput.MAX_SIZE,CodeSize));
  Array.Copy(Code, 0, VMCodeInp.InBuf, 0, Math.Min(BitInput.MAX_SIZE,CodeSize));
  VM.Init();

  uint FiltPos;
  if ((FirstByte & 0x80)!=0)
  {
    FiltPos=RarVM.ReadData(VMCodeInp);
    if (FiltPos==0)
      InitFilters30(false);
    else
      FiltPos--;
  }
  else
    FiltPos=(uint)this.LastFilter; // Use the same filter as last time.

  if (FiltPos>Filters30.Count || FiltPos>OldFilterLengths.Count)
    return false;
  LastFilter=(int)FiltPos;
  bool NewFilter=(FiltPos==Filters30.Count);

  UnpackFilter30 StackFilter=new UnpackFilter30(); // New filter for PrgStack.

  UnpackFilter30 Filter;
  if (NewFilter) // New filter code, never used before since the last VM reset.
  {
    if (FiltPos>MAX3_UNPACK_FILTERS)
    {
      // Too many different filters, corrupt archive.
      //delete StackFilter;
      return false;
    }

    Filters30.Add(1);
    Filters30[Filters30.Count-1]=Filter=new UnpackFilter30();
    StackFilter.ParentFilter=(uint)(Filters30.Count-1);

    // Reserve one item to store the data block length of our new filter
    // entry. We'll set it to the real block length below, after reading it.
    // But we need to initialize it now, because when processing corrupt
    // data, we can access this item even before we set it to the real value.
    OldFilterLengths.Add(0);
  }
  else // Filter was used in the past.
  {
    Filter=Filters30[(int)FiltPos];
    StackFilter.ParentFilter=FiltPos;
  }

  int EmptyCount=0;
  for (int I=0;I<PrgStack.Count;I++)
  {
    PrgStack[I-EmptyCount]=PrgStack[I];
    if (PrgStack[I]==null)
      EmptyCount++;
    if (EmptyCount>0)
      PrgStack[I]=null;
  }
  if (EmptyCount==0)
  {
    if (PrgStack.Count>MAX3_UNPACK_FILTERS)
    {
      //delete StackFilter;
      return false;
    }
    PrgStack.Add(1);
    EmptyCount=1;
  }
  size_t StackPos=(uint)(this.PrgStack.Count-EmptyCount);
  PrgStack[(int)StackPos]=StackFilter;

  uint BlockStart=RarVM.ReadData(VMCodeInp);
  if ((FirstByte & 0x40)!=0)
    BlockStart+=258;
  StackFilter.BlockStart=(uint)((BlockStart+UnpPtr)&MaxWinMask);
  if ((FirstByte & 0x20)!=0)
  {
    StackFilter.BlockLength=RarVM.ReadData(VMCodeInp);

    // Store the last data block length for the current filter.
    OldFilterLengths[(int)FiltPos]=(int)StackFilter.BlockLength;
  }
  else
  {
    // Set the data block size to the same value as the previous block size
    // for the same filter. With corrupt data it is possible to access a new,
    // not yet filled item of the OldFilterLengths array here. This is why
    // we set new OldFilterLengths items to zero above.
    StackFilter.BlockLength=FiltPos<OldFilterLengths.Count ? OldFilterLengths[(int)FiltPos]:0;
  }

  StackFilter.NextWindow=WrPtr!=UnpPtr && ((WrPtr-UnpPtr)&MaxWinMask)<=BlockStart;

  // DebugLog("\nNextWindow: UnpPtr=%08x WrPtr=%08x BlockStart=%08x",UnpPtr,WrPtr,BlockStart);

  memset(StackFilter.Prg.InitR,0,sizeof(StackFilter.Prg.InitR));
  StackFilter.Prg.InitR[4]=StackFilter.BlockLength;

  if ((FirstByte & 0x10)!=0) // Set registers to optional parameters if any.
  {
    uint InitMask=VMCodeInp.fgetbits()>>9;
    VMCodeInp.faddbits(7);
    for (int I=0;I<7;I++)
      if ((InitMask & (1<<I)) != 0)
        StackFilter.Prg.InitR[I]=RarVM.ReadData(VMCodeInp);
  }

  if (NewFilter)
  {
    uint VMCodeSize=RarVM.ReadData(VMCodeInp);
    if (VMCodeSize>=0x10000 || VMCodeSize==0)
      return false;
    Array<byte> VMCode(VMCodeSize);
    for (uint I=0;I<VMCodeSize;I++)
    {
      if (VMCodeInp.Overflow(3))
        return false;
      VMCode[I]=VMCodeInp.fgetbits()>>8;
      VMCodeInp.faddbits(8);
    }
    VM.Prepare(&VMCode[0],VMCodeSize,&Filter->Prg);
  }
  StackFilter.Prg.Type=Filter.Prg.Type;

  return true;
}

bool UnpReadBuf30()
{
  int DataSize=ReadTop-Inp.InAddr; // Data left to process.
  if (DataSize<0)
    return false;
  if (Inp.InAddr>BitInput.MAX_SIZE/2)
  {
    // If we have already processed more than half of the buffer, move the
    // remaining data to the beginning to free more space for new data
    // and to ensure that the calling function does not cross the buffer
    // border even if we did not read anything here. It also ensures that the
    // read size is not less than CRYPT_BLOCK_SIZE, so we can align it without
    // the risk of making it zero.
    if (DataSize>0)
      //x memmove(Inp.InBuf,Inp.InBuf+Inp.InAddr,DataSize);
      Array.Copy(Inp.InBuf,Inp.InAddr,Inp.InBuf,0,DataSize);
    Inp.InAddr=0;
    ReadTop=DataSize;
  }
  else
    DataSize=ReadTop;
  int ReadCode=UnpIO_UnpRead(Inp.InBuf,DataSize,BitInput.MAX_SIZE-DataSize);
  if (ReadCode>0)
    ReadTop+=ReadCode;
  ReadBorder=ReadTop-30;
  return ReadCode!=-1;
}

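The half-buffer check above is a standard sliding-refill pattern: once more than half of the input buffer is consumed, the unread tail is moved to the front so a large read can follow. A minimal generic sketch of the same idea, assuming a plain System.IO.Stream source (names are illustrative, not the library's API):

// Sketch only: slide unread bytes to the buffer start, then top the buffer up.
static int Refill(System.IO.Stream src, byte[] buf, ref int pos, ref int top)
{
    var left = top - pos;                             // unconsumed bytes
    if (left > 0)
        System.Buffer.BlockCopy(buf, pos, buf, 0, left);
    pos = 0;
    top = left;
    var read = src.Read(buf, top, buf.Length - top);
    if (read > 0)
        top += read;
    return read;                                      // 0 signals end of stream
}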
void UnpWriteBuf30()
{
  uint WrittenBorder=(uint)WrPtr;
  uint WriteSize=(uint)((UnpPtr-WrittenBorder)&MaxWinMask);
  for (int I=0;I<PrgStack.Count;I++)
  {
    // Here we apply filters to the data which we need to write.
    // We always copy the data to virtual machine memory before processing.
    // We cannot process it in place in the window buffer, because that data
    // can still be used for future string matches, so we must preserve it
    // in its original form.

    UnpackFilter30 flt=PrgStack[I];
    if (flt==null)
      continue;
    if (flt.NextWindow)
    {
      flt.NextWindow=false;
      continue;
    }
    uint BlockStart=flt.BlockStart;
    uint BlockLength=flt.BlockLength;
    if (((BlockStart-WrittenBorder)&MaxWinMask)<WriteSize)
    {
      if (WrittenBorder!=BlockStart)
      {
        UnpWriteArea(WrittenBorder,BlockStart);
        WrittenBorder=BlockStart;
        WriteSize=(uint)((UnpPtr-WrittenBorder)&MaxWinMask);
      }
      if (BlockLength<=WriteSize)
      {
        uint BlockEnd=(BlockStart+BlockLength)&MaxWinMask;
        if (BlockStart<BlockEnd || BlockEnd==0)
          VM.SetMemory(0,Window+BlockStart,BlockLength);
        else
        {
          uint FirstPartLength=uint(MaxWinSize-BlockStart);
          VM.SetMemory(0,Window+BlockStart,FirstPartLength);
          VM.SetMemory(FirstPartLength,Window,BlockEnd);
        }

        VM_PreparedProgram *ParentPrg=&Filters30[flt->ParentFilter]->Prg;
        VM_PreparedProgram *Prg=&flt->Prg;

        ExecuteCode(Prg);

        byte[] FilteredData=Prg.FilteredData;
        uint FilteredDataSize=Prg.FilteredDataSize;

        delete PrgStack[I];
        PrgStack[I]=null;
        while (I+1<PrgStack.Count)
        {
          UnpackFilter30 NextFilter=PrgStack[I+1];
          // It is required to check NextWindow here.
          if (NextFilter==null || NextFilter.BlockStart!=BlockStart ||
              NextFilter.BlockLength!=FilteredDataSize || NextFilter.NextWindow)
            break;

          // Apply several filters to the same data block.

          VM.SetMemory(0,FilteredData,FilteredDataSize);

          VM_PreparedProgram *ParentPrg=&Filters30[NextFilter.ParentFilter]->Prg;
          VM_PreparedProgram *NextPrg=&NextFilter->Prg;

          ExecuteCode(NextPrg);

          FilteredData=NextPrg.FilteredData;
          FilteredDataSize=NextPrg.FilteredDataSize;
          I++;
          delete PrgStack[I];
          PrgStack[I]=null;
        }
        UnpIO_UnpWrite(FilteredData,0,FilteredDataSize);
        UnpSomeRead=true;
        WrittenFileSize+=FilteredDataSize;
        WrittenBorder=BlockEnd;
        WriteSize=(uint)((UnpPtr-WrittenBorder)&MaxWinMask);
      }
      else
      {
        // The current filter intersects the window write border, so we adjust
        // the window border to process this filter next time, not now.
        for (size_t J=I;J<PrgStack.Count;J++)
        {
          UnpackFilter30 flt=PrgStack[J];
          if (flt!=null && flt.NextWindow)
            flt.NextWindow=false;
        }
        WrPtr=WrittenBorder;
        return;
      }
    }
  }

  UnpWriteArea(WrittenBorder,UnpPtr);
  WrPtr=UnpPtr;
}

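The comment in UnpWriteBuf30 above states the key invariant: window bytes may still be the source of later LZ matches, so a filter must run on a copy rather than in place. Reduced to its essence (illustrative names, with the filter passed as a delegate):

// Sketch only: run a filter over a copy of the window slice so the
// original bytes stay available for future string matches.
static byte[] RunFilterOnCopy(byte[] window, int start, int length,
                              System.Func<byte[], byte[]> filter)
{
    var copy = new byte[length];
    System.Buffer.BlockCopy(window, start, copy, 0, length);
    return filter(copy);                              // the window itself is untouched
}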
void ExecuteCode(VM_PreparedProgram *Prg)
{
  Prg->InitR[6]=(uint)WrittenFileSize;
  VM.Execute(Prg);
}

bool ReadTables30()
{
  byte[] BitLength = new byte[BC];
  byte[] Table = new byte[HUFF_TABLE_SIZE30];
  if (Inp.InAddr>ReadTop-25)
    if (!UnpReadBuf30())
      return(false);
  Inp.faddbits((uint)((8-Inp.InBit)&7));
  uint BitField=Inp.fgetbits();
  if ((BitField & 0x8000) != 0)
  {
    UnpBlockType=BLOCK_PPM;
    return(PPM.DecodeInit(this,PPMEscChar));
  }
  UnpBlockType=BLOCK_LZ;

  PrevLowDist=0;
  LowDistRepCount=0;

  if ((BitField & 0x4000) == 0)
    Utility.Memset(UnpOldTable,0,UnpOldTable.Length);
  Inp.faddbits(2);

  for (uint I=0;I<BC;I++)
  {
    uint Length=(byte)(Inp.fgetbits() >> 12);
    Inp.faddbits(4);
    if (Length==15)
    {
      uint ZeroCount=(byte)(Inp.fgetbits() >> 12);
      Inp.faddbits(4);
      if (ZeroCount==0)
        BitLength[I]=15;
      else
      {
        ZeroCount+=2;
        while (ZeroCount-- > 0 && I<BitLength.Length)
          BitLength[I++]=0;
        I--;
      }
    }
    else
      BitLength[I]=(byte)Length;
  }
  MakeDecodeTables(BitLength,0,BlockTables.BD,BC30);

  const uint TableSize=HUFF_TABLE_SIZE30;
  for (uint I=0;I<TableSize;)
  {
    if (Inp.InAddr>ReadTop-5)
      if (!UnpReadBuf30())
        return(false);
    uint Number=DecodeNumber(Inp,BlockTables.BD);
    if (Number<16)
    {
      Table[I]=(byte)((Number+this.UnpOldTable[I]) & 0xf);
      I++;
    }
    else
      if (Number<18)
      {
        uint N;
        if (Number==16)
        {
          N=(Inp.fgetbits() >> 13)+3;
          Inp.faddbits(3);
        }
        else
        {
          N=(Inp.fgetbits() >> 9)+11;
          Inp.faddbits(7);
        }
        if (I==0)
          return false; // We cannot have "repeat previous" code at the first position.
        else
          while (N-- > 0 && I<TableSize)
          {
            Table[I]=Table[I-1];
            I++;
          }
      }
      else
      {
        uint N;
        if (Number==18)
        {
          N=(Inp.fgetbits() >> 13)+3;
          Inp.faddbits(3);
        }
        else
        {
          N=(Inp.fgetbits() >> 9)+11;
          Inp.faddbits(7);
        }
        while (N-- > 0 && I<TableSize)
          Table[I++]=0;
      }
  }
  TablesRead3=true;
  if (Inp.InAddr>ReadTop)
    return false;
  MakeDecodeTables(Table,0,BlockTables.LD,NC30);
  MakeDecodeTables(Table,(int)NC30,BlockTables.DD,DC30);
  MakeDecodeTables(Table,(int)(NC30+DC30),BlockTables.LDD,LDC30);
  MakeDecodeTables(Table,(int)(NC30+DC30+LDC30),BlockTables.RD,RC30);
  //x memcpy(UnpOldTable,Table,sizeof(UnpOldTable));
  Array.Copy(Table,0,UnpOldTable,0,UnpOldTable.Length);
  return true;
}

#endif

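ReadTables30 above expands a small run-length code over the bit-length table: values below 16 are literal lengths (added as deltas against the previous table), 16-17 repeat the preceding length 3-10 or 11-138 times, and 18-19 emit runs of zeros over the same ranges. A simplified sketch of that expansion step, with the extra-bit count passed in and the old-table delta omitted:

// Sketch only: expand one opcode of the bit-length RLE used above.
// 'extra' stands for the 3- or 7-bit repeat count already read from the stream.
static int ExpandLengthOpcode(byte[] table, int i, uint number, uint extra)
{
    if (number < 16)
    {
        table[i] = (byte)number;                      // literal bit length
        return i + 1;
    }
    var n = (number == 16 || number == 18) ? extra + 3 : extra + 11;
    var value = number < 18 ? table[i - 1] : (byte)0; // repeat previous or zero-fill
    while (n-- > 0 && i < table.Length)
        table[i++] = value;
    return i;
}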
void UnpInitData30(bool Solid)
{
  if (!Solid)
  {
    TablesRead3=false;
    Utility.Memset(UnpOldTable, 0, UnpOldTable.Length);
    PPMEscChar=2;
    UnpBlockType=BLOCK_LZ;
  }
  InitFilters30(Solid);
}

void InitFilters30(bool Solid)
{
  if (!Solid)
  {
    //OldFilterLengths.SoftReset();
    OldFilterLengths.Clear();
    LastFilter=0;

    //for (size_t I=0;I<Filters30.Count;I++)
    //  delete Filters30[I];
    //Filters30.SoftReset();
    Filters30.Clear();
  }
  //for (size_t I=0;I<PrgStack.Count;I++)
  //  delete PrgStack[I];
  //PrgStack.SoftReset();
  PrgStack.Clear();
}


}
}
*/

@@ -222,6 +222,180 @@ internal partial class Unpack
        UnpWriteBuf();
    }

    private async System.Threading.Tasks.Task Unpack5Async(
        bool Solid,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        FileExtracted = true;

        if (!Suspended)
        {
            UnpInitData(Solid);
            if (!await UnpReadBufAsync(cancellationToken).ConfigureAwait(false))
            {
                return;
            }

            if (
                !ReadBlockHeader(Inp, ref BlockHeader)
                || !ReadTables(Inp, ref BlockHeader, ref BlockTables)
                || !TablesRead5
            )
            {
                return;
            }
        }

        while (true)
        {
            UnpPtr &= MaxWinMask;

            if (Inp.InAddr >= ReadBorder)
            {
                var FileDone = false;

                while (
                    Inp.InAddr > BlockHeader.BlockStart + BlockHeader.BlockSize - 1
                    || Inp.InAddr == BlockHeader.BlockStart + BlockHeader.BlockSize - 1
                        && Inp.InBit >= BlockHeader.BlockBitSize
                )
                {
                    if (BlockHeader.LastBlockInFile)
                    {
                        FileDone = true;
                        break;
                    }
                    if (
                        !ReadBlockHeader(Inp, ref BlockHeader)
                        || !ReadTables(Inp, ref BlockHeader, ref BlockTables)
                    )
                    {
                        return;
                    }
                }
                if (FileDone || !await UnpReadBufAsync(cancellationToken).ConfigureAwait(false))
                {
                    break;
                }
            }

            if (((WriteBorder - UnpPtr) & MaxWinMask) < MAX_LZ_MATCH + 3 && WriteBorder != UnpPtr)
            {
                await UnpWriteBufAsync(cancellationToken).ConfigureAwait(false);
                if (WrittenFileSize > DestUnpSize)
                {
                    return;
                }
            }

            uint MainSlot = DecodeNumber(Inp, BlockTables.LD);
            if (MainSlot < 256)
            {
                if (Fragmented)
                {
                    FragWindow[UnpPtr++] = (byte)MainSlot;
                }
                else
                {
                    Window[UnpPtr++] = (byte)MainSlot;
                }
                continue;
            }
            if (MainSlot >= 262)
            {
                uint Length = SlotToLength(Inp, MainSlot - 262);

                uint DBits,
                    Distance = 1,
                    DistSlot = DecodeNumber(Inp, BlockTables.DD);
                if (DistSlot < 4)
                {
                    DBits = 0;
                    Distance += DistSlot;
                }
                else
                {
                    DBits = (DistSlot / 2) - 1;
                    Distance += (2 | (DistSlot & 1)) << (int)DBits;
                }

                if (DBits > 0)
                {
                    if (DBits >= 4)
                    {
                        if (DBits > 4)
                        {
                            Distance += ((Inp.getbits() >> (int)(20 - DBits)) << 4);
                            Inp.addbits(DBits - 4);
                        }

                        uint LowDist = DecodeNumber(Inp, BlockTables.LDD);
                        Distance += LowDist;
                    }
                    else
                    {
                        Distance += Inp.getbits() >> (int)(16 - DBits);
                        Inp.addbits(DBits);
                    }
                }

                if (Distance > 0x100)
                {
                    Length++;
                    if (Distance > 0x2000)
                    {
                        Length++;
                        if (Distance > 0x40000)
                        {
                            Length++;
                        }
                    }
                }

                InsertOldDist(Distance);
                LastLength = Length;
                CopyString(Length, Distance);
                continue;
            }
            if (MainSlot == 256)
            {
                var Filter = new UnpackFilter();
                if (!ReadFilter(Inp, Filter) || !AddFilter(Filter))
                {
                    break;
                }
                continue;
            }
            if (MainSlot == 257)
            {
                if (LastLength != 0)
                {
                    CopyString((uint)LastLength, (uint)OldDist[0]);
                }
                continue;
            }
            if (MainSlot < 262)
            {
                uint DistNum = MainSlot - 258;
                uint Distance = (uint)OldDist[(int)DistNum];
                for (var I = (int)DistNum; I > 0; I--)
                {
                    OldDist[I] = OldDist[I - 1];
                }
                OldDist[0] = Distance;

                uint LengthSlot = DecodeNumber(Inp, BlockTables.RD);
                uint Length = SlotToLength(Inp, LengthSlot);
                LastLength = Length;
                CopyString(Length, Distance);

                continue;
            }
        }
        await UnpWriteBufAsync(cancellationToken).ConfigureAwait(false);
    }

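The DistSlot arithmetic in Unpack5Async above is the RAR5 logarithmic distance code: slots 0-3 map directly to distances 1-4, and every later slot contributes a power-of-two base plus DBits of extra data (with the low four bits Huffman-coded for large distances). A stand-alone sketch of the base computation, extra-bit reads elided:

// Sketch only: base distance and extra-bit count for a RAR5 distance slot,
// mirroring the DBits math in Unpack5Async above.
static (uint BaseDistance, uint ExtraBits) DistanceSlotBase(uint distSlot)
{
    if (distSlot < 4)
        return (distSlot + 1, 0);                     // direct distances 1..4
    var dbits = (distSlot / 2) - 1;
    var baseDistance = 1 + ((2u | (distSlot & 1)) << (int)dbits);
    return (baseDistance, dbits);
}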
    private uint ReadFilterData(BitInput Inp)
    {
        var ByteCount = (Inp.fgetbits() >> 14) + 1;
@@ -339,6 +513,58 @@ internal partial class Unpack
        return ReadCode != -1;
    }

    private async System.Threading.Tasks.Task<bool> UnpReadBufAsync(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        var DataSize = ReadTop - Inp.InAddr; // Data left to process.
        if (DataSize < 0)
        {
            return false;
        }

        BlockHeader.BlockSize -= Inp.InAddr - BlockHeader.BlockStart;
        if (Inp.InAddr > MAX_SIZE / 2)
        {
            if (DataSize > 0)
            {
                Buffer.BlockCopy(Inp.InBuf, Inp.InAddr, Inp.InBuf, 0, DataSize);
            }

            Inp.InAddr = 0;
            ReadTop = DataSize;
        }
        else
        {
            DataSize = ReadTop;
        }

        var ReadCode = 0;
        if (MAX_SIZE != DataSize)
        {
            ReadCode = await UnpIO_UnpReadAsync(
                    Inp.InBuf,
                    DataSize,
                    MAX_SIZE - DataSize,
                    cancellationToken
                )
                .ConfigureAwait(false);
        }

        if (ReadCode > 0) // Can be also -1.
        {
            ReadTop += ReadCode;
        }

        ReadBorder = ReadTop - 30;
        BlockHeader.BlockStart = Inp.InAddr;
        if (BlockHeader.BlockSize != -1) // '-1' means not defined yet.
        {
            ReadBorder = Math.Min(ReadBorder, BlockHeader.BlockStart + BlockHeader.BlockSize - 1);
        }
        return ReadCode != -1;
    }

    private void UnpWriteBuf()
    {
        var WrittenBorder = WrPtr;
@@ -533,6 +759,163 @@ internal partial class Unpack
        }
    }

    private async System.Threading.Tasks.Task UnpWriteBufAsync(
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        var WrittenBorder = WrPtr;
        var FullWriteSize = (UnpPtr - WrittenBorder) & MaxWinMask;
        var WriteSizeLeft = FullWriteSize;
        var NotAllFiltersProcessed = false;

        for (var I = 0; I < Filters.Count; I++)
        {
            var flt = Filters[I];
            if (flt.Type == FILTER_NONE)
            {
                continue;
            }

            if (flt.NextWindow)
            {
                if (((flt.BlockStart - WrPtr) & MaxWinMask) <= FullWriteSize)
                {
                    flt.NextWindow = false;
                }
                continue;
            }

            var BlockStart = flt.BlockStart;
            var BlockLength = flt.BlockLength;
            if (((BlockStart - WrittenBorder) & MaxWinMask) < WriteSizeLeft)
            {
                if (WrittenBorder != BlockStart)
                {
                    await UnpWriteAreaAsync(WrittenBorder, BlockStart, cancellationToken)
                        .ConfigureAwait(false);
                    WrittenBorder = BlockStart;
                    WriteSizeLeft = (UnpPtr - WrittenBorder) & MaxWinMask;
                }
                if (BlockLength <= WriteSizeLeft)
                {
                    if (BlockLength > 0)
                    {
                        var BlockEnd = (BlockStart + BlockLength) & MaxWinMask;

                        FilterSrcMemory = EnsureCapacity(
                            FilterSrcMemory,
                            checked((int)BlockLength)
                        );
                        var Mem = FilterSrcMemory;
                        if (BlockStart < BlockEnd || BlockEnd == 0)
                        {
                            if (Fragmented)
                            {
                                FragWindow.CopyData(Mem, 0, BlockStart, BlockLength);
                            }
                            else
                            {
                                Buffer.BlockCopy(Window, (int)BlockStart, Mem, 0, (int)BlockLength);
                            }
                        }
                        else
                        {
                            var FirstPartLength = MaxWinSize - BlockStart;
                            if (Fragmented)
                            {
                                FragWindow.CopyData(Mem, 0, BlockStart, FirstPartLength);
                                FragWindow.CopyData(Mem, FirstPartLength, 0, BlockEnd);
                            }
                            else
                            {
                                Buffer.BlockCopy(
                                    Window,
                                    (int)BlockStart,
                                    Mem,
                                    0,
                                    (int)FirstPartLength
                                );
                                Buffer.BlockCopy(
                                    Window,
                                    0,
                                    Mem,
                                    (int)FirstPartLength,
                                    (int)BlockEnd
                                );
                            }
                        }

                        var OutMem = ApplyFilter(Mem, BlockLength, flt);

                        Filters[I].Type = FILTER_NONE;

                        if (OutMem != null)
                        {
                            await UnpIO_UnpWriteAsync(OutMem, 0, BlockLength, cancellationToken)
                                .ConfigureAwait(false);
                            WrittenFileSize += BlockLength;
                        }

                        WrittenBorder = BlockEnd;
                        WriteSizeLeft = (UnpPtr - WrittenBorder) & MaxWinMask;
                    }
                }
                else
                {
                    NotAllFiltersProcessed = true;
                    for (var J = I; J < Filters.Count; J++)
                    {
                        var fltj = Filters[J];
                        if (
                            fltj.Type != FILTER_NONE
                            && fltj.NextWindow == false
                            && ((fltj.BlockStart - WrPtr) & MaxWinMask) < FullWriteSize
                        )
                        {
                            fltj.NextWindow = true;
                        }
                    }
                    break;
                }
            }
        }

        var EmptyCount = 0;
        for (var I = 0; I < Filters.Count; I++)
        {
            if (EmptyCount > 0)
            {
                Filters[I - EmptyCount] = Filters[I];
            }

            if (Filters[I].Type == FILTER_NONE)
            {
                EmptyCount++;
            }
        }
        if (EmptyCount > 0)
        {
            Filters.RemoveRange(Filters.Count - EmptyCount, EmptyCount);
        }

        if (!NotAllFiltersProcessed)
        {
            await UnpWriteAreaAsync(WrittenBorder, UnpPtr, cancellationToken).ConfigureAwait(false);
            WrPtr = UnpPtr;
        }

        WriteBorder = (UnpPtr + Math.Min(MaxWinSize, UNPACK_MAX_WRITE)) & MaxWinMask;

        if (
            WriteBorder == UnpPtr
            || WrPtr != UnpPtr
                && ((WrPtr - UnpPtr) & MaxWinMask) < ((WriteBorder - UnpPtr) & MaxWinMask)
        )
        {
            WriteBorder = WrPtr;
        }
    }

    private byte[] ApplyFilter(byte[] __d, uint DataSize, UnpackFilter Flt)
    {
        var Data = 0;
@@ -664,6 +1047,48 @@ internal partial class Unpack
        }
    }

    private async System.Threading.Tasks.Task UnpWriteAreaAsync(
        size_t StartPtr,
        size_t EndPtr,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        if (EndPtr != StartPtr)
        {
            UnpSomeRead = true;
        }

        if (EndPtr < StartPtr)
        {
            UnpAllBuf = true;
        }

        if (Fragmented)
        {
            var SizeToWrite = (EndPtr - StartPtr) & MaxWinMask;
            while (SizeToWrite > 0)
            {
                var BlockSize = FragWindow.GetBlockSize(StartPtr, SizeToWrite);
                FragWindow.GetBuffer(StartPtr, out var __buffer, out var __offset);
                await UnpWriteDataAsync(__buffer, __offset, BlockSize, cancellationToken)
                    .ConfigureAwait(false);
                SizeToWrite -= BlockSize;
                StartPtr = (StartPtr + BlockSize) & MaxWinMask;
            }
        }
        else if (EndPtr < StartPtr)
        {
            await UnpWriteDataAsync(Window, StartPtr, MaxWinSize - StartPtr, cancellationToken)
                .ConfigureAwait(false);
            await UnpWriteDataAsync(Window, 0, EndPtr, cancellationToken).ConfigureAwait(false);
        }
        else
        {
            await UnpWriteDataAsync(Window, StartPtr, EndPtr - StartPtr, cancellationToken)
                .ConfigureAwait(false);
        }
    }

    private void UnpWriteData(byte[] Data, size_t offset, size_t Size)
    {
        if (WrittenFileSize >= DestUnpSize)
@@ -682,6 +1107,29 @@ internal partial class Unpack
        WrittenFileSize += Size;
    }

    private async System.Threading.Tasks.Task UnpWriteDataAsync(
        byte[] Data,
        size_t offset,
        size_t Size,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        if (WrittenFileSize >= DestUnpSize)
        {
            return;
        }

        var WriteSize = Size;
        var LeftToWrite = DestUnpSize - WrittenFileSize;
        if (WriteSize > LeftToWrite)
        {
            WriteSize = (size_t)LeftToWrite;
        }

        await UnpIO_UnpWriteAsync(Data, offset, WriteSize, cancellationToken).ConfigureAwait(false);
        WrittenFileSize += Size;
    }

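Note the asymmetry in UnpWriteDataAsync above: the physical write is clamped to the declared output size, yet WrittenFileSize still advances by the full requested amount, so oversized corrupt input pushes WrittenFileSize past DestUnpSize and terminates the outer loops. The clamp itself reduces to the following (a sketch with illustrative types, not the library's signature):

// Sketch only: how many bytes may still be written physically.
static ulong ClampWriteSize(ulong requested, ulong destUnpSize, ulong writtenFileSize) =>
    writtenFileSize >= destUnpSize
        ? 0
        : System.Math.Min(requested, destUnpSize - writtenFileSize);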
    private void UnpInitData50(bool Solid)
    {
        if (!Solid)

@@ -1,16 +1,11 @@
#nullable disable

using System;
using System.Buffers;
using SharpCompress.Common;
using static SharpCompress.Compressors.Rar.UnpackV2017.PackDef;
using static SharpCompress.Compressors.Rar.UnpackV2017.UnpackGlobal;
#if !Rar2017_64bit
using size_t = System.UInt32;
#else
using nint = System.Int64;
using nuint = System.UInt64;
using size_t = System.UInt64;
#endif

namespace SharpCompress.Compressors.Rar.UnpackV2017;

@@ -42,11 +37,9 @@ internal sealed partial class Unpack : BitInput
        // It prevents crash if first DoUnpack call is later made with wrong
        // (true) 'Solid' value.
        UnpInitData(false);
#if !RarV2017_SFX_MODULE
        // RAR 1.5 decompression initialization
        UnpInitData15(false);
        InitHuff();
#endif
    }

    // later: may need Dispose() if we support thread pool
@@ -110,7 +103,7 @@ internal sealed partial class Unpack : BitInput
            throw new InvalidFormatException("Grow && Fragmented");
        }

        var NewWindow = Fragmented ? null : new byte[WinSize];
        var NewWindow = Fragmented ? null : ArrayPool<byte>.Shared.Rent((int)WinSize);

        if (NewWindow == null)
        {
@@ -126,6 +119,7 @@ internal sealed partial class Unpack : BitInput
            if (Window != null) // If allocated by preceding files.
            {
                //free(Window);
                ArrayPool<byte>.Shared.Return(Window);
                Window = null;
            }
            FragWindow.Init(WinSize);
@@ -170,7 +164,6 @@ internal sealed partial class Unpack : BitInput
        // just for extra safety.
        switch (Method)
        {
#if !RarV2017_SFX_MODULE
            case 15: // rar 1.5 compression
                if (!Fragmented)
                {
@@ -186,33 +179,64 @@ internal sealed partial class Unpack : BitInput
                }

                break;
#endif
#if !RarV2017_RAR5ONLY
            case 29: // rar 3.x compression
                if (!Fragmented)
                {
                    throw new NotImplementedException();
                }

                break;
            case 50: // RAR 5.0 compression algorithm.
                Unpack5(Solid);
                break;
#if !Rar2017_NOSTRICT
            default:
                throw new InvalidFormatException("unknown compression method " + Method);
#endif
        }
    }

    private async System.Threading.Tasks.Task DoUnpackAsync(
        uint Method,
        bool Solid,
        System.Threading.CancellationToken cancellationToken = default
    )
    {
        // Methods <50 will crash in Fragmented mode when accessing NULL Window.
        // They cannot be called in such mode now, but we check it below anyway
        // just for extra safety.
        switch (Method)
        {
#if !RarV2017_SFX_MODULE
            case 15: // rar 1.5 compression
                if (!Fragmented)
                {
                    await Unpack15Async(Solid, cancellationToken).ConfigureAwait(false);
                }

                break;
            case 20: // rar 2.x compression
            case 26: // files larger than 2GB
                if (!Fragmented)
                {
                    await Unpack20Async(Solid, cancellationToken).ConfigureAwait(false);
                }

                break;
#endif
#if !RarV2017_RAR5ONLY
            case 29: // rar 3.x compression
                if (!Fragmented)
                {
                    // TODO: Create Unpack29Async when ready
                    throw new NotImplementedException();
                }

                break;
#endif
            case 50: // RAR 5.0 compression algorithm.
                /*#if RarV2017_RAR_SMP
                if (MaxUserThreads > 1)
                {
                    // We do not use the multithreaded unpack routine to repack RAR archives
                    // in 'suspended' mode, because unlike the single threaded code it can
                    // write more than one dictionary for same loop pass. So we would need
                    // larger buffers of unknown size. Also we do not support multithreading
                    // in fragmented window mode.
                    if (!Fragmented)
                    {
                        Unpack5MT(Solid);
                        break;
                    }
                }
                #endif*/
                Unpack5(Solid);
                // RAR 5.0 has full async support via UnpReadBufAsync and UnpWriteBuf
                await Unpack5Async(Solid, cancellationToken).ConfigureAwait(false);
                break;
#if !Rar2017_NOSTRICT
            default:

@@ -1,14 +1,13 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using SharpCompress.Compressors.RLE90;
using SharpCompress.IO;

namespace SharpCompress.Compressors.Squeezed
{
    [CLSCompliant(true)]
    public class SqueezeStream : Stream, IStreamStack
    {
#if DEBUG_STREAMS
@@ -35,12 +34,15 @@ namespace SharpCompress.Compressors.Squeezed
        private readonly int _compressedSize;
        private const int NUMVALS = 257;
        private const int SPEOF = 256;
        private bool _processed = false;

        private Stream _decodedStream;

        public SqueezeStream(Stream stream, int compressedSize)
        {
            _stream = stream;
            _stream = stream ?? throw new ArgumentNullException(nameof(stream));
            _compressedSize = compressedSize;
            _decodedStream = BuildDecodedStream();

#if DEBUG_STREAMS
            this.DebugConstruct(typeof(SqueezeStream));
#endif
@@ -51,52 +53,46 @@ namespace SharpCompress.Compressors.Squeezed
#if DEBUG_STREAMS
            this.DebugDispose(typeof(SqueezeStream));
#endif
            _decodedStream?.Dispose();
            base.Dispose(disposing);
        }

        public override bool CanRead => true;

        public override bool CanSeek => false;

        public override bool CanWrite => false;

        public override long Length => throw new NotImplementedException();

        public override long Length => throw new NotSupportedException();
        public override long Position
        {
            get => _stream.Position;
            set => throw new NotImplementedException();
            get => throw new NotSupportedException();
            set => throw new NotSupportedException();
        }

        public override void Flush() => throw new NotImplementedException();
        public override void Flush() => throw new NotSupportedException();

        public override long Seek(long offset, SeekOrigin origin) =>
            throw new NotSupportedException();

        public override void SetLength(long value) => throw new NotSupportedException();

        public override void Write(byte[] buffer, int offset, int count) =>
            throw new NotSupportedException();

        public override int Read(byte[] buffer, int offset, int count)
        {
            if (_processed)
            {
                return 0;
            }
            _processed = true;
            using var binaryReader = new BinaryReader(_stream);
            return _decodedStream.Read(buffer, offset, count);
        }

        // Read numnodes (equivalent to convert_u16!(numnodes, buf))
        var numnodes = binaryReader.ReadUInt16();
        private Stream BuildDecodedStream()
        {
            var binaryReader = new BinaryReader(_stream, Encoding.Default, leaveOpen: true);
            int numnodes = binaryReader.ReadUInt16();

            // Validation: numnodes should be within bounds
            if (numnodes >= NUMVALS)
            if (numnodes >= NUMVALS || numnodes == 0)
            {
                throw new InvalidDataException(
                    $"Invalid number of nodes {numnodes} (max {NUMVALS - 1})"
                );
                return new MemoryStream(Array.Empty<byte>());
            }

            // Handle the case where no nodes exist
            if (numnodes == 0)
            {
                return 0;
            }

            // Build dnode (tree of nodes)
            var dnode = new int[numnodes, 2];
            for (int j = 0; j < numnodes; j++)
            {
@@ -104,42 +100,27 @@ namespace SharpCompress.Compressors.Squeezed
                dnode[j, 1] = binaryReader.ReadInt16();
            }

            // Initialize BitReader for reading bits
            var bitReader = new BitReader(_stream);
            var decoded = new List<byte>();

            var huffmanDecoded = new MemoryStream();
            int i = 0;
            // Decode the buffer using the dnode tree

            while (true)
            {
                i = dnode[i, bitReader.ReadBit() ? 1 : 0];
                if (i < 0)
                {
                    i = (short)-(i + 1);
                    i = -(i + 1);
                    if (i == SPEOF)
                    {
                        break;
                    }
                    else
                    {
                        decoded.Add((byte)i);
                        i = 0;
                    }
                    huffmanDecoded.WriteByte((byte)i);
                    i = 0;
                }
            }

            // Unpack the decoded buffer using the RLE class
            var unpacked = RLE.UnpackRLE(decoded.ToArray());
            unpacked.CopyTo(buffer, 0);
            return unpacked.Count();
            huffmanDecoded.Position = 0;
            return new RunLength90Stream(huffmanDecoded, (int)huffmanDecoded.Length);
        }

        public override long Seek(long offset, SeekOrigin origin) =>
            throw new NotImplementedException();

        public override void SetLength(long value) => throw new NotImplementedException();

        public override void Write(byte[] buffer, int offset, int count) =>
            throw new NotImplementedException();
    }
}

@@ -1,6 +1,8 @@
using System;
using System.Buffers.Binary;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

namespace SharpCompress.Compressors.Xz;

@@ -30,6 +32,28 @@ public static class BinaryUtils
    internal static uint ReadLittleEndianUInt32(this Stream stream) =>
        unchecked((uint)ReadLittleEndianInt32(stream));

    public static async Task<int> ReadLittleEndianInt32Async(
        this Stream stream,
        CancellationToken cancellationToken = default
    )
    {
        var bytes = new byte[4];
        var read = await stream.ReadFullyAsync(bytes, cancellationToken).ConfigureAwait(false);
        if (!read)
        {
            throw new EndOfStreamException();
        }
        return BinaryPrimitives.ReadInt32LittleEndian(bytes);
    }

    internal static async Task<uint> ReadLittleEndianUInt32Async(
        this Stream stream,
        CancellationToken cancellationToken = default
    ) =>
        unchecked(
            (uint)await ReadLittleEndianInt32Async(stream, cancellationToken).ConfigureAwait(false)
        );

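The async readers mirror the sync extension methods one-for-one, so a call site only changes at the await. A hypothetical in-assembly usage (the uint variant is internal; the signature constant below is invented for illustration):

    // Sketch only: hypothetical call site for the new async reader.
    internal static async Task<bool> CheckMagicAsync(
        Stream stream,
        CancellationToken cancellationToken = default
    )
    {
        var magic = await stream
            .ReadLittleEndianUInt32Async(cancellationToken)
            .ConfigureAwait(false);
        return magic == 0xDEADBEEF; // placeholder signature, illustrative only
    }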
    internal static byte[] ToBigEndianBytes(this uint uint32)
    {
        var result = BitConverter.GetBytes(uint32);