Do not search for floppy mode page when mode sense returned no pages. Fixes #242

This commit is contained in:
2019-10-26 01:57:55 +01:00
parent efeaae56c0
commit 45d15b410d
2 changed files with 371 additions and 345 deletions

View File

@@ -67,14 +67,14 @@ namespace DiscImageChef.Core.Devices.Dumping
bool sense;
byte scsiMediumType = 0;
byte scsiDensityCode = 0;
bool containsFloppyPage = false;
var containsFloppyPage = false;
const ushort SBC_PROFILE = 0x0001;
DateTime start;
DateTime end;
double totalDuration = 0;
double currentSpeed = 0;
double maxSpeed = double.MinValue;
double minSpeed = double.MaxValue;
var maxSpeed = double.MinValue;
var minSpeed = double.MaxValue;
byte[] readBuffer;
Modes.DecodedMode? decMode = null;
@@ -89,9 +89,9 @@ namespace DiscImageChef.Core.Devices.Dumping
}
dumpLog.WriteLine("Initializing reader.");
Reader scsiReader = new Reader(dev, dev.Timeout, null, dumpRaw);
ulong blocks = scsiReader.GetDeviceBlocks();
uint blockSize = scsiReader.LogicalBlockSize;
var scsiReader = new Reader(dev, dev.Timeout, null, dumpRaw);
var blocks = scsiReader.GetDeviceBlocks();
var blockSize = scsiReader.LogicalBlockSize;
if (scsiReader.FindReadCommand())
{
dumpLog.WriteLine("ERROR: Cannot find correct read command: {0}.", scsiReader.ErrorMessage);
@@ -103,7 +103,8 @@ namespace DiscImageChef.Core.Devices.Dumping
{
blocks++;
UpdateStatus
?.Invoke($"Media has {blocks} blocks of {blockSize} bytes/each. (for a total of {blocks * (ulong)blockSize} bytes)");
?.Invoke(
$"Media has {blocks} blocks of {blockSize} bytes/each. (for a total of {blocks * (ulong) blockSize} bytes)");
}
// Check how many blocks to read, if error show and return
@@ -114,9 +115,9 @@ namespace DiscImageChef.Core.Devices.Dumping
return;
}
uint blocksToRead = scsiReader.BlocksToRead;
uint logicalBlockSize = blockSize;
uint physicalBlockSize = scsiReader.PhysicalBlockSize;
var blocksToRead = scsiReader.BlocksToRead;
var logicalBlockSize = blockSize;
var physicalBlockSize = scsiReader.PhysicalBlockSize;
if (blocks == 0)
{
@@ -133,7 +134,7 @@ namespace DiscImageChef.Core.Devices.Dumping
if (dev.Type == DeviceType.ATAPI) mediaTags.Add(MediaTagType.ATAPI_IDENTIFY, null);
if (dev.IsPcmcia && dev.Cis != null) mediaTags.Add(MediaTagType.PCMCIA_CIS, null);
sense = dev.ScsiInquiry(out byte[] cmdBuf, out _);
sense = dev.ScsiInquiry(out var cmdBuf, out _);
mediaTags.Add(MediaTagType.SCSI_INQUIRY, cmdBuf);
if (!sense)
{
@@ -175,7 +176,7 @@ namespace DiscImageChef.Core.Devices.Dumping
decMode.Value.Header.BlockDescriptors.Length >= 1)
scsiDensityCode = (byte) decMode.Value.Header.BlockDescriptors[0].Density;
containsFloppyPage =
containsFloppyPage = decMode.Value.Pages != null &&
decMode.Value.Pages.Aggregate(containsFloppyPage,
(current, modePage) => current | (modePage.Page == 0x05));
}
@@ -208,7 +209,7 @@ namespace DiscImageChef.Core.Devices.Dumping
dumpLog.WriteLine("SCSI floppy mode page present: {0}.", containsFloppyPage);
dumpLog.WriteLine("Media identified as {0}.", dskType);
uint longBlockSize = scsiReader.LongBlockSize;
var longBlockSize = scsiReader.LongBlockSize;
if (dumpRaw)
if (blockSize == longBlockSize)
@@ -220,7 +221,8 @@ namespace DiscImageChef.Core.Devices.Dumping
if (!force)
{
StoppingErrorMessage
?.Invoke("Not continuing. If you want to continue reading cooked data when raw is not available use the force option.");
?.Invoke(
"Not continuing. If you want to continue reading cooked data when raw is not available use the force option.");
// TODO: Exit more gracefully
return;
}
@@ -232,14 +234,15 @@ namespace DiscImageChef.Core.Devices.Dumping
// Only a block will be read, but it contains 16 sectors and the command expects a sector number, not a block number
blocksToRead = (uint) (longBlockSize == 37856 ? 16 : 1);
UpdateStatus
?.Invoke($"Reading {longBlockSize} raw bytes ({blockSize * blocksToRead} cooked bytes) per sector.");
?.Invoke(
$"Reading {longBlockSize} raw bytes ({blockSize * blocksToRead} cooked bytes) per sector.");
physicalBlockSize = longBlockSize;
blockSize = longBlockSize;
}
bool ret = true;
var ret = true;
foreach(MediaTagType tag in mediaTags.Keys)
foreach (var tag in mediaTags.Keys)
{
if (outputPlugin.SupportedMediaTags.Contains(tag)) continue;
@@ -266,8 +269,8 @@ namespace DiscImageChef.Core.Devices.Dumping
UpdateStatus?.Invoke($"Reading {blocksToRead} sectors at a time.");
dumpLog.WriteLine("Reading {0} sectors at a time.", blocksToRead);
MhddLog mhddLog = new MhddLog(outputPrefix + ".mhddlog.bin", dev, blocks, blockSize, blocksToRead);
IbgLog ibgLog = new IbgLog(outputPrefix + ".ibg", SBC_PROFILE);
var mhddLog = new MhddLog(outputPrefix + ".mhddlog.bin", dev, blocks, blockSize, blocksToRead);
var ibgLog = new IbgLog(outputPrefix + ".ibg", SBC_PROFILE);
ret = outputPlugin.Create(outputPath, dskType, formatOptions, blocks, blockSize);
// Cannot create image
@@ -286,6 +289,7 @@ namespace DiscImageChef.Core.Devices.Dumping
if (opticalDisc)
{
if (outputPlugin is IWritableOpticalImage opticalPlugin)
{
opticalPlugin.SetTracks(new List<Track>
{
new Track
@@ -299,6 +303,7 @@ namespace DiscImageChef.Core.Devices.Dumping
TrackType = TrackType.Data
}
});
}
else
{
dumpLog.WriteLine("The specified plugin does not support storing optical disc images.");
@@ -308,19 +313,20 @@ namespace DiscImageChef.Core.Devices.Dumping
}
else if (decMode.HasValue)
{
bool setGeometry = false;
var setGeometry = false;
foreach(Modes.ModePage page in decMode.Value.Pages)
foreach (var page in decMode.Value.Pages)
if (page.Page == 0x04 && page.Subpage == 0x00)
{
Modes.ModePage_04? rigidPage = Modes.DecodeModePage_04(page.PageResponse);
var rigidPage = Modes.DecodeModePage_04(page.PageResponse);
if (!rigidPage.HasValue || setGeometry) continue;
dumpLog.WriteLine("Setting geometry to {0} cylinders, {1} heads, {2} sectors per track",
rigidPage.Value.Cylinders, rigidPage.Value.Heads,
(uint) (blocks / (rigidPage.Value.Cylinders * rigidPage.Value.Heads)));
UpdateStatus
?.Invoke($"Setting geometry to {rigidPage.Value.Cylinders} cylinders, {rigidPage.Value.Heads} heads, {(uint)(blocks / (rigidPage.Value.Cylinders * rigidPage.Value.Heads))} sectors per track");
?.Invoke(
$"Setting geometry to {rigidPage.Value.Cylinders} cylinders, {rigidPage.Value.Heads} heads, {(uint) (blocks / (rigidPage.Value.Cylinders * rigidPage.Value.Heads))} sectors per track");
outputPlugin.SetGeometry(rigidPage.Value.Cylinders, rigidPage.Value.Heads,
(uint) (blocks / (rigidPage.Value.Cylinders * rigidPage.Value.Heads)));
@@ -328,14 +334,15 @@ namespace DiscImageChef.Core.Devices.Dumping
}
else if (page.Page == 0x05 && page.Subpage == 0x00)
{
Modes.ModePage_05? flexiblePage = Modes.DecodeModePage_05(page.PageResponse);
var flexiblePage = Modes.DecodeModePage_05(page.PageResponse);
if (!flexiblePage.HasValue) continue;
dumpLog.WriteLine("Setting geometry to {0} cylinders, {1} heads, {2} sectors per track",
flexiblePage.Value.Cylinders, flexiblePage.Value.Heads,
flexiblePage.Value.SectorsPerTrack);
UpdateStatus
?.Invoke($"Setting geometry to {flexiblePage.Value.Cylinders} cylinders, {flexiblePage.Value.Heads} heads, {flexiblePage.Value.SectorsPerTrack} sectors per track");
?.Invoke(
$"Setting geometry to {flexiblePage.Value.Cylinders} cylinders, {flexiblePage.Value.Heads} heads, {flexiblePage.Value.SectorsPerTrack} sectors per track");
outputPlugin.SetGeometry(flexiblePage.Value.Cylinders, flexiblePage.Value.Heads,
flexiblePage.Value.SectorsPerTrack);
setGeometry = true;
@@ -358,11 +365,11 @@ namespace DiscImageChef.Core.Devices.Dumping
dumpLog.WriteLine("Resuming from block {0}.", resume.NextBlock);
}
bool newTrim = false;
DateTime timeSpeedStart = DateTime.UtcNow;
var newTrim = false;
var timeSpeedStart = DateTime.UtcNow;
ulong sectorSpeedStart = 0;
InitProgress?.Invoke();
for(ulong i = resume.NextBlock; i < blocks; i += blocksToRead)
for (var i = resume.NextBlock; i < blocks; i += blocksToRead)
{
if (aborted)
{
@@ -382,14 +389,14 @@ namespace DiscImageChef.Core.Devices.Dumping
UpdateProgress?.Invoke($"Reading sector {i} of {blocks} ({currentSpeed:F3} MiB/sec.)", (long) i,
(long) blocks);
sense = scsiReader.ReadBlocks(out readBuffer, i, blocksToRead, out double cmdDuration);
sense = scsiReader.ReadBlocks(out readBuffer, i, blocksToRead, out var cmdDuration);
totalDuration += cmdDuration;
if (!sense && !dev.Error)
{
mhddLog.Write(i, cmdDuration);
ibgLog.Write(i, currentSpeed * 1024);
DateTime writeStart = DateTime.Now;
var writeStart = DateTime.Now;
outputPlugin.WriteSectors(readBuffer, i, blocksToRead);
imageWriteDuration += (DateTime.Now - writeStart).TotalSeconds;
extents.Add(i, blocksToRead, true);
@@ -402,11 +409,11 @@ namespace DiscImageChef.Core.Devices.Dumping
if (i + skip > blocks) skip = (uint) (blocks - i);
// Write empty data
DateTime writeStart = DateTime.Now;
var writeStart = DateTime.Now;
outputPlugin.WriteSectors(new byte[blockSize * skip], i, skip);
imageWriteDuration += (DateTime.Now - writeStart).TotalSeconds;
for(ulong b = i; b < i + skip; b++) resume.BadBlocks.Add(b);
for (var b = i; b < i + skip; b++) resume.BadBlocks.Add(b);
mhddLog.Write(i, cmdDuration < 500 ? 65535 : cmdDuration);
@@ -419,7 +426,7 @@ namespace DiscImageChef.Core.Devices.Dumping
sectorSpeedStart += blocksToRead;
resume.NextBlock = i + blocksToRead;
double elapsed = (DateTime.UtcNow - timeSpeedStart).TotalSeconds;
var elapsed = (DateTime.UtcNow - timeSpeedStart).TotalSeconds;
if (elapsed < 1) continue;
currentSpeed = sectorSpeedStart * blockSize / (1048576 * elapsed);
@@ -435,9 +442,11 @@ namespace DiscImageChef.Core.Devices.Dumping
devicePath);
UpdateStatus?.Invoke($"Dump finished in {(end - start).TotalSeconds} seconds.");
UpdateStatus
?.Invoke($"Average dump speed {(double)blockSize * (double)(blocks + 1) / 1024 / (totalDuration / 1000):F3} KiB/sec.");
?.Invoke(
$"Average dump speed {(double) blockSize * (double) (blocks + 1) / 1024 / (totalDuration / 1000):F3} KiB/sec.");
UpdateStatus
?.Invoke($"Average write speed {(double)blockSize * (double)(blocks + 1) / 1024 / imageWriteDuration:F3} KiB/sec.");
?.Invoke(
$"Average write speed {(double) blockSize * (double) (blocks + 1) / 1024 / imageWriteDuration:F3} KiB/sec.");
dumpLog.WriteLine("Dump finished in {0} seconds.", (end - start).TotalSeconds);
dumpLog.WriteLine("Average dump speed {0:F3} KiB/sec.",
(double) blockSize * (double) (blocks + 1) / 1024 / (totalDuration / 1000));
@@ -445,15 +454,16 @@ namespace DiscImageChef.Core.Devices.Dumping
(double) blockSize * (double) (blocks + 1) / 1024 / imageWriteDuration);
#region Trimming
if (resume.BadBlocks.Count > 0 && !aborted && !notrim && newTrim)
{
start = DateTime.UtcNow;
UpdateStatus?.Invoke("Trimming bad sectors");
dumpLog.WriteLine("Trimming bad sectors");
ulong[] tmpArray = resume.BadBlocks.ToArray();
var tmpArray = resume.BadBlocks.ToArray();
InitProgress?.Invoke();
foreach(ulong badSector in tmpArray)
foreach (var badSector in tmpArray)
{
if (aborted)
{
@@ -465,7 +475,7 @@ namespace DiscImageChef.Core.Devices.Dumping
PulseProgress?.Invoke($"Trimming sector {badSector}");
sense = scsiReader.ReadBlock(out readBuffer, badSector, out double cmdDuration);
sense = scsiReader.ReadBlock(out readBuffer, badSector, out var cmdDuration);
if (sense || dev.Error) continue;
@@ -479,14 +489,16 @@ namespace DiscImageChef.Core.Devices.Dumping
UpdateStatus?.Invoke($"Trimming finished in {(end - start).TotalSeconds} seconds.");
dumpLog.WriteLine("Trimming finished in {0} seconds.", (end - start).TotalSeconds);
}
#endregion Trimming
#region Error handling
if (resume.BadBlocks.Count > 0 && !aborted && retryPasses > 0)
{
int pass = 1;
bool forward = true;
bool runningPersistent = false;
var pass = 1;
var forward = true;
var runningPersistent = false;
Modes.ModePage? currentModePage = null;
byte[] md6;
@@ -506,20 +518,20 @@ namespace DiscImageChef.Core.Devices.Dumping
if (!sense)
{
Modes.DecodedMode? dcMode10 = Modes.DecodeMode10(readBuffer, dev.ScsiType);
var dcMode10 = Modes.DecodeMode10(readBuffer, dev.ScsiType);
if (dcMode10.HasValue)
foreach(Modes.ModePage modePage in dcMode10.Value.Pages)
foreach (var modePage in dcMode10.Value.Pages)
if (modePage.Page == 0x01 && modePage.Subpage == 0x00)
currentModePage = modePage;
}
}
else
{
Modes.DecodedMode? dcMode6 = Modes.DecodeMode6(readBuffer, dev.ScsiType);
var dcMode6 = Modes.DecodeMode6(readBuffer, dev.ScsiType);
if (dcMode6.HasValue)
foreach(Modes.ModePage modePage in dcMode6.Value.Pages)
foreach (var modePage in dcMode6.Value.Pages)
if (modePage.Page == 0x01 && modePage.Subpage == 0x00)
currentModePage = modePage;
}
@@ -560,7 +572,7 @@ namespace DiscImageChef.Core.Devices.Dumping
if (dev.ScsiType == PeripheralDeviceTypes.MultiMediaDevice)
{
pgMmc = new Modes.ModePage_01_MMC {PS = false, ReadRetryCount = 255, Parameter = 0x20};
Modes.DecodedMode md = new Modes.DecodedMode
var md = new Modes.DecodedMode
{
Header = new Modes.ModeHeader(),
Pages = new[]
@@ -591,7 +603,7 @@ namespace DiscImageChef.Core.Devices.Dumping
DCR = false,
ReadRetryCount = 255
};
Modes.DecodedMode md = new Modes.DecodedMode
var md = new Modes.DecodedMode
{
Header = new Modes.ModeHeader(),
Pages = new[]
@@ -608,23 +620,28 @@ namespace DiscImageChef.Core.Devices.Dumping
UpdateStatus?.Invoke("Sending MODE SELECT to drive (return damaged blocks).");
dumpLog.WriteLine("Sending MODE SELECT to drive (return damaged blocks).");
sense = dev.ModeSelect(md6, out byte[] senseBuf, true, false, dev.Timeout, out _);
sense = dev.ModeSelect(md6, out var senseBuf, true, false, dev.Timeout, out _);
if (sense) sense = dev.ModeSelect10(md10, out senseBuf, true, false, dev.Timeout, out _);
if (sense)
{
UpdateStatus
?.Invoke("Drive did not accept MODE SELECT command for persistent error reading, try another drive.");
?.Invoke(
"Drive did not accept MODE SELECT command for persistent error reading, try another drive.");
DicConsole.DebugWriteLine("Error: {0}", Sense.PrettifySense(senseBuf));
dumpLog.WriteLine("Drive did not accept MODE SELECT command for persistent error reading, try another drive.");
dumpLog.WriteLine(
"Drive did not accept MODE SELECT command for persistent error reading, try another drive.");
}
else
{
runningPersistent = true;
}
else runningPersistent = true;
}
InitProgress?.Invoke();
repeatRetry:
ulong[] tmpArray = resume.BadBlocks.ToArray();
foreach(ulong badSector in tmpArray)
var tmpArray = resume.BadBlocks.ToArray();
foreach (var badSector in tmpArray)
{
if (aborted)
{
@@ -638,7 +655,7 @@ namespace DiscImageChef.Core.Devices.Dumping
forward ? "forward" : "reverse",
runningPersistent ? "recovering partial data, " : ""));
sense = scsiReader.ReadBlock(out readBuffer, badSector, out double cmdDuration);
sense = scsiReader.ReadBlock(out readBuffer, badSector, out var cmdDuration);
totalDuration += cmdDuration;
if (!sense && !dev.Error)
@@ -649,7 +666,10 @@ namespace DiscImageChef.Core.Devices.Dumping
UpdateStatus?.Invoke($"Correctly retried block {badSector} in pass {pass}.");
dumpLog.WriteLine("Correctly retried block {0} in pass {1}.", badSector, pass);
}
else if(runningPersistent) outputPlugin.WriteSector(readBuffer, badSector);
else if (runningPersistent)
{
outputPlugin.WriteSector(readBuffer, badSector);
}
}
if (pass < retryPasses && !aborted && resume.BadBlocks.Count > 0)
@@ -663,7 +683,7 @@ namespace DiscImageChef.Core.Devices.Dumping
if (runningPersistent && currentModePage.HasValue)
{
Modes.DecodedMode md = new Modes.DecodedMode
var md = new Modes.DecodedMode
{
Header = new Modes.ModeHeader(), Pages = new[] {currentModePage.Value}
};
@@ -678,11 +698,13 @@ namespace DiscImageChef.Core.Devices.Dumping
EndProgress?.Invoke();
}
#endregion Error handling
if (!aborted)
if (opticalDisc)
foreach(KeyValuePair<MediaTagType, byte[]> tag in mediaTags)
{
foreach (var tag in mediaTags)
{
if (tag.Value is null)
{
@@ -700,6 +722,7 @@ namespace DiscImageChef.Core.Devices.Dumping
outputPlugin.ErrorMessage);
return;
}
}
else
{
if (!dev.IsRemovable || dev.IsUsb)
@@ -810,16 +833,16 @@ namespace DiscImageChef.Core.Devices.Dumping
}
resume.BadBlocks.Sort();
foreach(ulong bad in resume.BadBlocks) dumpLog.WriteLine("Sector {0} could not be read.", bad);
foreach (var bad in resume.BadBlocks) dumpLog.WriteLine("Sector {0} could not be read.", bad);
currentTry.Extents = ExtentsConverter.ToMetadata(extents);
outputPlugin.SetDumpHardware(resume.Tries);
if (preSidecar != null) outputPlugin.SetCicmMetadata(preSidecar);
dumpLog.WriteLine("Closing output file.");
UpdateStatus?.Invoke("Closing output file.");
DateTime closeStart = DateTime.Now;
var closeStart = DateTime.Now;
outputPlugin.Close();
DateTime closeEnd = DateTime.Now;
var closeEnd = DateTime.Now;
UpdateStatus?.Invoke($"Closed in {(closeEnd - closeStart).TotalSeconds} seconds.");
dumpLog.WriteLine("Closed in {0} seconds.", (closeEnd - closeStart).TotalSeconds);
@@ -835,16 +858,16 @@ namespace DiscImageChef.Core.Devices.Dumping
{
UpdateStatus?.Invoke("Creating sidecar.");
dumpLog.WriteLine("Creating sidecar.");
FiltersList filters = new FiltersList();
IFilter filter = filters.GetFilter(outputPath);
IMediaImage inputPlugin = ImageFormat.Detect(filter);
var filters = new FiltersList();
var filter = filters.GetFilter(outputPath);
var inputPlugin = ImageFormat.Detect(filter);
if (!inputPlugin.Open(filter))
{
StoppingErrorMessage?.Invoke("Could not open created image.");
return;
}
DateTime chkStart = DateTime.UtcNow;
var chkStart = DateTime.UtcNow;
sidecarClass = new Sidecar(inputPlugin, outputPath, filter.Id, encoding);
sidecarClass.InitProgressEvent += InitProgress;
sidecarClass.UpdateProgressEvent += UpdateProgress;
@@ -853,13 +876,14 @@ namespace DiscImageChef.Core.Devices.Dumping
sidecarClass.UpdateProgressEvent2 += UpdateProgress2;
sidecarClass.EndProgressEvent2 += EndProgress2;
sidecarClass.UpdateStatusEvent += UpdateStatus;
CICMMetadataType sidecar = sidecarClass.Create();
var sidecar = sidecarClass.Create();
end = DateTime.UtcNow;
totalChkDuration = (end - chkStart).TotalMilliseconds;
UpdateStatus?.Invoke($"Sidecar created in {(end - chkStart).TotalSeconds} seconds.");
UpdateStatus
?.Invoke($"Average checksum speed {(double)blockSize * (double)(blocks + 1) / 1024 / (totalChkDuration / 1000):F3} KiB/sec.");
?.Invoke(
$"Average checksum speed {(double) blockSize * (double) (blocks + 1) / 1024 / (totalChkDuration / 1000):F3} KiB/sec.");
dumpLog.WriteLine("Sidecar created in {0} seconds.", (end - chkStart).TotalSeconds);
dumpLog.WriteLine("Average checksum speed {0:F3} KiB/sec.",
(double) blockSize * (double) (blocks + 1) / 1024 / (totalChkDuration / 1000));
@@ -872,7 +896,7 @@ namespace DiscImageChef.Core.Devices.Dumping
sidecar = preSidecar;
}
List<(ulong start, string type)> filesystems = new List<(ulong start, string type)>();
var filesystems = new List<(ulong start, string type)>();
if (sidecar.OpticalDisc[0].Track != null)
filesystems.AddRange(from xmlTrack in sidecar.OpticalDisc[0].Track
where xmlTrack.FileSystemInformation != null
@@ -890,13 +914,13 @@ namespace DiscImageChef.Core.Devices.Dumping
// TODO: Implement layers
sidecar.OpticalDisc[0].Dimensions = Dimensions.DimensionsFromMediaType(dskType);
CommonTypes.Metadata.MediaType.MediaTypeToString(dskType, out string xmlDskTyp,
out string xmlDskSubTyp);
CommonTypes.Metadata.MediaType.MediaTypeToString(dskType, out var xmlDskTyp,
out var xmlDskSubTyp);
sidecar.OpticalDisc[0].DiscType = xmlDskTyp;
sidecar.OpticalDisc[0].DiscSubType = xmlDskSubTyp;
sidecar.OpticalDisc[0].DumpHardwareArray = resume.Tries.ToArray();
foreach(KeyValuePair<MediaTagType, byte[]> tag in mediaTags)
foreach (var tag in mediaTags)
if (outputPlugin.SupportedMediaTags.Contains(tag.Key))
AddMediaTagToSidecar(outputPath, tag, ref sidecar);
}
@@ -1031,7 +1055,7 @@ namespace DiscImageChef.Core.Devices.Dumping
}
}
List<(ulong start, string type)> filesystems = new List<(ulong start, string type)>();
var filesystems = new List<(ulong start, string type)>();
if (sidecar.BlockMedia[0].FileSystemInformation != null)
filesystems.AddRange(from partition in sidecar.BlockMedia[0].FileSystemInformation
where partition.FileSystems != null
@@ -1046,8 +1070,8 @@ namespace DiscImageChef.Core.Devices.Dumping
}
sidecar.BlockMedia[0].Dimensions = Dimensions.DimensionsFromMediaType(dskType);
CommonTypes.Metadata.MediaType.MediaTypeToString(dskType, out string xmlDskTyp,
out string xmlDskSubTyp);
CommonTypes.Metadata.MediaType.MediaTypeToString(dskType, out var xmlDskTyp,
out var xmlDskSubTyp);
sidecar.BlockMedia[0].DiskType = xmlDskTyp;
sidecar.BlockMedia[0].DiskSubType = xmlDskSubTyp;
// TODO: Implement device firmware revision
@@ -1069,18 +1093,20 @@ namespace DiscImageChef.Core.Devices.Dumping
UpdateStatus?.Invoke("Writing metadata sidecar");
FileStream xmlFs = new FileStream(outputPrefix + ".cicm.xml", FileMode.Create);
var xmlFs = new FileStream(outputPrefix + ".cicm.xml", FileMode.Create);
XmlSerializer xmlSer = new XmlSerializer(typeof(CICMMetadataType));
var xmlSer = new XmlSerializer(typeof(CICMMetadataType));
xmlSer.Serialize(xmlFs, sidecar);
xmlFs.Close();
}
UpdateStatus?.Invoke("");
UpdateStatus
?.Invoke($"Took a total of {(end - start).TotalSeconds:F3} seconds ({totalDuration / 1000:F3} processing commands, {totalChkDuration / 1000:F3} checksumming, {imageWriteDuration:F3} writing, {(closeEnd - closeStart).TotalSeconds:F3} closing).");
?.Invoke(
$"Took a total of {(end - start).TotalSeconds:F3} seconds ({totalDuration / 1000:F3} processing commands, {totalChkDuration / 1000:F3} checksumming, {imageWriteDuration:F3} writing, {(closeEnd - closeStart).TotalSeconds:F3} closing).");
UpdateStatus
?.Invoke($"Average speed: {(double)blockSize * (double)(blocks + 1) / 1048576 / (totalDuration / 1000):F3} MiB/sec.");
?.Invoke(
$"Average speed: {(double) blockSize * (double) (blocks + 1) / 1048576 / (totalDuration / 1000):F3} MiB/sec.");
UpdateStatus?.Invoke($"Fastest speed burst: {maxSpeed:F3} MiB/sec.");
UpdateStatus?.Invoke($"Slowest speed burst: {minSpeed:F3} MiB/sec.");
UpdateStatus?.Invoke($"{resume.BadBlocks.Count} sectors could not be read.");

View File

@@ -5,7 +5,7 @@ _dicbase=discimagechef
packager='Natalia Portillo <claunia@claunia.com>'
pkgbase='discimagechef-git'
pkgname=('discimagechef-git' 'discimagechef-gtk-git')
pkgver=v4.5.1.1692.r813.gb1a6c363
pkgver=v4.5.1.1692.r814.gefeaae56
pkgrel=1
pkgdesc='Disc image management and creation tool for disks, tapes, optical and solid state media'
arch=('x86_64' 'armv7h' 'aarch64')