Added support for resume mapfile.

2017-06-20 05:48:09 +01:00
parent ff29d85926
commit e57f2a5131
15 changed files with 327 additions and 190 deletions

View File

@@ -342,6 +342,8 @@ namespace DiscImageChef.CommonTypes
ATARI_525_ED, ATARI_525_ED,
/// <summary>5,25", SS, DD, 40 tracks, 18 spt, 256 bytes/sector, MFM</summary> /// <summary>5,25", SS, DD, 40 tracks, 18 spt, 256 bytes/sector, MFM</summary>
ATARI_525_DD, ATARI_525_DD,
/// <summary>3,5", DS, DD, 80 tracks, 10 spt, 512 bytes/sector, MFM</summary>
ATARI_35_DS_DD,
#endregion Atari standard floppy formats #endregion Atari standard floppy formats
#region Commodore standard floppy formats #region Commodore standard floppy formats

View File

@@ -61,6 +61,16 @@ namespace DiscImageChef.Core
return dataFs.Read(array, offset, count); return dataFs.Read(array, offset, count);
} }
public long Seek(ulong block, ulong blockSize)
{
return dataFs.Seek((long)(block * blockSize), SeekOrigin.Begin);
}
public long Seek(ulong offset, SeekOrigin origin)
{
return dataFs.Seek((long)offset, origin);
}
public long Seek(long offset, SeekOrigin origin) public long Seek(long offset, SeekOrigin origin)
{ {
return dataFs.Seek(offset, origin); return dataFs.Seek(offset, origin);
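The new block-based Seek(ulong block, ulong blockSize) overload is what lets the dumpers reposition the output file when resuming: instead of computing byte offsets at every call site, they call dumpFile.Seek(resume.NextBlock, blockSize) (see the ATA, SBC and MMC hunks below) and the helper multiplies the block number by the block size before seeking from the start of the file.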

View File

@@ -48,6 +48,7 @@ using DiscImageChef.Filters;
using DiscImageChef.ImagePlugins; using DiscImageChef.ImagePlugins;
using DiscImageChef.PartPlugins; using DiscImageChef.PartPlugins;
using Schemas; using Schemas;
using Extents;
namespace DiscImageChef.Core.Devices.Dumping namespace DiscImageChef.Core.Devices.Dumping
{ {
@@ -166,7 +167,6 @@ namespace DiscImageChef.Core.Devices.Dumping
double currentSpeed = 0; double currentSpeed = 0;
double maxSpeed = double.MinValue; double maxSpeed = double.MinValue;
double minSpeed = double.MaxValue; double minSpeed = double.MaxValue;
List<ulong> unreadableSectors = new List<ulong>();
Checksum dataChk; Checksum dataChk;
aborted = false; aborted = false;
@@ -205,6 +205,13 @@ namespace DiscImageChef.Core.Devices.Dumping
byte heads = ataReader.Heads; byte heads = ataReader.Heads;
byte sectors = ataReader.Sectors; byte sectors = ataReader.Sectors;
bool removable = false || (!dev.IsCompactFlash && ataId.GeneralConfiguration.HasFlag(Decoders.ATA.Identify.GeneralConfigurationBit.Removable));
DumpHardwareType currentTry = null;
ExtentsULong extents = null;
ResumeSupport.Process(ataReader.IsLBA, removable, blocks, dev.Manufacturer, dev.Model, dev.Serial, dev.PlatformID, ref resume, ref currentTry, ref extents);
if(currentTry == null || extents == null)
throw new Exception("Could not process resume file, not continuing...");
if(ataReader.IsLBA) if(ataReader.IsLBA)
{ {
DicConsole.WriteLine("Reading {0} sectors at a time.", blocksToRead); DicConsole.WriteLine("Reading {0} sectors at a time.", blocksToRead);
@@ -212,12 +219,16 @@ namespace DiscImageChef.Core.Devices.Dumping
mhddLog = new MHDDLog(outputPrefix + ".mhddlog.bin", dev, blocks, blockSize, blocksToRead); mhddLog = new MHDDLog(outputPrefix + ".mhddlog.bin", dev, blocks, blockSize, blocksToRead);
ibgLog = new IBGLog(outputPrefix + ".ibg", currentProfile); ibgLog = new IBGLog(outputPrefix + ".ibg", currentProfile);
dumpFile = new DataFile(outputPrefix + ".bin"); dumpFile = new DataFile(outputPrefix + ".bin");
dumpFile.Seek(resume.NextBlock, blockSize);
start = DateTime.UtcNow; start = DateTime.UtcNow;
for(ulong i = 0; i < blocks; i += blocksToRead) for(ulong i = resume.NextBlock; i < blocks; i += blocksToRead)
{ {
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
if((blocks - i) < blocksToRead) if((blocks - i) < blocksToRead)
blocksToRead = (byte)(blocks - i); blocksToRead = (byte)(blocks - i);
@@ -238,11 +249,12 @@ namespace DiscImageChef.Core.Devices.Dumping
mhddLog.Write(i, duration); mhddLog.Write(i, duration);
ibgLog.Write(i, currentSpeed * 1024); ibgLog.Write(i, currentSpeed * 1024);
dumpFile.Write(cmdBuf); dumpFile.Write(cmdBuf);
extents.Add(i, blocksToRead, true);
} }
else else
{ {
for(ulong b = i; b < i + blocksToRead; b++) for(ulong b = i; b < i + blocksToRead; b++)
unreadableSectors.Add(b); resume.BadBlocks.Add(b);
if(duration < 500) if(duration < 500)
mhddLog.Write(i, 65535); mhddLog.Write(i, 65535);
else else
@@ -256,6 +268,7 @@ namespace DiscImageChef.Core.Devices.Dumping
currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (duration / (double)1000); currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (duration / (double)1000);
#pragma warning restore IDE0004 // Cast is necessary, otherwise incorrect value is created #pragma warning restore IDE0004 // Cast is necessary, otherwise incorrect value is created
GC.Collect(); GC.Collect();
resume.NextBlock = i + blocksToRead;
} }
end = DateTime.Now; end = DateTime.Now;
DicConsole.WriteLine(); DicConsole.WriteLine();
@@ -265,30 +278,22 @@ namespace DiscImageChef.Core.Devices.Dumping
#pragma warning restore IDE0004 // Cast is necessary, otherwise incorrect value is created #pragma warning restore IDE0004 // Cast is necessary, otherwise incorrect value is created
#region Error handling #region Error handling
if(unreadableSectors.Count > 0 && !aborted) if(resume.BadBlocks.Count > 0 && !aborted)
{ {
List<ulong> tmpList = new List<ulong>();
foreach(ulong ur in unreadableSectors)
{
for(ulong i = ur; i < ur + blocksToRead; i++)
tmpList.Add(i);
}
tmpList.Sort();
int pass = 0; int pass = 0;
bool forward = true; bool forward = true;
bool runningPersistent = false; bool runningPersistent = false;
unreadableSectors = tmpList;
repeatRetryLba: repeatRetryLba:
ulong[] tmpArray = unreadableSectors.ToArray(); ulong[] tmpArray = resume.BadBlocks.ToArray();
foreach(ulong badSector in tmpArray) foreach(ulong badSector in tmpArray)
{ {
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
DicConsole.Write("\rRetrying sector {0}, pass {1}, {3}{2}", badSector, pass + 1, forward ? "forward" : "reverse", runningPersistent ? "recovering partial data, " : ""); DicConsole.Write("\rRetrying sector {0}, pass {1}, {3}{2}", badSector, pass + 1, forward ? "forward" : "reverse", runningPersistent ? "recovering partial data, " : "");
@@ -298,25 +303,28 @@ namespace DiscImageChef.Core.Devices.Dumping
if(!error) if(!error)
{ {
unreadableSectors.Remove(badSector); resume.BadBlocks.Remove(badSector);
extents.Add(badSector);
dumpFile.WriteAt(cmdBuf, badSector, blockSize); dumpFile.WriteAt(cmdBuf, badSector, blockSize);
} }
else if(runningPersistent) else if(runningPersistent)
dumpFile.WriteAt(cmdBuf, badSector, blockSize); dumpFile.WriteAt(cmdBuf, badSector, blockSize);
} }
if(pass < retryPasses && !aborted && unreadableSectors.Count > 0) if(pass < retryPasses && !aborted && resume.BadBlocks.Count > 0)
{ {
pass++; pass++;
forward = !forward; forward = !forward;
unreadableSectors.Sort(); resume.BadBlocks.Sort();
unreadableSectors.Reverse(); resume.BadBlocks.Reverse();
goto repeatRetryLba; goto repeatRetryLba;
} }
DicConsole.WriteLine(); DicConsole.WriteLine();
} }
#endregion Error handling LBA #endregion Error handling LBA
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
} }
else else
{ {
@@ -334,7 +342,10 @@ namespace DiscImageChef.Core.Devices.Dumping
for(byte Sc = 1; Sc < sectors; Sc++) for(byte Sc = 1; Sc < sectors; Sc++)
{ {
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
#pragma warning disable RECS0018 // Comparison of floating point numbers with equality operator #pragma warning disable RECS0018 // Comparison of floating point numbers with equality operator
if(currentSpeed > maxSpeed && currentSpeed != 0) if(currentSpeed > maxSpeed && currentSpeed != 0)
@@ -354,10 +365,11 @@ namespace DiscImageChef.Core.Devices.Dumping
mhddLog.Write(currentBlock, duration); mhddLog.Write(currentBlock, duration);
ibgLog.Write(currentBlock, currentSpeed * 1024); ibgLog.Write(currentBlock, currentSpeed * 1024);
dumpFile.Write(cmdBuf); dumpFile.Write(cmdBuf);
extents.Add(currentBlock);
} }
else else
{ {
unreadableSectors.Add(currentBlock); resume.BadBlocks.Add(currentBlock);
if(duration < 500) if(duration < 500)
mhddLog.Write(currentBlock, 65535); mhddLog.Write(currentBlock, 65535);
else else
@@ -567,13 +579,9 @@ namespace DiscImageChef.Core.Devices.Dumping
DicConsole.WriteLine("Avegare speed: {0:F3} MiB/sec.", (((double)blockSize * (double)(blocks + 1)) / 1048576) / (totalDuration / 1000)); DicConsole.WriteLine("Avegare speed: {0:F3} MiB/sec.", (((double)blockSize * (double)(blocks + 1)) / 1048576) / (totalDuration / 1000));
DicConsole.WriteLine("Fastest speed burst: {0:F3} MiB/sec.", maxSpeed); DicConsole.WriteLine("Fastest speed burst: {0:F3} MiB/sec.", maxSpeed);
DicConsole.WriteLine("Slowest speed burst: {0:F3} MiB/sec.", minSpeed); DicConsole.WriteLine("Slowest speed burst: {0:F3} MiB/sec.", minSpeed);
DicConsole.WriteLine("{0} sectors could not be read.", unreadableSectors.Count); DicConsole.WriteLine("{0} sectors could not be read.", resume.BadBlocks.Count);
if(unreadableSectors.Count > 0) if(resume.BadBlocks.Count > 0)
{ resume.BadBlocks.Sort();
unreadableSectors.Sort();
foreach(ulong bad in unreadableSectors)
DicConsole.WriteLine("Sector {0} could not be read", bad);
}
DicConsole.WriteLine(); DicConsole.WriteLine();
if(!aborted) if(!aborted)
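Two ATA-specific details are worth noting from the hunks above: ResumeSupport.Process is handed ataReader.IsLBA, and (as the helper further down shows) it refuses to resume a CHS-addressed dump with a NotImplementedException. Accordingly, the CHS path only gains extent and bad-block bookkeeping here; only the LBA loop actually honours resume.NextBlock as its starting point.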

View File

@@ -45,11 +45,13 @@ using DiscImageChef.Devices;
using Schemas; using Schemas;
using System.Linq; using System.Linq;
using DiscImageChef.Decoders.CD; using DiscImageChef.Decoders.CD;
using Extents;
namespace DiscImageChef.Core.Devices.Dumping namespace DiscImageChef.Core.Devices.Dumping
{ {
internal class CompactDisc internal class CompactDisc
{ {
// TODO: Add support for resume file
internal static void Dump(Device dev, string devicePath, string outputPrefix, ushort retryPasses, bool force, bool dumpRaw, bool persistent, bool stopOnError, ref CICMMetadataType sidecar, ref MediaType dskType, bool separateSubchannel, ref Metadata.Resume resume) internal static void Dump(Device dev, string devicePath, string outputPrefix, ushort retryPasses, bool force, bool dumpRaw, bool persistent, bool stopOnError, ref CICMMetadataType sidecar, ref MediaType dskType, bool separateSubchannel, ref Metadata.Resume resume)
{ {
MHDDLog mhddLog; MHDDLog mhddLog;
@@ -68,7 +70,6 @@ namespace DiscImageChef.Core.Devices.Dumping
double currentSpeed = 0; double currentSpeed = 0;
double maxSpeed = double.MinValue; double maxSpeed = double.MinValue;
double minSpeed = double.MaxValue; double minSpeed = double.MaxValue;
List<ulong> unreadableSectors = new List<ulong>();
Checksum dataChk; Checksum dataChk;
bool readcd = false; bool readcd = false;
byte[] readBuffer; byte[] readBuffer;
@@ -377,6 +378,12 @@ namespace DiscImageChef.Core.Devices.Dumping
DicConsole.WriteLine("Using MMC READ CD command."); DicConsole.WriteLine("Using MMC READ CD command.");
} }
DumpHardwareType currentTry = null;
ExtentsULong extents = null;
ResumeSupport.Process(true, true, blocks, dev.Manufacturer, dev.Model, dev.Serial, dev.PlatformID, ref resume, ref currentTry, ref extents);
if(currentTry == null || extents == null)
throw new Exception("Could not process resume file, not continuing...");
DicConsole.WriteLine("Trying to read Lead-In..."); DicConsole.WriteLine("Trying to read Lead-In...");
bool gotLeadIn = false; bool gotLeadIn = false;
int leadInSectorsGood = 0, leadInSectorsTotal = 0; int leadInSectorsGood = 0, leadInSectorsTotal = 0;
@@ -388,7 +395,7 @@ namespace DiscImageChef.Core.Devices.Dumping
readBuffer = null; readBuffer = null;
for(int leadInBlock = -150; leadInBlock < 0; leadInBlock++) for(int leadInBlock = -150; leadInBlock < 0 && resume.NextBlock == 0; leadInBlock++)
{ {
if(aborted) if(aborted)
break; break;
@@ -434,7 +441,7 @@ namespace DiscImageChef.Core.Devices.Dumping
{ {
sidecar.OpticalDisc[0].LeadIn = new BorderType[] sidecar.OpticalDisc[0].LeadIn = new BorderType[]
{ {
sidecar.OpticalDisc[0].LeadIn[0] = new BorderType new BorderType
{ {
Image = outputPrefix + ".leadin.bin", Image = outputPrefix + ".leadin.bin",
Checksums = dataChk.End().ToArray(), Checksums = dataChk.End().ToArray(),
@@ -477,6 +484,10 @@ namespace DiscImageChef.Core.Devices.Dumping
mhddLog = new MHDDLog(outputPrefix + ".mhddlog.bin", dev, blocks, blockSize, blocksToRead); mhddLog = new MHDDLog(outputPrefix + ".mhddlog.bin", dev, blocks, blockSize, blocksToRead);
ibgLog = new IBGLog(outputPrefix + ".ibg", 0x0008); ibgLog = new IBGLog(outputPrefix + ".ibg", 0x0008);
dumpFile.Seek(resume.NextBlock, (ulong)sectorSize);
if(separateSubchannel)
subFile.Seek(resume.NextBlock, subSize);
start = DateTime.UtcNow; start = DateTime.UtcNow;
for(int t = 0; t < tracks.Count(); t++) for(int t = 0; t < tracks.Count(); t++)
{ {
@@ -511,10 +522,13 @@ namespace DiscImageChef.Core.Devices.Dumping
bool checkedDataFormat = false; bool checkedDataFormat = false;
for(ulong i = (ulong)tracks[t].StartSector; i <= (ulong)tracks[t].EndSector; i += blocksToRead) for(ulong i = resume.NextBlock; i <= (ulong)tracks[t].EndSector; i += blocksToRead)
{ {
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
double cmdDuration = 0; double cmdDuration = 0;
@@ -541,6 +555,7 @@ namespace DiscImageChef.Core.Devices.Dumping
{ {
mhddLog.Write(i, cmdDuration); mhddLog.Write(i, cmdDuration);
ibgLog.Write(i, currentSpeed * 1024); ibgLog.Write(i, currentSpeed * 1024);
extents.Add(i, blocksToRead, true);
if(separateSubchannel) if(separateSubchannel)
{ {
for(int b = 0; b < blocksToRead; b++) for(int b = 0; b < blocksToRead; b++)
@@ -567,11 +582,9 @@ namespace DiscImageChef.Core.Devices.Dumping
else else
dumpFile.Write(new byte[blockSize * blocksToRead]); dumpFile.Write(new byte[blockSize * blocksToRead]);
// TODO: Record error on mapfile
errored += blocksToRead; errored += blocksToRead;
for(ulong b = i; b < i + blocksToRead; b++) for(ulong b = i; b < i + blocksToRead; b++)
unreadableSectors.Add(b); resume.BadBlocks.Add(b);
DicConsole.DebugWriteLine("Dump-Media", "READ error:\n{0}", Decoders.SCSI.Sense.PrettifySense(senseBuf)); DicConsole.DebugWriteLine("Dump-Media", "READ error:\n{0}", Decoders.SCSI.Sense.PrettifySense(senseBuf));
if(cmdDuration < 500) if(cmdDuration < 500)
mhddLog.Write(i, 65535); mhddLog.Write(i, 65535);
@@ -608,6 +621,7 @@ namespace DiscImageChef.Core.Devices.Dumping
#pragma warning disable IDE0004 // Remove Unnecessary Cast #pragma warning disable IDE0004 // Remove Unnecessary Cast
currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000); currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000);
#pragma warning restore IDE0004 // Remove Unnecessary Cast #pragma warning restore IDE0004 // Remove Unnecessary Cast
resume.NextBlock = i + blocksToRead;
} }
} }
DicConsole.WriteLine(); DicConsole.WriteLine();
@@ -618,30 +632,21 @@ namespace DiscImageChef.Core.Devices.Dumping
#pragma warning restore IDE0004 // Remove Unnecessary Cast #pragma warning restore IDE0004 // Remove Unnecessary Cast
#region Compact Disc Error handling #region Compact Disc Error handling
if(unreadableSectors.Count > 0 && !aborted) if(resume.BadBlocks.Count > 0 && !aborted)
{ {
List<ulong> tmpList = new List<ulong>();
foreach(ulong ur in unreadableSectors)
{
for(ulong i = ur; i < ur + blocksToRead; i++)
tmpList.Add(i);
}
tmpList.Sort();
int pass = 0; int pass = 0;
bool forward = true; bool forward = true;
bool runningPersistent = false; bool runningPersistent = false;
unreadableSectors = tmpList;
cdRepeatRetry: cdRepeatRetry:
ulong[] tmpArray = unreadableSectors.ToArray(); ulong[] tmpArray = resume.BadBlocks.ToArray();
foreach(ulong badSector in tmpArray) foreach(ulong badSector in tmpArray)
{ {
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
double cmdDuration = 0; double cmdDuration = 0;
@@ -657,7 +662,10 @@ namespace DiscImageChef.Core.Devices.Dumping
if((!sense && !dev.Error) || runningPersistent) if((!sense && !dev.Error) || runningPersistent)
{ {
if(!sense && !dev.Error) if(!sense && !dev.Error)
unreadableSectors.Remove(badSector); {
resume.BadBlocks.Remove(badSector);
extents.Add(badSector);
}
if(separateSubchannel) if(separateSubchannel)
{ {
@@ -669,12 +677,12 @@ namespace DiscImageChef.Core.Devices.Dumping
} }
} }
if(pass < retryPasses && !aborted && unreadableSectors.Count > 0) if(pass < retryPasses && !aborted && resume.BadBlocks.Count > 0)
{ {
pass++; pass++;
forward = !forward; forward = !forward;
unreadableSectors.Sort(); resume.BadBlocks.Sort();
unreadableSectors.Reverse(); resume.BadBlocks.Reverse();
goto cdRepeatRetry; goto cdRepeatRetry;
} }
@@ -756,6 +764,8 @@ namespace DiscImageChef.Core.Devices.Dumping
DicConsole.WriteLine(); DicConsole.WriteLine();
} }
#endregion Compact Disc Error handling #endregion Compact Disc Error handling
resume.BadBlocks.Sort();
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
dataChk = new Checksum(); dataChk = new Checksum();
dumpFile.Seek(0, SeekOrigin.Begin); dumpFile.Seek(0, SeekOrigin.Begin);
@@ -821,20 +831,7 @@ namespace DiscImageChef.Core.Devices.Dumping
// TODO: Correct this // TODO: Correct this
sidecar.OpticalDisc[0].Checksums = dataChk.End().ToArray(); sidecar.OpticalDisc[0].Checksums = dataChk.End().ToArray();
sidecar.OpticalDisc[0].DumpHardwareArray = new DumpHardwareType[1]; sidecar.OpticalDisc[0].DumpHardwareArray = resume.Tries.ToArray();
sidecar.OpticalDisc[0].DumpHardwareArray[0] = new DumpHardwareType
{
Extents = new ExtentType[1]
};
sidecar.OpticalDisc[0].DumpHardwareArray[0].Extents[0] = new ExtentType
{
Start = 0,
End = blocks - 1
};
sidecar.OpticalDisc[0].DumpHardwareArray[0].Manufacturer = dev.Manufacturer;
sidecar.OpticalDisc[0].DumpHardwareArray[0].Model = dev.Model;
sidecar.OpticalDisc[0].DumpHardwareArray[0].Revision = dev.Revision;
sidecar.OpticalDisc[0].DumpHardwareArray[0].Software = Version.GetSoftwareType(dev.PlatformID);
sidecar.OpticalDisc[0].Image = new ImageType sidecar.OpticalDisc[0].Image = new ImageType
{ {
format = "Raw disk image (sector by sector copy)", format = "Raw disk image (sector by sector copy)",
@@ -847,6 +844,20 @@ namespace DiscImageChef.Core.Devices.Dumping
Metadata.MediaType.MediaTypeToString(dskType, out string xmlDskTyp, out string xmlDskSubTyp); Metadata.MediaType.MediaTypeToString(dskType, out string xmlDskTyp, out string xmlDskSubTyp);
sidecar.OpticalDisc[0].DiscType = xmlDskTyp; sidecar.OpticalDisc[0].DiscType = xmlDskTyp;
sidecar.OpticalDisc[0].DiscSubType = xmlDskSubTyp; sidecar.OpticalDisc[0].DiscSubType = xmlDskSubTyp;
if(!aborted)
{
DicConsole.WriteLine("Writing metadata sidecar");
FileStream xmlFs = new FileStream(outputPrefix + ".cicm.xml",
FileMode.Create);
System.Xml.Serialization.XmlSerializer xmlSer = new System.Xml.Serialization.XmlSerializer(typeof(CICMMetadataType));
xmlSer.Serialize(xmlFs, sidecar);
xmlFs.Close();
}
Statistics.AddMedia(dskType, true);
} }
} }
} }

View File

@@ -35,13 +35,94 @@
// Copyright (C) 2011-2015 Claunia.com // Copyright (C) 2011-2015 Claunia.com
// ****************************************************************************/ // ****************************************************************************/
// //$Id$ // //$Id$
using System; using System;
using System.Collections.Generic;
using DiscImageChef.Metadata;
using Extents;
using Schemas;
namespace DiscImageChef.Core.Devices.Dumping namespace DiscImageChef.Core.Devices.Dumping
{ {
public class ResumeSupport public static class ResumeSupport
{ {
public ResumeSupport() public static void Process(bool isLba, bool removable, ulong blocks, string Manufacturer, string Model, string Serial, Interop.PlatformID platform, ref Resume resume, ref DumpHardwareType currentTry, ref ExtentsULong extents)
{ {
if(resume != null)
{
if(!isLba)
throw new NotImplementedException("Resuming CHS devices is currently not supported.");
if(resume.Removable != removable)
throw new Exception(string.Format("Resume file specifies a {0} device but you're requesting to dump a {1} device, not continuing...",
resume.Removable ? "removable" : "non removable",
removable ? "removable" : "non removable"));
if(resume.LastBlock != blocks - 1)
throw new Exception(string.Format("Resume file specifies a device with {0} blocks but you're requesting to dump one with {1} blocks, not continuing...",
resume.LastBlock + 1, blocks));
foreach(DumpHardwareType oldtry in resume.Tries)
{
if(oldtry.Manufacturer != Manufacturer && !removable)
throw new Exception(string.Format("Resume file specifies a device manufactured by {0} but you're requesting to dump one by {1}, not continuing...",
oldtry.Manufacturer, Manufacturer));
if(oldtry.Model != Model && !removable)
throw new Exception(string.Format("Resume file specifies a device model {0} but you're requesting to dump model {1}, not continuing...",
oldtry.Model, Model));
if(oldtry.Serial != Serial && !removable)
throw new Exception(string.Format("Resume file specifies a device with serial {0} but you're requesting to dump one with serial {1}, not continuing...",
oldtry.Serial, Serial));
if(oldtry.Software == null)
throw new Exception("Found corrupt resume file, cannot continue...");
if(oldtry.Software.Name == "DiscImageChef" && oldtry.Software.OperatingSystem == platform.ToString() && oldtry.Software.Version == Version.GetVersion())
{
if(removable && (oldtry.Manufacturer != Manufacturer || oldtry.Model != Model || oldtry.Serial != Serial))
continue;
currentTry = oldtry;
extents = ExtentsConverter.FromMetadata(currentTry.Extents);
break;
}
}
if(currentTry == null)
{
currentTry = new DumpHardwareType
{
Software = Version.GetSoftwareType(platform),
Manufacturer = Manufacturer,
Model = Model,
Serial = Serial,
};
resume.Tries.Add(currentTry);
extents = new ExtentsULong();
}
}
else
{
resume = new Resume
{
Tries = new List<DumpHardwareType>(),
CreationDate = DateTime.UtcNow,
BadBlocks = new List<ulong>(),
LastBlock = blocks - 1
};
currentTry = new DumpHardwareType
{
Software = Version.GetSoftwareType(platform),
Manufacturer = Manufacturer,
Model = Model,
Serial = Serial
};
resume.Tries.Add(currentTry);
extents = new ExtentsULong();
resume.Removable = removable;
}
} }
} }
} }
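Pulling the pieces together, this is the calling pattern the ATA, SBC and MMC dumpers in this commit follow around the new helper. It is a condensed sketch only: identifiers such as dev, blocks, blockSize, blocksToRead, dumpFile and aborted are the ones used in those hunks, and the actual reads and error handling are elided.

// Condensed sketch of the resume flow used by the dumpers in this commit.
DumpHardwareType currentTry = null;
ExtentsULong extents = null;
ResumeSupport.Process(true, dev.IsRemovable, blocks, dev.Manufacturer, dev.Model,
                      dev.Serial, dev.PlatformID, ref resume, ref currentTry, ref extents);
if(currentTry == null || extents == null)
    throw new Exception("Could not process resume file, not continuing...");

dumpFile.Seek(resume.NextBlock, blockSize);              // skip blocks already on disk
for(ulong i = resume.NextBlock; i < blocks; i += blocksToRead)
{
    if(aborted)
    {
        currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
        break;
    }
    // ...issue the read of blocksToRead sectors starting at block i...
    extents.Add(i, blocksToRead, true);                  // on success: record the extent
    // on failure the sectors are appended to resume.BadBlocks instead
    resume.NextBlock = i + blocksToRead;                 // where a later run will pick up
}
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
resume.BadBlocks.Sort();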

View File

@@ -47,6 +47,7 @@ using DiscImageChef.Filters;
using DiscImageChef.ImagePlugins; using DiscImageChef.ImagePlugins;
using DiscImageChef.PartPlugins; using DiscImageChef.PartPlugins;
using Schemas; using Schemas;
using Extents;
namespace DiscImageChef.Core.Devices.Dumping namespace DiscImageChef.Core.Devices.Dumping
{ {
@@ -75,7 +76,6 @@ namespace DiscImageChef.Core.Devices.Dumping
double currentSpeed = 0; double currentSpeed = 0;
double maxSpeed = double.MinValue; double maxSpeed = double.MinValue;
double minSpeed = double.MaxValue; double minSpeed = double.MaxValue;
List<ulong> unreadableSectors = new List<ulong>();
Checksum dataChk; Checksum dataChk;
byte[] readBuffer; byte[] readBuffer;
uint blocksToRead = 64; uint blocksToRead = 64;
@@ -102,6 +102,12 @@ namespace DiscImageChef.Core.Devices.Dumping
DicConsole.WriteLine("Media has {0} blocks of {1} bytes/each. (for a total of {2} bytes)", DicConsole.WriteLine("Media has {0} blocks of {1} bytes/each. (for a total of {2} bytes)",
blocks, blockSize, blocks * (ulong)blockSize); blocks, blockSize, blocks * (ulong)blockSize);
} }
// Check how many blocks to read, if error show and return
if(scsiReader.GetBlocksToRead())
{
DicConsole.ErrorWriteLine(scsiReader.ErrorMessage);
return;
}
blocksToRead = scsiReader.BlocksToRead; blocksToRead = scsiReader.BlocksToRead;
logicalBlockSize = blockSize; logicalBlockSize = blockSize;
physicalBlockSize = scsiReader.PhysicalBlockSize; physicalBlockSize = scsiReader.PhysicalBlockSize;
@@ -315,10 +321,20 @@ namespace DiscImageChef.Core.Devices.Dumping
readBuffer = null; readBuffer = null;
for(ulong i = 0; i < blocks; i += blocksToRead) DumpHardwareType currentTry = null;
ExtentsULong extents = null;
ResumeSupport.Process(true, dev.IsRemovable, blocks, dev.Manufacturer, dev.Model, dev.Serial, dev.PlatformID, ref resume, ref currentTry, ref extents);
if(currentTry == null || extents == null)
throw new Exception("Could not process resume file, not continuing...");
dumpFile.Seek(resume.NextBlock, blockSize);
for(ulong i = resume.NextBlock; i < blocks; i += blocksToRead)
{ {
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
if((blocks - i) < blocksToRead) if((blocks - i) < blocksToRead)
blocksToRead = (uint)(blocks - i); blocksToRead = (uint)(blocks - i);
@@ -340,6 +356,7 @@ namespace DiscImageChef.Core.Devices.Dumping
mhddLog.Write(i, cmdDuration); mhddLog.Write(i, cmdDuration);
ibgLog.Write(i, currentSpeed * 1024); ibgLog.Write(i, currentSpeed * 1024);
dumpFile.Write(readBuffer); dumpFile.Write(readBuffer);
extents.Add(i, blocksToRead, true);
} }
else else
{ {
@@ -350,11 +367,9 @@ namespace DiscImageChef.Core.Devices.Dumping
// Write empty data // Write empty data
dumpFile.Write(new byte[blockSize * blocksToRead]); dumpFile.Write(new byte[blockSize * blocksToRead]);
// TODO: Record error on mapfile
errored += blocksToRead; errored += blocksToRead;
for(ulong b = i; b < i + blocksToRead; b++) for(ulong b = i; b < i + blocksToRead; b++)
unreadableSectors.Add(b); resume.BadBlocks.Add(b);
if(cmdDuration < 500) if(cmdDuration < 500)
mhddLog.Write(i, 65535); mhddLog.Write(i, 65535);
else else
@@ -366,6 +381,7 @@ namespace DiscImageChef.Core.Devices.Dumping
#pragma warning disable IDE0004 // Remove Unnecessary Cast #pragma warning disable IDE0004 // Remove Unnecessary Cast
currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000); currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000);
#pragma warning restore IDE0004 // Remove Unnecessary Cast #pragma warning restore IDE0004 // Remove Unnecessary Cast
resume.NextBlock = i + blocksToRead;
} }
end = DateTime.UtcNow; end = DateTime.UtcNow;
DicConsole.WriteLine(); DicConsole.WriteLine();
@@ -375,30 +391,21 @@ namespace DiscImageChef.Core.Devices.Dumping
#pragma warning restore IDE0004 // Remove Unnecessary Cast #pragma warning restore IDE0004 // Remove Unnecessary Cast
#region Error handling #region Error handling
if(unreadableSectors.Count > 0 && !aborted) if(resume.BadBlocks.Count > 0 && !aborted)
{ {
List<ulong> tmpList = new List<ulong>();
foreach(ulong ur in unreadableSectors)
{
for(ulong i = ur; i < ur + blocksToRead; i++)
tmpList.Add(i);
}
tmpList.Sort();
int pass = 0; int pass = 0;
bool forward = true; bool forward = true;
bool runningPersistent = false; bool runningPersistent = false;
unreadableSectors = tmpList;
repeatRetry: repeatRetry:
ulong[] tmpArray = unreadableSectors.ToArray(); ulong[] tmpArray = resume.BadBlocks.ToArray();
foreach(ulong badSector in tmpArray) foreach(ulong badSector in tmpArray)
{ {
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
DicConsole.Write("\rRetrying sector {0}, pass {1}, {3}{2}", badSector, pass + 1, forward ? "forward" : "reverse", runningPersistent ? "recovering partial data, " : ""); DicConsole.Write("\rRetrying sector {0}, pass {1}, {3}{2}", badSector, pass + 1, forward ? "forward" : "reverse", runningPersistent ? "recovering partial data, " : "");
@@ -407,19 +414,20 @@ namespace DiscImageChef.Core.Devices.Dumping
if(!sense && !dev.Error) if(!sense && !dev.Error)
{ {
unreadableSectors.Remove(badSector); resume.BadBlocks.Remove(badSector);
extents.Add(badSector);
dumpFile.WriteAt(readBuffer, badSector, blockSize); dumpFile.WriteAt(readBuffer, badSector, blockSize);
} }
else if(runningPersistent) else if(runningPersistent)
dumpFile.WriteAt(readBuffer, badSector, blockSize); dumpFile.WriteAt(readBuffer, badSector, blockSize);
} }
if(pass < retryPasses && !aborted && unreadableSectors.Count > 0) if(pass < retryPasses && !aborted && resume.BadBlocks.Count > 0)
{ {
pass++; pass++;
forward = !forward; forward = !forward;
unreadableSectors.Sort(); resume.BadBlocks.Sort();
unreadableSectors.Reverse(); resume.BadBlocks.Reverse();
goto repeatRetry; goto repeatRetry;
} }
@@ -535,6 +543,8 @@ namespace DiscImageChef.Core.Devices.Dumping
DicConsole.WriteLine(); DicConsole.WriteLine();
} }
#endregion Error handling #endregion Error handling
resume.BadBlocks.Sort();
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
dataChk = new Checksum(); dataChk = new Checksum();
dumpFile.Seek(0, SeekOrigin.Begin); dumpFile.Seek(0, SeekOrigin.Begin);
@@ -700,20 +710,7 @@ namespace DiscImageChef.Core.Devices.Dumping
if(opticalDisc) if(opticalDisc)
{ {
sidecar.OpticalDisc[0].Checksums = dataChk.End().ToArray(); sidecar.OpticalDisc[0].Checksums = dataChk.End().ToArray();
sidecar.OpticalDisc[0].DumpHardwareArray = new DumpHardwareType[1]; sidecar.OpticalDisc[0].DumpHardwareArray = resume.Tries.ToArray();
sidecar.OpticalDisc[0].DumpHardwareArray[0] = new DumpHardwareType
{
Extents = new ExtentType[1]
};
sidecar.OpticalDisc[0].DumpHardwareArray[0].Extents[0] = new ExtentType
{
Start = 0,
End = blocks - 1
};
sidecar.OpticalDisc[0].DumpHardwareArray[0].Manufacturer = dev.Manufacturer;
sidecar.OpticalDisc[0].DumpHardwareArray[0].Model = dev.Model;
sidecar.OpticalDisc[0].DumpHardwareArray[0].Revision = dev.Revision;
sidecar.OpticalDisc[0].DumpHardwareArray[0].Software = Version.GetSoftwareType(dev.PlatformID);
sidecar.OpticalDisc[0].Image = new ImageType sidecar.OpticalDisc[0].Image = new ImageType
{ {
format = "Raw disk image (sector by sector copy)", format = "Raw disk image (sector by sector copy)",
@@ -822,22 +819,7 @@ namespace DiscImageChef.Core.Devices.Dumping
sidecar.BlockMedia[0].FileSystemInformation = xmlFileSysInfo; sidecar.BlockMedia[0].FileSystemInformation = xmlFileSysInfo;
if(dev.IsRemovable) if(dev.IsRemovable)
{ sidecar.BlockMedia[0].DumpHardwareArray = resume.Tries.ToArray();
sidecar.BlockMedia[0].DumpHardwareArray = new DumpHardwareType[1];
sidecar.BlockMedia[0].DumpHardwareArray[0] = new DumpHardwareType
{
Extents = new ExtentType[1]
};
sidecar.BlockMedia[0].DumpHardwareArray[0].Extents[0] = new ExtentType
{
Start = 0,
End = blocks - 1
};
sidecar.BlockMedia[0].DumpHardwareArray[0].Manufacturer = dev.Manufacturer;
sidecar.BlockMedia[0].DumpHardwareArray[0].Model = dev.Model;
sidecar.BlockMedia[0].DumpHardwareArray[0].Revision = dev.Revision;
sidecar.BlockMedia[0].DumpHardwareArray[0].Software = Version.GetSoftwareType(dev.PlatformID);
}
} }
DicConsole.WriteLine(); DicConsole.WriteLine();
@@ -848,13 +830,7 @@ namespace DiscImageChef.Core.Devices.Dumping
#pragma warning restore IDE0004 // Cast is necessary, otherwise incorrect value is created #pragma warning restore IDE0004 // Cast is necessary, otherwise incorrect value is created
DicConsole.WriteLine("Fastest speed burst: {0:F3} MiB/sec.", maxSpeed); DicConsole.WriteLine("Fastest speed burst: {0:F3} MiB/sec.", maxSpeed);
DicConsole.WriteLine("Slowest speed burst: {0:F3} MiB/sec.", minSpeed); DicConsole.WriteLine("Slowest speed burst: {0:F3} MiB/sec.", minSpeed);
DicConsole.WriteLine("{0} sectors could not be read.", unreadableSectors.Count); DicConsole.WriteLine("{0} sectors could not be read.", resume.BadBlocks.Count);
if(unreadableSectors.Count > 0)
{
unreadableSectors.Sort();
foreach(ulong bad in unreadableSectors)
DicConsole.WriteLine("Sector {0} could not be read", bad);
}
DicConsole.WriteLine(); DicConsole.WriteLine();
if(!aborted) if(!aborted)

View File

@@ -138,28 +138,13 @@ namespace DiscImageChef.Core.Devices.Dumping
return; return;
} }
FileStream xmlFs = new FileStream(outputPrefix + ".cicm.xml",
FileMode.Create);
System.Xml.Serialization.XmlSerializer xmlSer = new System.Xml.Serialization.XmlSerializer(typeof(CICMMetadataType));
if(dev.SCSIType == Decoders.SCSI.PeripheralDeviceTypes.MultiMediaDevice) if(dev.SCSIType == Decoders.SCSI.PeripheralDeviceTypes.MultiMediaDevice)
{ {
MMC.Dump(dev, devicePath, outputPrefix, retryPasses, force, dumpRaw, persistent, stopOnError, ref sidecar, ref dskType, separateSubchannel, ref resume); MMC.Dump(dev, devicePath, outputPrefix, retryPasses, force, dumpRaw, persistent, stopOnError, ref sidecar, ref dskType, separateSubchannel, ref resume);
DicConsole.WriteLine("Writing metadata sidecar");
xmlSer.Serialize(xmlFs, sidecar);
xmlFs.Close();
return; return;
} }
SBC.Dump(dev, devicePath, outputPrefix, retryPasses, force, dumpRaw, persistent, stopOnError, ref sidecar, ref dskType, false, ref resume); SBC.Dump(dev, devicePath, outputPrefix, retryPasses, force, dumpRaw, persistent, stopOnError, ref sidecar, ref dskType, false, ref resume);
DicConsole.WriteLine("Writing metadata sidecar");
xmlSer.Serialize(xmlFs, sidecar);
xmlFs.Close();
} }
} }
} }

View File

@@ -46,6 +46,7 @@ using DiscImageChef.Filesystems;
using DiscImageChef.Filters; using DiscImageChef.Filters;
using DiscImageChef.ImagePlugins; using DiscImageChef.ImagePlugins;
using DiscImageChef.PartPlugins; using DiscImageChef.PartPlugins;
using Extents;
using Schemas; using Schemas;
namespace DiscImageChef.Core.Devices.Dumping namespace DiscImageChef.Core.Devices.Dumping
@@ -68,7 +69,6 @@ namespace DiscImageChef.Core.Devices.Dumping
double currentSpeed = 0; double currentSpeed = 0;
double maxSpeed = double.MinValue; double maxSpeed = double.MinValue;
double minSpeed = double.MaxValue; double minSpeed = double.MaxValue;
List<ulong> unreadableSectors = new List<ulong>();
Checksum dataChk; Checksum dataChk;
DataFile dumpFile = null; DataFile dumpFile = null;
bool aborted = false; bool aborted = false;
@@ -218,12 +218,28 @@ namespace DiscImageChef.Core.Devices.Dumping
readBuffer = null; readBuffer = null;
ulong currentSector = 0;
double cmdDuration = 0; double cmdDuration = 0;
uint saveBlocksToRead = blocksToRead; uint saveBlocksToRead = blocksToRead;
DumpHardwareType currentTry = null;
ExtentsULong extents = null;
ResumeSupport.Process(true, true, totalSize, dev.Manufacturer, dev.Model, dev.Serial, dev.PlatformID, ref resume, ref currentTry, ref extents);
if(currentTry == null || extents == null)
throw new Exception("Could not process resume file, not continuing...");
ulong currentSector = resume.NextBlock;
dumpFile.Seek(resume.NextBlock, blockSize);
for(int e = 0; e <= 16; e++) for(int e = 0; e <= 16; e++)
{ {
if(aborted)
{
resume.NextBlock = currentSector;
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break;
}
if(currentSector >= blocks)
break;
ulong extentStart, extentEnd; ulong extentStart, extentEnd;
// Extents // Extents
if(e < 16) if(e < 16)
@@ -244,11 +260,18 @@ namespace DiscImageChef.Core.Devices.Dumping
extentEnd = blocks; extentEnd = blocks;
} }
if(currentSector > extentEnd)
continue;
for(ulong i = currentSector; i < extentStart; i += blocksToRead) for(ulong i = currentSector; i < extentStart; i += blocksToRead)
{ {
saveBlocksToRead = blocksToRead; saveBlocksToRead = blocksToRead;
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
if((extentStart - i) < blocksToRead) if((extentStart - i) < blocksToRead)
blocksToRead = (uint)(extentStart - i); blocksToRead = (uint)(extentStart - i);
@@ -270,6 +293,7 @@ namespace DiscImageChef.Core.Devices.Dumping
mhddLog.Write(i, cmdDuration); mhddLog.Write(i, cmdDuration);
ibgLog.Write(i, currentSpeed * 1024); ibgLog.Write(i, currentSpeed * 1024);
dumpFile.Write(readBuffer); dumpFile.Write(readBuffer);
extents.Add(i, blocksToRead, true);
} }
else else
{ {
@@ -280,11 +304,9 @@ namespace DiscImageChef.Core.Devices.Dumping
// Write empty data // Write empty data
dumpFile.Write(new byte[blockSize * blocksToRead]); dumpFile.Write(new byte[blockSize * blocksToRead]);
// TODO: Record error on mapfile
errored += blocksToRead; errored += blocksToRead;
for(ulong b = i; b < i + blocksToRead; b++) for(ulong b = i; b < i + blocksToRead; b++)
unreadableSectors.Add(b); resume.BadBlocks.Add(b);
DicConsole.DebugWriteLine("Dump-Media", "READ error:\n{0}", Decoders.SCSI.Sense.PrettifySense(senseBuf)); DicConsole.DebugWriteLine("Dump-Media", "READ error:\n{0}", Decoders.SCSI.Sense.PrettifySense(senseBuf));
if(cmdDuration < 500) if(cmdDuration < 500)
mhddLog.Write(i, 65535); mhddLog.Write(i, 65535);
@@ -298,13 +320,18 @@ namespace DiscImageChef.Core.Devices.Dumping
currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000); currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000);
#pragma warning restore IDE0004 // Remove Unnecessary Cast #pragma warning restore IDE0004 // Remove Unnecessary Cast
blocksToRead = saveBlocksToRead; blocksToRead = saveBlocksToRead;
currentSector = i + 1;
resume.NextBlock = currentSector;
} }
for(ulong i = extentStart; i <= extentEnd; i += blocksToRead) for(ulong i = extentStart; i <= extentEnd; i += blocksToRead)
{ {
saveBlocksToRead = blocksToRead; saveBlocksToRead = blocksToRead;
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
if((extentEnd - i) < blocksToRead) if((extentEnd - i) < blocksToRead)
blocksToRead = (uint)(extentEnd - i) + 1; blocksToRead = (uint)(extentEnd - i) + 1;
@@ -313,18 +340,23 @@ namespace DiscImageChef.Core.Devices.Dumping
ibgLog.Write(i, currentSpeed * 1024); ibgLog.Write(i, currentSpeed * 1024);
dumpFile.Write(new byte[blocksToRead * 2048]); dumpFile.Write(new byte[blocksToRead * 2048]);
blocksToRead = saveBlocksToRead; blocksToRead = saveBlocksToRead;
extents.Add(i, blocksToRead, true);
currentSector = i + 1;
resume.NextBlock = currentSector;
} }
currentSector = extentEnd + 1; if(!aborted)
if(currentSector >= blocks) currentSector = extentEnd + 1;
break;
} }
// Middle Zone D // Middle Zone D
for(ulong middle = 0; middle < (middleZone - 1); middle += blocksToRead) for(ulong middle = currentSector - blocks - 1; middle < (middleZone - 1); middle += blocksToRead)
{ {
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
if(((middleZone - 1) - middle) < blocksToRead) if(((middleZone - 1) - middle) < blocksToRead)
blocksToRead = (uint)((middleZone - 1) - middle); blocksToRead = (uint)((middleZone - 1) - middle);
@@ -334,8 +366,10 @@ namespace DiscImageChef.Core.Devices.Dumping
mhddLog.Write(middle + currentSector, cmdDuration); mhddLog.Write(middle + currentSector, cmdDuration);
ibgLog.Write(middle + currentSector, currentSpeed * 1024); ibgLog.Write(middle + currentSector, currentSpeed * 1024);
dumpFile.Write(new byte[blockSize * blocksToRead]); dumpFile.Write(new byte[blockSize * blocksToRead]);
extents.Add(currentSector, blocksToRead, true);
currentSector += blocksToRead; currentSector += blocksToRead;
resume.NextBlock = currentSector;
} }
blocksToRead = saveBlocksToRead; blocksToRead = saveBlocksToRead;
@@ -354,10 +388,13 @@ namespace DiscImageChef.Core.Devices.Dumping
} }
// Video Layer 1 // Video Layer 1
for(ulong l1 = l0Video; l1 < (l0Video + l1Video); l1 += blocksToRead) for(ulong l1 = currentSector - blocks - middleZone + l0Video; l1 < (l0Video + l1Video); l1 += blocksToRead)
{ {
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
if(((l0Video + l1Video) - l1) < blocksToRead) if(((l0Video + l1Video) - l1) < blocksToRead)
blocksToRead = (uint)((l0Video + l1Video) - l1); blocksToRead = (uint)((l0Video + l1Video) - l1);
@@ -379,6 +416,7 @@ namespace DiscImageChef.Core.Devices.Dumping
mhddLog.Write(currentSector, cmdDuration); mhddLog.Write(currentSector, cmdDuration);
ibgLog.Write(currentSector, currentSpeed * 1024); ibgLog.Write(currentSector, currentSpeed * 1024);
dumpFile.Write(readBuffer); dumpFile.Write(readBuffer);
extents.Add(currentSector, blocksToRead, true);
} }
else else
{ {
@@ -389,11 +427,9 @@ namespace DiscImageChef.Core.Devices.Dumping
// Write empty data // Write empty data
dumpFile.Write(new byte[blockSize * blocksToRead]); dumpFile.Write(new byte[blockSize * blocksToRead]);
// TODO: Record error on mapfile
// TODO: Handle errors in video partition // TODO: Handle errors in video partition
//errored += blocksToRead; //errored += blocksToRead;
//unreadableSectors.Add(l1); //resume.BadBlocks.Add(l1);
DicConsole.DebugWriteLine("Dump-Media", "READ error:\n{0}", Decoders.SCSI.Sense.PrettifySense(senseBuf)); DicConsole.DebugWriteLine("Dump-Media", "READ error:\n{0}", Decoders.SCSI.Sense.PrettifySense(senseBuf));
if(cmdDuration < 500) if(cmdDuration < 500)
mhddLog.Write(l1, 65535); mhddLog.Write(l1, 65535);
@@ -407,6 +443,7 @@ namespace DiscImageChef.Core.Devices.Dumping
currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000); currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000);
#pragma warning restore IDE0004 // Remove Unnecessary Cast #pragma warning restore IDE0004 // Remove Unnecessary Cast
currentSector += blocksToRead; currentSector += blocksToRead;
resume.NextBlock = currentSector;
} }
sense = dev.KreonUnlockWxripper(out senseBuf, dev.Timeout, out duration); sense = dev.KreonUnlockWxripper(out senseBuf, dev.Timeout, out duration);
@@ -430,11 +467,11 @@ namespace DiscImageChef.Core.Devices.Dumping
#pragma warning restore IDE0004 // Remove Unnecessary Cast #pragma warning restore IDE0004 // Remove Unnecessary Cast
#region Error handling #region Error handling
if(unreadableSectors.Count > 0 && !aborted) if(resume.BadBlocks.Count > 0 && !aborted)
{ {
List<ulong> tmpList = new List<ulong>(); List<ulong> tmpList = new List<ulong>();
foreach(ulong ur in unreadableSectors) foreach(ulong ur in resume.BadBlocks)
{ {
for(ulong i = ur; i < ur + blocksToRead; i++) for(ulong i = ur; i < ur + blocksToRead; i++)
tmpList.Add(i); tmpList.Add(i);
@@ -446,14 +483,17 @@ namespace DiscImageChef.Core.Devices.Dumping
bool forward = true; bool forward = true;
bool runningPersistent = false; bool runningPersistent = false;
unreadableSectors = tmpList; resume.BadBlocks = tmpList;
repeatRetry: repeatRetry:
ulong[] tmpArray = unreadableSectors.ToArray(); ulong[] tmpArray = resume.BadBlocks.ToArray();
foreach(ulong badSector in tmpArray) foreach(ulong badSector in tmpArray)
{ {
if(aborted) if(aborted)
{
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
break; break;
}
cmdDuration = 0; cmdDuration = 0;
@@ -464,19 +504,20 @@ namespace DiscImageChef.Core.Devices.Dumping
if(!sense && !dev.Error) if(!sense && !dev.Error)
{ {
unreadableSectors.Remove(badSector); resume.BadBlocks.Remove(badSector);
extents.Add(badSector);
dumpFile.WriteAt(readBuffer, badSector, blockSize); dumpFile.WriteAt(readBuffer, badSector, blockSize);
} }
else if(runningPersistent) else if(runningPersistent)
dumpFile.WriteAt(readBuffer, badSector, blockSize); dumpFile.WriteAt(readBuffer, badSector, blockSize);
} }
if(pass < retryPasses && !aborted && unreadableSectors.Count > 0) if(pass < retryPasses && !aborted && resume.BadBlocks.Count > 0)
{ {
pass++; pass++;
forward = !forward; forward = !forward;
unreadableSectors.Sort(); resume.BadBlocks.Sort();
unreadableSectors.Reverse(); resume.BadBlocks.Reverse();
goto repeatRetry; goto repeatRetry;
} }
@@ -592,6 +633,8 @@ namespace DiscImageChef.Core.Devices.Dumping
DicConsole.WriteLine(); DicConsole.WriteLine();
} }
#endregion Error handling #endregion Error handling
resume.BadBlocks.Sort();
currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
dataChk = new Checksum(); dataChk = new Checksum();
dumpFile.Seek(0, SeekOrigin.Begin); dumpFile.Seek(0, SeekOrigin.Begin);
@@ -757,20 +800,7 @@ namespace DiscImageChef.Core.Devices.Dumping
} }
sidecar.OpticalDisc[0].Checksums = dataChk.End().ToArray(); sidecar.OpticalDisc[0].Checksums = dataChk.End().ToArray();
sidecar.OpticalDisc[0].DumpHardwareArray = new DumpHardwareType[1]; sidecar.OpticalDisc[0].DumpHardwareArray = resume.Tries.ToArray();
sidecar.OpticalDisc[0].DumpHardwareArray[0] = new DumpHardwareType
{
Extents = new ExtentType[1]
};
sidecar.OpticalDisc[0].DumpHardwareArray[0].Extents[0] = new ExtentType
{
Start = 0,
End = blocks - 1
};
sidecar.OpticalDisc[0].DumpHardwareArray[0].Manufacturer = dev.Manufacturer;
sidecar.OpticalDisc[0].DumpHardwareArray[0].Model = dev.Model;
sidecar.OpticalDisc[0].DumpHardwareArray[0].Revision = dev.Revision;
sidecar.OpticalDisc[0].DumpHardwareArray[0].Software = Version.GetSoftwareType(dev.PlatformID);
sidecar.OpticalDisc[0].Image = new ImageType sidecar.OpticalDisc[0].Image = new ImageType
{ {
format = "Raw disk image (sector by sector copy)", format = "Raw disk image (sector by sector copy)",
@@ -816,6 +846,20 @@ namespace DiscImageChef.Core.Devices.Dumping
Metadata.MediaType.MediaTypeToString(dskType, out string xmlDskTyp, out string xmlDskSubTyp); Metadata.MediaType.MediaTypeToString(dskType, out string xmlDskTyp, out string xmlDskSubTyp);
sidecar.OpticalDisc[0].DiscType = xmlDskTyp; sidecar.OpticalDisc[0].DiscType = xmlDskTyp;
sidecar.OpticalDisc[0].DiscSubType = xmlDskSubTyp; sidecar.OpticalDisc[0].DiscSubType = xmlDskSubTyp;
if(!aborted)
{
DicConsole.WriteLine("Writing metadata sidecar");
FileStream xmlFs = new FileStream(outputPrefix + ".cicm.xml",
FileMode.Create);
System.Xml.Serialization.XmlSerializer xmlSer = new System.Xml.Serialization.XmlSerializer(typeof(CICMMetadataType));
xmlSer.Serialize(xmlFs, sidecar);
xmlFs.Close();
}
Statistics.AddMedia(dskType, true);
} }
} }
} }

View File

@@ -78,6 +78,7 @@
<Compile Include="Devices\Dumping\SBC.cs" /> <Compile Include="Devices\Dumping\SBC.cs" />
<Compile Include="Devices\Dumping\XGD.cs" /> <Compile Include="Devices\Dumping\XGD.cs" />
<Compile Include="Properties\AssemblyInfo.cs" /> <Compile Include="Properties\AssemblyInfo.cs" />
<Compile Include="Devices\Dumping\ResumeSupport.cs" />
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<ProjectReference Include="..\DiscImageChef.Console\DiscImageChef.Console.csproj"> <ProjectReference Include="..\DiscImageChef.Console\DiscImageChef.Console.csproj">

View File

@@ -502,6 +502,10 @@ namespace DiscImageChef.Metadata
DiscType = "5.25\" floppy"; DiscType = "5.25\" floppy";
DiscSubType = "Atari double-density"; DiscSubType = "Atari double-density";
break; break;
case CommonTypes.MediaType.ATARI_35_DS_DD:
DiscType = "3.5\" floppy";
DiscSubType = "Atari ST double-density, double-sided, 10 sectors";
break;
case CommonTypes.MediaType.CBM_1540: case CommonTypes.MediaType.CBM_1540:
case CommonTypes.MediaType.CBM_1540_Ext: case CommonTypes.MediaType.CBM_1540_Ext:
DiscType = "5.25\" floppy"; DiscType = "5.25\" floppy";

View File

@@ -40,17 +40,17 @@ namespace DiscImageChef.Metadata
[XmlRoot("DicResume", Namespace = "", IsNullable = false)] [XmlRoot("DicResume", Namespace = "", IsNullable = false)]
public class Resume public class Resume
{ {
[XmlElement(DataType = "date")] [XmlElement(DataType = "dateTime")]
public DateTime CreationDate; public DateTime CreationDate;
[XmlElement(DataType = "date")] [XmlElement(DataType = "dateTime")]
public DateTime LastWriteDate; public DateTime LastWriteDate;
public bool Removable; public bool Removable;
public ulong LastBlock; public ulong LastBlock;
public ulong LastTriedBlock; public ulong NextBlock;
[XmlArrayItem("DumpTry")] [XmlArrayItem("DumpTry")]
public List<DumpHardwareType> Tries; public List<DumpHardwareType> Tries;
[XmlArrayItem("BadBlock")] [XmlArrayItem("Block")]
public List<ulong> BadBlocks; public List<ulong> BadBlocks;
} }
} }
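Given the DicResume schema above, the mapfile round-trip that Commands/DumpMedia.cs performs (further down) can be sketched as the two helpers below. Load and Save are hypothetical names used here for illustration only; the command itself inlines this logic and wraps the deserialization in a try/catch.

using System;
using System.IO;
using System.Xml.Serialization;
using DiscImageChef.Metadata;

static class ResumeFile
{
    // Reads <output-prefix>.resume.xml if it exists; null means a fresh dump.
    public static Resume Load(string outputPrefix)
    {
        string path = outputPrefix + ".resume.xml";
        if(!File.Exists(path))
            return null;

        XmlSerializer xs = new XmlSerializer(typeof(Resume));
        using(StreamReader sr = new StreamReader(path))
            return (Resume)xs.Deserialize(sr);
    }

    // Writes the mapfile back after a dump (or an abort), as the command does.
    public static void Save(string outputPrefix, Resume resume)
    {
        resume.LastWriteDate = DateTime.UtcNow;
        resume.BadBlocks.Sort();

        XmlSerializer xs = new XmlSerializer(typeof(Resume));
        using(StreamWriter sw = new StreamWriter(outputPrefix + ".resume.xml", false))
            xs.Serialize(sw, resume);
    }
}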

View File

@@ -1,3 +1,7 @@
* Options.cs:
* Commands/DumpMedia.cs:
Implemented resume mapfile support.
* Commands/DumpMedia.cs: * Commands/DumpMedia.cs:
Added resume parameter passing. Added resume parameter passing.

View File

@@ -52,8 +52,6 @@ namespace DiscImageChef.Commands
{ {
public static class DumpMedia public static class DumpMedia
{ {
// TODO: Implement dump map
public static void doDumpMedia(DumpMediaOptions options) public static void doDumpMedia(DumpMediaOptions options)
{ {
DicConsole.DebugWriteLine("Dump-Media command", "--debug={0}", options.Debug); DicConsole.DebugWriteLine("Dump-Media command", "--debug={0}", options.Debug);
@@ -66,6 +64,7 @@ namespace DiscImageChef.Commands
DicConsole.DebugWriteLine("Dump-Media command", "--retry-passes={0}", options.RetryPasses); DicConsole.DebugWriteLine("Dump-Media command", "--retry-passes={0}", options.RetryPasses);
DicConsole.DebugWriteLine("Dump-Media command", "--persistent={0}", options.Persistent); DicConsole.DebugWriteLine("Dump-Media command", "--persistent={0}", options.Persistent);
DicConsole.DebugWriteLine("Dump-Media command", "--separate-subchannel={0}", options.SeparateSubchannel); DicConsole.DebugWriteLine("Dump-Media command", "--separate-subchannel={0}", options.SeparateSubchannel);
DicConsole.DebugWriteLine("Dump-Media command", "--resume={0}", options.Resume);
if(!File.Exists(options.DevicePath)) if(!File.Exists(options.DevicePath))
{ {
@@ -90,8 +89,8 @@ namespace DiscImageChef.Commands
Core.Statistics.AddDevice(dev); Core.Statistics.AddDevice(dev);
Resume resume = null; Resume resume = null;
XmlSerializer xs = new XmlSerializer(resume.GetType()); XmlSerializer xs = new XmlSerializer(typeof(Resume));
if(File.Exists(options.OutputPrefix + ".resume.xml")) if(File.Exists(options.OutputPrefix + ".resume.xml") && options.Resume)
{ {
try try
{ {
@@ -106,6 +105,12 @@ namespace DiscImageChef.Commands
} }
} }
if(resume != null && resume.NextBlock > resume.LastBlock && resume.BadBlocks.Count == 0)
{
DicConsole.WriteLine("Media already dumped correctly, not continuing...");
return;
}
switch(dev.Type) switch(dev.Type)
{ {
case DeviceType.ATA: case DeviceType.ATA:
@@ -126,8 +131,11 @@ namespace DiscImageChef.Commands
throw new NotSupportedException("Unknown device type."); throw new NotSupportedException("Unknown device type.");
} }
if(resume != null) if(resume != null && options.Resume)
{ {
resume.LastWriteDate = DateTime.UtcNow;
resume.BadBlocks.Sort();
if(File.Exists(options.OutputPrefix + ".resume.xml")) if(File.Exists(options.OutputPrefix + ".resume.xml"))
File.Delete(options.OutputPrefix + ".resume.xml"); File.Delete(options.OutputPrefix + ".resume.xml");

View File

@@ -302,6 +302,10 @@ namespace DiscImageChef
[Option("separate-subchannel", Default = false, [Option("separate-subchannel", Default = false,
HelpText = "Save subchannel in a separate file. Only applicable to CD/DDCD/GD.")] HelpText = "Save subchannel in a separate file. Only applicable to CD/DDCD/GD.")]
public bool SeparateSubchannel { get; set; } public bool SeparateSubchannel { get; set; }
[Option('m', "resume", Default = true,
HelpText = "Create/use resume mapfile.")]
public bool Resume { get; set; }
} }
[Verb("device-report", HelpText = "Tests the device capabilities and creates an XML report of them.")] [Verb("device-report", HelpText = "Tests the device capabilities and creates an XML report of them.")]

TODO
View File

@@ -31,7 +31,6 @@ Image checksum:
--- Optimize and multithread --- Optimize and multithread
Device handling: Device handling:
--- Add mapfile support for resuming dumps
--- Add support for FreeBSD --- Add support for FreeBSD
--- Add support for MMC/SD devices --- Add support for MMC/SD devices
--- Add support for NVMe devices --- Add support for NVMe devices