From e57f2a51318390cb392b6cb53995d12ad4816355 Mon Sep 17 00:00:00 2001
From: Natalia Portillo
Date: Tue, 20 Jun 2017 05:48:09 +0100
Subject: [PATCH] Added support for resume mapfile.

---
 DiscImageChef.CommonTypes/MediaType.cs       |   2 +
 DiscImageChef.Core/DataFile.cs               |  10 ++
 DiscImageChef.Core/Devices/Dumping/ATA.cs    |  64 +++++-----
 .../Devices/Dumping/CompactDisc.cs           |  89 ++++++------
 .../Devices/Dumping/ResumeSupport.cs         |  87 ++++++++++++-
 DiscImageChef.Core/Devices/Dumping/SBC.cs    |  96 ++++++---------
 DiscImageChef.Core/Devices/Dumping/SCSI.cs   |  15 ---
 DiscImageChef.Core/Devices/Dumping/XGD.cs    | 114 ++++++++++++------
 DiscImageChef.Core/DiscImageChef.Core.csproj |   1 +
 DiscImageChef.Metadata/MediaType.cs          |   4 +
 DiscImageChef.Metadata/Resume.cs             |   8 +-
 DiscImageChef/ChangeLog                      |   4 +
 DiscImageChef/Commands/DumpMedia.cs          |  18 ++-
 DiscImageChef/Options.cs                     |   4 +
 TODO                                         |   1 -
 15 files changed, 327 insertions(+), 190 deletions(-)

diff --git a/DiscImageChef.CommonTypes/MediaType.cs b/DiscImageChef.CommonTypes/MediaType.cs
index 92a10102..1b3aeec7 100644
--- a/DiscImageChef.CommonTypes/MediaType.cs
+++ b/DiscImageChef.CommonTypes/MediaType.cs
@@ -342,6 +342,8 @@ namespace DiscImageChef.CommonTypes
         ATARI_525_ED,
         /// 5,25", SS, DD, 40 tracks, 18 spt, 256 bytes/sector, MFM
         ATARI_525_DD,
+        /// 3,5", DS, DD, 80 tracks, 10 spt, 512 bytes/sector, MFM
+        ATARI_35_DS_DD,
         #endregion Atari standard floppy formats

         #region Commodore standard floppy formats
diff --git a/DiscImageChef.Core/DataFile.cs b/DiscImageChef.Core/DataFile.cs
index 939280d2..c7122f68 100644
--- a/DiscImageChef.Core/DataFile.cs
+++ b/DiscImageChef.Core/DataFile.cs
@@ -61,6 +61,16 @@ namespace DiscImageChef.Core
             return dataFs.Read(array, offset, count);
         }

+        public long Seek(ulong block, ulong blockSize)
+        {
+            return dataFs.Seek((long)(block * blockSize), SeekOrigin.Begin);
+        }
+
+        public long Seek(ulong offset, SeekOrigin origin)
+        {
+            return dataFs.Seek((long)offset, origin);
+        }
+
         public long Seek(long offset, SeekOrigin origin)
         {
             return dataFs.Seek(offset, origin);
diff --git a/DiscImageChef.Core/Devices/Dumping/ATA.cs b/DiscImageChef.Core/Devices/Dumping/ATA.cs
index 52e18cbb..938ac483 100644
--- a/DiscImageChef.Core/Devices/Dumping/ATA.cs
+++ b/DiscImageChef.Core/Devices/Dumping/ATA.cs
@@ -48,6 +48,7 @@ using DiscImageChef.Filters;
 using DiscImageChef.ImagePlugins;
 using DiscImageChef.PartPlugins;
 using Schemas;
+using Extents;

 namespace DiscImageChef.Core.Devices.Dumping
 {
@@ -166,7 +167,6 @@ namespace DiscImageChef.Core.Devices.Dumping
             double currentSpeed = 0;
             double maxSpeed = double.MinValue;
             double minSpeed = double.MaxValue;
-            List<ulong> unreadableSectors = new List<ulong>();
             Checksum dataChk;

             aborted = false;
@@ -205,6 +205,13 @@ namespace DiscImageChef.Core.Devices.Dumping
             byte heads = ataReader.Heads;
             byte sectors = ataReader.Sectors;

+            bool removable = false || (!dev.IsCompactFlash && ataId.GeneralConfiguration.HasFlag(Decoders.ATA.Identify.GeneralConfigurationBit.Removable));
+            DumpHardwareType currentTry = null;
+            ExtentsULong extents = null;
+            ResumeSupport.Process(ataReader.IsLBA, removable, blocks, dev.Manufacturer, dev.Model, dev.Serial, dev.PlatformID, ref resume, ref currentTry, ref extents);
+            if(currentTry == null || extents == null)
+                throw new Exception("Could not process resume file, not continuing...");
+
             if(ataReader.IsLBA)
             {
                 DicConsole.WriteLine("Reading {0} sectors at a time.", blocksToRead);
@@ -212,12 +219,16 @@
                 mhddLog = new MHDDLog(outputPrefix + ".mhddlog.bin", dev, blocks, blockSize,
                                      blocksToRead);
                 ibgLog = new IBGLog(outputPrefix + ".ibg", currentProfile);
                 dumpFile = new DataFile(outputPrefix + ".bin");
+                dumpFile.Seek(resume.NextBlock, blockSize);

                 start = DateTime.UtcNow;

-                for(ulong i = 0; i < blocks; i += blocksToRead)
+                for(ulong i = resume.NextBlock; i < blocks; i += blocksToRead)
                 {
                     if(aborted)
+                    {
+                        currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
                         break;
+                    }

                     if((blocks - i) < blocksToRead)
                         blocksToRead = (byte)(blocks - i);
@@ -238,11 +249,12 @@ namespace DiscImageChef.Core.Devices.Dumping
                         mhddLog.Write(i, duration);
                         ibgLog.Write(i, currentSpeed * 1024);
                         dumpFile.Write(cmdBuf);
+                        extents.Add(i, blocksToRead, true);
                     }
                     else
                     {
                         for(ulong b = i; b < i + blocksToRead; b++)
-                            unreadableSectors.Add(b);
+                            resume.BadBlocks.Add(b);
                         if(duration < 500)
                             mhddLog.Write(i, 65535);
                         else
@@ -256,6 +268,7 @@
                     currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (duration / (double)1000);
 #pragma warning restore IDE0004 // Cast is necessary, otherwise incorrect value is created
                     GC.Collect();
+                    resume.NextBlock = i + blocksToRead;
                 }
                 end = DateTime.Now;
                 DicConsole.WriteLine();
@@ -265,30 +278,22 @@
 #pragma warning restore IDE0004 // Cast is necessary, otherwise incorrect value is created

                 #region Error handling
-                if(unreadableSectors.Count > 0 && !aborted)
+                if(resume.BadBlocks.Count > 0 && !aborted)
                 {
-                    List<ulong> tmpList = new List<ulong>();
-
-                    foreach(ulong ur in unreadableSectors)
-                    {
-                        for(ulong i = ur; i < ur + blocksToRead; i++)
-                            tmpList.Add(i);
-                    }
-
-                    tmpList.Sort();
                     int pass = 0;
                     bool forward = true;
                     bool runningPersistent = false;

-                    unreadableSectors = tmpList;
-
 repeatRetryLba:
-                    ulong[] tmpArray = unreadableSectors.ToArray();
+                    ulong[] tmpArray = resume.BadBlocks.ToArray();
                     foreach(ulong badSector in tmpArray)
                     {
                         if(aborted)
+                        {
+                            currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
                             break;
+                        }

                         DicConsole.Write("\rRetrying sector {0}, pass {1}, {3}{2}", badSector, pass + 1, forward ? "forward" : "reverse", runningPersistent ?
"recovering partial data, " : ""); @@ -298,25 +303,28 @@ namespace DiscImageChef.Core.Devices.Dumping if(!error) { - unreadableSectors.Remove(badSector); + resume.BadBlocks.Remove(badSector); + extents.Add(badSector); dumpFile.WriteAt(cmdBuf, badSector, blockSize); } else if(runningPersistent) dumpFile.WriteAt(cmdBuf, badSector, blockSize); } - if(pass < retryPasses && !aborted && unreadableSectors.Count > 0) + if(pass < retryPasses && !aborted && resume.BadBlocks.Count > 0) { pass++; forward = !forward; - unreadableSectors.Sort(); - unreadableSectors.Reverse(); + resume.BadBlocks.Sort(); + resume.BadBlocks.Reverse(); goto repeatRetryLba; } DicConsole.WriteLine(); } #endregion Error handling LBA + + currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents); } else { @@ -334,7 +342,10 @@ namespace DiscImageChef.Core.Devices.Dumping for(byte Sc = 1; Sc < sectors; Sc++) { if(aborted) + { + currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents); break; + } #pragma warning disable RECS0018 // Comparison of floating point numbers with equality operator if(currentSpeed > maxSpeed && currentSpeed != 0) @@ -354,10 +365,11 @@ namespace DiscImageChef.Core.Devices.Dumping mhddLog.Write(currentBlock, duration); ibgLog.Write(currentBlock, currentSpeed * 1024); dumpFile.Write(cmdBuf); + extents.Add(currentBlock); } else { - unreadableSectors.Add(currentBlock); + resume.BadBlocks.Add(currentBlock); if(duration < 500) mhddLog.Write(currentBlock, 65535); else @@ -567,13 +579,9 @@ namespace DiscImageChef.Core.Devices.Dumping DicConsole.WriteLine("Avegare speed: {0:F3} MiB/sec.", (((double)blockSize * (double)(blocks + 1)) / 1048576) / (totalDuration / 1000)); DicConsole.WriteLine("Fastest speed burst: {0:F3} MiB/sec.", maxSpeed); DicConsole.WriteLine("Slowest speed burst: {0:F3} MiB/sec.", minSpeed); - DicConsole.WriteLine("{0} sectors could not be read.", unreadableSectors.Count); - if(unreadableSectors.Count > 0) - { - unreadableSectors.Sort(); - foreach(ulong bad in unreadableSectors) - DicConsole.WriteLine("Sector {0} could not be read", bad); - } + DicConsole.WriteLine("{0} sectors could not be read.", resume.BadBlocks.Count); + if(resume.BadBlocks.Count > 0) + resume.BadBlocks.Sort(); DicConsole.WriteLine(); if(!aborted) diff --git a/DiscImageChef.Core/Devices/Dumping/CompactDisc.cs b/DiscImageChef.Core/Devices/Dumping/CompactDisc.cs index 00c76508..89fb8f5e 100644 --- a/DiscImageChef.Core/Devices/Dumping/CompactDisc.cs +++ b/DiscImageChef.Core/Devices/Dumping/CompactDisc.cs @@ -45,11 +45,13 @@ using DiscImageChef.Devices; using Schemas; using System.Linq; using DiscImageChef.Decoders.CD; +using Extents; namespace DiscImageChef.Core.Devices.Dumping { internal class CompactDisc { + // TODO: Add support for resume file internal static void Dump(Device dev, string devicePath, string outputPrefix, ushort retryPasses, bool force, bool dumpRaw, bool persistent, bool stopOnError, ref CICMMetadataType sidecar, ref MediaType dskType, bool separateSubchannel, ref Metadata.Resume resume) { MHDDLog mhddLog; @@ -68,7 +70,6 @@ namespace DiscImageChef.Core.Devices.Dumping double currentSpeed = 0; double maxSpeed = double.MinValue; double minSpeed = double.MaxValue; - List unreadableSectors = new List(); Checksum dataChk; bool readcd = false; byte[] readBuffer; @@ -377,6 +378,12 @@ namespace DiscImageChef.Core.Devices.Dumping DicConsole.WriteLine("Using MMC READ CD command."); } + DumpHardwareType currentTry = null; + ExtentsULong extents = null; + ResumeSupport.Process(true, true, blocks, 
+                                  dev.Manufacturer, dev.Model, dev.Serial, dev.PlatformID, ref resume, ref currentTry, ref extents);
+            if(currentTry == null || extents == null)
+                throw new Exception("Could not process resume file, not continuing...");
+
             DicConsole.WriteLine("Trying to read Lead-In...");
             bool gotLeadIn = false;
             int leadInSectorsGood = 0, leadInSectorsTotal = 0;
@@ -388,7 +395,7 @@

             readBuffer = null;

-            for(int leadInBlock = -150; leadInBlock < 0; leadInBlock++)
+            for(int leadInBlock = -150; leadInBlock < 0 && resume.NextBlock == 0; leadInBlock++)
             {
                 if(aborted)
                     break;
@@ -434,7 +441,7 @@
             {
                 sidecar.OpticalDisc[0].LeadIn = new BorderType[]
                 {
-                    sidecar.OpticalDisc[0].LeadIn[0] = new BorderType
+                    new BorderType
                     {
                         Image = outputPrefix + ".leadin.bin",
                         Checksums = dataChk.End().ToArray(),
@@ -477,6 +484,10 @@
             mhddLog = new MHDDLog(outputPrefix + ".mhddlog.bin", dev, blocks, blockSize, blocksToRead);
             ibgLog = new IBGLog(outputPrefix + ".ibg", 0x0008);

+            dumpFile.Seek(resume.NextBlock, (ulong)sectorSize);
+            if(separateSubchannel)
+                subFile.Seek(resume.NextBlock, subSize);
+
             start = DateTime.UtcNow;
             for(int t = 0; t < tracks.Count(); t++)
             {
@@ -511,10 +522,13 @@
                 bool checkedDataFormat = false;

-                for(ulong i = (ulong)tracks[t].StartSector; i <= (ulong)tracks[t].EndSector; i += blocksToRead)
+                for(ulong i = resume.NextBlock; i <= (ulong)tracks[t].EndSector; i += blocksToRead)
                 {
                     if(aborted)
+                    {
+                        currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
                         break;
+                    }

                     double cmdDuration = 0;
@@ -541,6 +555,7 @@
                     {
                         mhddLog.Write(i, cmdDuration);
                         ibgLog.Write(i, currentSpeed * 1024);
+                        extents.Add(i, blocksToRead, true);
                         if(separateSubchannel)
                         {
                             for(int b = 0; b < blocksToRead; b++)
@@ -567,11 +582,9 @@
                         else
                             dumpFile.Write(new byte[blockSize * blocksToRead]);

-                        // TODO: Record error on mapfile
-
                         errored += blocksToRead;
                         for(ulong b = i; b < i + blocksToRead; b++)
-                            unreadableSectors.Add(b);
+                            resume.BadBlocks.Add(b);
                         DicConsole.DebugWriteLine("Dump-Media", "READ error:\n{0}", Decoders.SCSI.Sense.PrettifySense(senseBuf));
                         if(cmdDuration < 500)
                             mhddLog.Write(i, 65535);
                         else
@@ -608,6 +621,7 @@
 #pragma warning disable IDE0004 // Remove Unnecessary Cast
                     currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000);
 #pragma warning restore IDE0004 // Remove Unnecessary Cast
+                    resume.NextBlock = i + blocksToRead;
                 }
             }
             DicConsole.WriteLine();
@@ -618,30 +632,21 @@
 #pragma warning restore IDE0004 // Remove Unnecessary Cast

             #region Compact Disc Error handling
-            if(unreadableSectors.Count > 0 && !aborted)
+            if(resume.BadBlocks.Count > 0 && !aborted)
             {
-                List<ulong> tmpList = new List<ulong>();
-
-                foreach(ulong ur in unreadableSectors)
-                {
-                    for(ulong i = ur; i < ur + blocksToRead; i++)
-                        tmpList.Add(i);
-                }
-
-                tmpList.Sort();
-
                 int pass = 0;
                 bool forward = true;
                 bool runningPersistent = false;

-                unreadableSectors = tmpList;
-
 cdRepeatRetry:
-                ulong[] tmpArray = unreadableSectors.ToArray();
+                ulong[] tmpArray = resume.BadBlocks.ToArray();
                 foreach(ulong badSector in tmpArray)
                 {
                     if(aborted)
+                    {
+                        currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
                         break;
+                    }

                     double cmdDuration = 0;
@@ -657,7 +662,10 @@ namespace DiscImageChef.Core.Devices.Dumping
                     if((!sense && !dev.Error) || runningPersistent)
                     {
                         if(!sense && !dev.Error)
-                            unreadableSectors.Remove(badSector);
+                        {
+                            resume.BadBlocks.Remove(badSector);
+                            extents.Add(badSector);
+                        }

                         if(separateSubchannel)
                         {
@@ -669,12 +677,12 @@
                     }
                 }

-                if(pass < retryPasses && !aborted && unreadableSectors.Count > 0)
+                if(pass < retryPasses && !aborted && resume.BadBlocks.Count > 0)
                 {
                     pass++;
                     forward = !forward;
-                    unreadableSectors.Sort();
-                    unreadableSectors.Reverse();
+                    resume.BadBlocks.Sort();
+                    resume.BadBlocks.Reverse();
                     goto cdRepeatRetry;
                 }
@@ -756,6 +764,8 @@
                 DicConsole.WriteLine();
             }
             #endregion Compact Disc Error handling
+            resume.BadBlocks.Sort();
+            currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);

             dataChk = new Checksum();
             dumpFile.Seek(0, SeekOrigin.Begin);
@@ -821,20 +831,7 @@
             // TODO: Correct this
             sidecar.OpticalDisc[0].Checksums = dataChk.End().ToArray();
-            sidecar.OpticalDisc[0].DumpHardwareArray = new DumpHardwareType[1];
-            sidecar.OpticalDisc[0].DumpHardwareArray[0] = new DumpHardwareType
-            {
-                Extents = new ExtentType[1]
-            };
-            sidecar.OpticalDisc[0].DumpHardwareArray[0].Extents[0] = new ExtentType
-            {
-                Start = 0,
-                End = blocks - 1
-            };
-            sidecar.OpticalDisc[0].DumpHardwareArray[0].Manufacturer = dev.Manufacturer;
-            sidecar.OpticalDisc[0].DumpHardwareArray[0].Model = dev.Model;
-            sidecar.OpticalDisc[0].DumpHardwareArray[0].Revision = dev.Revision;
-            sidecar.OpticalDisc[0].DumpHardwareArray[0].Software = Version.GetSoftwareType(dev.PlatformID);
+            sidecar.OpticalDisc[0].DumpHardwareArray = resume.Tries.ToArray();
             sidecar.OpticalDisc[0].Image = new ImageType
             {
                 format = "Raw disk image (sector by sector copy)",
@@ -847,6 +844,20 @@
             Metadata.MediaType.MediaTypeToString(dskType, out string xmlDskTyp, out string xmlDskSubTyp);
             sidecar.OpticalDisc[0].DiscType = xmlDskTyp;
             sidecar.OpticalDisc[0].DiscSubType = xmlDskSubTyp;
+
+            if(!aborted)
+            {
+                DicConsole.WriteLine("Writing metadata sidecar");
+
+                FileStream xmlFs = new FileStream(outputPrefix + ".cicm.xml",
+                                                  FileMode.Create);
+
+                System.Xml.Serialization.XmlSerializer xmlSer = new System.Xml.Serialization.XmlSerializer(typeof(CICMMetadataType));
+                xmlSer.Serialize(xmlFs, sidecar);
+                xmlFs.Close();
+            }
+
+            Statistics.AddMedia(dskType, true);
         }
     }
 }
diff --git a/DiscImageChef.Core/Devices/Dumping/ResumeSupport.cs b/DiscImageChef.Core/Devices/Dumping/ResumeSupport.cs
index f047600d..a6c6bfe3 100644
--- a/DiscImageChef.Core/Devices/Dumping/ResumeSupport.cs
+++ b/DiscImageChef.Core/Devices/Dumping/ResumeSupport.cs
@@ -35,13 +35,94 @@
 // Copyright (C) 2011-2015 Claunia.com
 // ****************************************************************************/
 // //$Id$
-using System;
+using System;
+using System.Collections.Generic;
+using DiscImageChef.Metadata;
+using Extents;
+using Schemas;
+
 namespace DiscImageChef.Core.Devices.Dumping
 {
-    public class ResumeSupport
+    public static class ResumeSupport
     {
-        public ResumeSupport()
+        public static void Process(bool isLba, bool removable, ulong blocks, string Manufacturer, string Model, string Serial, Interop.PlatformID platform, ref Resume resume, ref DumpHardwareType currentTry, ref ExtentsULong extents)
         {
+            if(resume != null)
+            {
+                if(!isLba)
+                    throw new NotImplementedException("Resuming CHS devices is currently not supported.");
+
+                if(resume.Removable != removable)
throw new Exception(string.Format("Resume file specifies a {0} device but you're requesting to dump a {1} device, not continuing...", + resume.Removable ? "removable" : "non removable", + removable ? "removable" : "non removable")); + + if(resume.LastBlock != blocks - 1) + throw new Exception(string.Format("Resume file specifies a device with {0} blocks but you're requesting to dump one with {1} blocks, not continuing...", + resume.LastBlock + 1, blocks)); + + + foreach(DumpHardwareType oldtry in resume.Tries) + { + if(oldtry.Manufacturer != Manufacturer && !removable) + throw new Exception(string.Format("Resume file specifies a device manufactured by {0} but you're requesting to dump one by {1}, not continuing...", + oldtry.Manufacturer, Manufacturer)); + + if(oldtry.Model != Model && !removable) + throw new Exception(string.Format("Resume file specifies a device model {0} but you're requesting to dump model {1}, not continuing...", + oldtry.Model, Model)); + + if(oldtry.Serial != Serial && !removable) + throw new Exception(string.Format("Resume file specifies a device with serial {0} but you're requesting to dump one with serial {1}, not continuing...", + oldtry.Serial, Serial)); + + if(oldtry.Software == null) + throw new Exception("Found corrupt resume file, cannot continue..."); + + if(oldtry.Software.Name == "DiscImageChef" && oldtry.Software.OperatingSystem == platform.ToString() && oldtry.Software.Version == Version.GetVersion()) + { + if(removable && (oldtry.Manufacturer != Manufacturer || oldtry.Model != Model || oldtry.Serial != Serial)) + continue; + + currentTry = oldtry; + extents = ExtentsConverter.FromMetadata(currentTry.Extents); + break; + } + } + + if(currentTry == null) + { + currentTry = new DumpHardwareType + { + Software = Version.GetSoftwareType(platform), + Manufacturer = Manufacturer, + Model = Model, + Serial = Serial, + }; + resume.Tries.Add(currentTry); + extents = new ExtentsULong(); + } + } + else + { + resume = new Resume + { + Tries = new List(), + CreationDate = DateTime.UtcNow, + BadBlocks = new List(), + LastBlock = blocks - 1 + }; + currentTry = new DumpHardwareType + { + Software = Version.GetSoftwareType(platform), + Manufacturer = Manufacturer, + Model = Model, + Serial = Serial + }; + resume.Tries.Add(currentTry); + extents = new ExtentsULong(); + resume.Removable = removable; + } } } } diff --git a/DiscImageChef.Core/Devices/Dumping/SBC.cs b/DiscImageChef.Core/Devices/Dumping/SBC.cs index b782f327..9357849d 100644 --- a/DiscImageChef.Core/Devices/Dumping/SBC.cs +++ b/DiscImageChef.Core/Devices/Dumping/SBC.cs @@ -47,6 +47,7 @@ using DiscImageChef.Filters; using DiscImageChef.ImagePlugins; using DiscImageChef.PartPlugins; using Schemas; +using Extents; namespace DiscImageChef.Core.Devices.Dumping { @@ -75,7 +76,6 @@ namespace DiscImageChef.Core.Devices.Dumping double currentSpeed = 0; double maxSpeed = double.MinValue; double minSpeed = double.MaxValue; - List unreadableSectors = new List(); Checksum dataChk; byte[] readBuffer; uint blocksToRead = 64; @@ -102,6 +102,12 @@ namespace DiscImageChef.Core.Devices.Dumping DicConsole.WriteLine("Media has {0} blocks of {1} bytes/each. 
(for a total of {2} bytes)", blocks, blockSize, blocks * (ulong)blockSize); } + // Check how many blocks to read, if error show and return + if(scsiReader.GetBlocksToRead()) + { + DicConsole.ErrorWriteLine(scsiReader.ErrorMessage); + return; + } blocksToRead = scsiReader.BlocksToRead; logicalBlockSize = blockSize; physicalBlockSize = scsiReader.PhysicalBlockSize; @@ -315,10 +321,20 @@ namespace DiscImageChef.Core.Devices.Dumping readBuffer = null; - for(ulong i = 0; i < blocks; i += blocksToRead) + DumpHardwareType currentTry = null; + ExtentsULong extents = null; + ResumeSupport.Process(true, dev.IsRemovable, blocks, dev.Manufacturer, dev.Model, dev.Serial, dev.PlatformID, ref resume, ref currentTry, ref extents); + if(currentTry == null || extents == null) + throw new Exception("Could not process resume file, not continuing..."); + dumpFile.Seek(resume.NextBlock, blockSize); + + for(ulong i = resume.NextBlock; i < blocks; i += blocksToRead) { if(aborted) + { + currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents); break; + } if((blocks - i) < blocksToRead) blocksToRead = (uint)(blocks - i); @@ -340,6 +356,7 @@ namespace DiscImageChef.Core.Devices.Dumping mhddLog.Write(i, cmdDuration); ibgLog.Write(i, currentSpeed * 1024); dumpFile.Write(readBuffer); + extents.Add(i, blocksToRead, true); } else { @@ -350,11 +367,9 @@ namespace DiscImageChef.Core.Devices.Dumping // Write empty data dumpFile.Write(new byte[blockSize * blocksToRead]); - // TODO: Record error on mapfile - errored += blocksToRead; for(ulong b = i; b < i + blocksToRead; b++) - unreadableSectors.Add(b); + resume.BadBlocks.Add(b); if(cmdDuration < 500) mhddLog.Write(i, 65535); else @@ -366,6 +381,7 @@ namespace DiscImageChef.Core.Devices.Dumping #pragma warning disable IDE0004 // Remove Unnecessary Cast currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000); #pragma warning restore IDE0004 // Remove Unnecessary Cast + resume.NextBlock = i + blocksToRead; } end = DateTime.UtcNow; DicConsole.WriteLine(); @@ -375,30 +391,21 @@ namespace DiscImageChef.Core.Devices.Dumping #pragma warning restore IDE0004 // Remove Unnecessary Cast #region Error handling - if(unreadableSectors.Count > 0 && !aborted) + if(resume.BadBlocks.Count > 0 && !aborted) { - List tmpList = new List(); - - foreach(ulong ur in unreadableSectors) - { - for(ulong i = ur; i < ur + blocksToRead; i++) - tmpList.Add(i); - } - - tmpList.Sort(); - int pass = 0; bool forward = true; bool runningPersistent = false; - unreadableSectors = tmpList; - repeatRetry: - ulong[] tmpArray = unreadableSectors.ToArray(); + ulong[] tmpArray = resume.BadBlocks.ToArray(); foreach(ulong badSector in tmpArray) { if(aborted) + { + currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents); break; + } DicConsole.Write("\rRetrying sector {0}, pass {1}, {3}{2}", badSector, pass + 1, forward ? "forward" : "reverse", runningPersistent ? 
"recovering partial data, " : ""); @@ -407,19 +414,20 @@ namespace DiscImageChef.Core.Devices.Dumping if(!sense && !dev.Error) { - unreadableSectors.Remove(badSector); + resume.BadBlocks.Remove(badSector); + extents.Add(badSector); dumpFile.WriteAt(readBuffer, badSector, blockSize); } else if(runningPersistent) dumpFile.WriteAt(readBuffer, badSector, blockSize); } - if(pass < retryPasses && !aborted && unreadableSectors.Count > 0) + if(pass < retryPasses && !aborted && resume.BadBlocks.Count > 0) { pass++; forward = !forward; - unreadableSectors.Sort(); - unreadableSectors.Reverse(); + resume.BadBlocks.Sort(); + resume.BadBlocks.Reverse(); goto repeatRetry; } @@ -535,6 +543,8 @@ namespace DiscImageChef.Core.Devices.Dumping DicConsole.WriteLine(); } #endregion Error handling + resume.BadBlocks.Sort(); + currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents); dataChk = new Checksum(); dumpFile.Seek(0, SeekOrigin.Begin); @@ -700,20 +710,7 @@ namespace DiscImageChef.Core.Devices.Dumping if(opticalDisc) { sidecar.OpticalDisc[0].Checksums = dataChk.End().ToArray(); - sidecar.OpticalDisc[0].DumpHardwareArray = new DumpHardwareType[1]; - sidecar.OpticalDisc[0].DumpHardwareArray[0] = new DumpHardwareType - { - Extents = new ExtentType[1] - }; - sidecar.OpticalDisc[0].DumpHardwareArray[0].Extents[0] = new ExtentType - { - Start = 0, - End = blocks - 1 - }; - sidecar.OpticalDisc[0].DumpHardwareArray[0].Manufacturer = dev.Manufacturer; - sidecar.OpticalDisc[0].DumpHardwareArray[0].Model = dev.Model; - sidecar.OpticalDisc[0].DumpHardwareArray[0].Revision = dev.Revision; - sidecar.OpticalDisc[0].DumpHardwareArray[0].Software = Version.GetSoftwareType(dev.PlatformID); + sidecar.OpticalDisc[0].DumpHardwareArray = resume.Tries.ToArray(); sidecar.OpticalDisc[0].Image = new ImageType { format = "Raw disk image (sector by sector copy)", @@ -822,22 +819,7 @@ namespace DiscImageChef.Core.Devices.Dumping sidecar.BlockMedia[0].FileSystemInformation = xmlFileSysInfo; if(dev.IsRemovable) - { - sidecar.BlockMedia[0].DumpHardwareArray = new DumpHardwareType[1]; - sidecar.BlockMedia[0].DumpHardwareArray[0] = new DumpHardwareType - { - Extents = new ExtentType[1] - }; - sidecar.BlockMedia[0].DumpHardwareArray[0].Extents[0] = new ExtentType - { - Start = 0, - End = blocks - 1 - }; - sidecar.BlockMedia[0].DumpHardwareArray[0].Manufacturer = dev.Manufacturer; - sidecar.BlockMedia[0].DumpHardwareArray[0].Model = dev.Model; - sidecar.BlockMedia[0].DumpHardwareArray[0].Revision = dev.Revision; - sidecar.BlockMedia[0].DumpHardwareArray[0].Software = Version.GetSoftwareType(dev.PlatformID); - } + sidecar.BlockMedia[0].DumpHardwareArray = resume.Tries.ToArray(); } DicConsole.WriteLine(); @@ -848,13 +830,7 @@ namespace DiscImageChef.Core.Devices.Dumping #pragma warning restore IDE0004 // Cast is necessary, otherwise incorrect value is created DicConsole.WriteLine("Fastest speed burst: {0:F3} MiB/sec.", maxSpeed); DicConsole.WriteLine("Slowest speed burst: {0:F3} MiB/sec.", minSpeed); - DicConsole.WriteLine("{0} sectors could not be read.", unreadableSectors.Count); - if(unreadableSectors.Count > 0) - { - unreadableSectors.Sort(); - foreach(ulong bad in unreadableSectors) - DicConsole.WriteLine("Sector {0} could not be read", bad); - } + DicConsole.WriteLine("{0} sectors could not be read.", resume.BadBlocks.Count); DicConsole.WriteLine(); if(!aborted) diff --git a/DiscImageChef.Core/Devices/Dumping/SCSI.cs b/DiscImageChef.Core/Devices/Dumping/SCSI.cs index 8c9a39e7..6fc22ee1 100644 --- 
--- a/DiscImageChef.Core/Devices/Dumping/SCSI.cs
+++ b/DiscImageChef.Core/Devices/Dumping/SCSI.cs
@@ -138,28 +138,13 @@ namespace DiscImageChef.Core.Devices.Dumping
                 return;
             }

-            FileStream xmlFs = new FileStream(outputPrefix + ".cicm.xml",
-                                              FileMode.Create);
-            System.Xml.Serialization.XmlSerializer xmlSer = new System.Xml.Serialization.XmlSerializer(typeof(CICMMetadataType));
-
             if(dev.SCSIType == Decoders.SCSI.PeripheralDeviceTypes.MultiMediaDevice)
             {
                 MMC.Dump(dev, devicePath, outputPrefix, retryPasses, force, dumpRaw, persistent, stopOnError, ref sidecar, ref dskType, separateSubchannel, ref resume);
-
-                DicConsole.WriteLine("Writing metadata sidecar");
-
-                xmlSer.Serialize(xmlFs, sidecar);
-                xmlFs.Close();
-
                 return;
             }

             SBC.Dump(dev, devicePath, outputPrefix, retryPasses, force, dumpRaw, persistent, stopOnError, ref sidecar, ref dskType, false, ref resume);
-
-            DicConsole.WriteLine("Writing metadata sidecar");
-
-            xmlSer.Serialize(xmlFs, sidecar);
-            xmlFs.Close();
         }
     }
 }
diff --git a/DiscImageChef.Core/Devices/Dumping/XGD.cs b/DiscImageChef.Core/Devices/Dumping/XGD.cs
index af75a36b..474c4d6f 100644
--- a/DiscImageChef.Core/Devices/Dumping/XGD.cs
+++ b/DiscImageChef.Core/Devices/Dumping/XGD.cs
@@ -46,6 +46,7 @@ using DiscImageChef.Filesystems;
 using DiscImageChef.Filters;
 using DiscImageChef.ImagePlugins;
 using DiscImageChef.PartPlugins;
+using Extents;
 using Schemas;

 namespace DiscImageChef.Core.Devices.Dumping
@@ -68,7 +69,6 @@
             double currentSpeed = 0;
             double maxSpeed = double.MinValue;
             double minSpeed = double.MaxValue;
-            List<ulong> unreadableSectors = new List<ulong>();
             Checksum dataChk;
             DataFile dumpFile = null;
             bool aborted = false;
@@ -218,12 +218,28 @@
             readBuffer = null;

-            ulong currentSector = 0;
             double cmdDuration = 0;
             uint saveBlocksToRead = blocksToRead;
+            DumpHardwareType currentTry = null;
+            ExtentsULong extents = null;
+            ResumeSupport.Process(true, true, totalSize, dev.Manufacturer, dev.Model, dev.Serial, dev.PlatformID, ref resume, ref currentTry, ref extents);
+            if(currentTry == null || extents == null)
+                throw new Exception("Could not process resume file, not continuing...");
+            ulong currentSector = resume.NextBlock;
+            dumpFile.Seek(resume.NextBlock, blockSize);

             for(int e = 0; e <= 16; e++)
             {
+                if(aborted)
+                {
+                    resume.NextBlock = currentSector;
+                    currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
+                    break;
+                }
+
+                if(currentSector >= blocks)
+                    break;
+
                 ulong extentStart, extentEnd;
                 // Extents
                 if(e < 16)
@@ -244,11 +260,18 @@
                     extentEnd = blocks;
                 }

+                if(currentSector > extentEnd)
+                    continue;
+
                 for(ulong i = currentSector; i < extentStart; i += blocksToRead)
                 {
                     saveBlocksToRead = blocksToRead;
+
                     if(aborted)
+                    {
+                        currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
                         break;
+                    }

                     if((extentStart - i) < blocksToRead)
                         blocksToRead = (uint)(extentStart - i);
@@ -270,6 +293,7 @@
                         mhddLog.Write(i, cmdDuration);
                         ibgLog.Write(i, currentSpeed * 1024);
                         dumpFile.Write(readBuffer);
+                        extents.Add(i, blocksToRead, true);
                     }
                     else
                     {
@@ -280,11 +304,9 @@
                         // Write empty data
                         dumpFile.Write(new byte[blockSize * blocksToRead]);

-                        // TODO: Record error on mapfile
-
                         errored += blocksToRead;
                         for(ulong b = i; b < i + blocksToRead; b++)
-                            unreadableSectors.Add(b);
+                            resume.BadBlocks.Add(b);
                         DicConsole.DebugWriteLine("Dump-Media", "READ error:\n{0}",
                                                  Decoders.SCSI.Sense.PrettifySense(senseBuf));
                         if(cmdDuration < 500)
                             mhddLog.Write(i, 65535);
@@ -298,13 +320,18 @@
                     currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000);
 #pragma warning restore IDE0004 // Remove Unnecessary Cast
                     blocksToRead = saveBlocksToRead;
+                    currentSector = i + 1;
+                    resume.NextBlock = currentSector;
                 }

                 for(ulong i = extentStart; i <= extentEnd; i += blocksToRead)
                 {
                     saveBlocksToRead = blocksToRead;
                     if(aborted)
+                    {
+                        currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
                         break;
+                    }

                     if((extentEnd - i) < blocksToRead)
                         blocksToRead = (uint)(extentEnd - i) + 1;
@@ -313,18 +340,23 @@
                     ibgLog.Write(i, currentSpeed * 1024);
                     dumpFile.Write(new byte[blocksToRead * 2048]);
                     blocksToRead = saveBlocksToRead;
+                    extents.Add(i, blocksToRead, true);
+                    currentSector = i + 1;
+                    resume.NextBlock = currentSector;
                 }

-                currentSector = extentEnd + 1;
-                if(currentSector >= blocks)
-                    break;
+                if(!aborted)
+                    currentSector = extentEnd + 1;
             }

             // Middle Zone D
-            for(ulong middle = 0; middle < (middleZone - 1); middle += blocksToRead)
+            for(ulong middle = currentSector - blocks - 1; middle < (middleZone - 1); middle += blocksToRead)
             {
                 if(aborted)
+                {
+                    currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
                     break;
+                }

                 if(((middleZone - 1) - middle) < blocksToRead)
                     blocksToRead = (uint)((middleZone - 1) - middle);
@@ -334,8 +366,10 @@
                 mhddLog.Write(middle + currentSector, cmdDuration);
                 ibgLog.Write(middle + currentSector, currentSpeed * 1024);
                 dumpFile.Write(new byte[blockSize * blocksToRead]);
+                extents.Add(currentSector, blocksToRead, true);

                 currentSector += blocksToRead;
+                resume.NextBlock = currentSector;
             }

             blocksToRead = saveBlocksToRead;
@@ -354,10 +388,13 @@
             }

             // Video Layer 1
-            for(ulong l1 = l0Video; l1 < (l0Video + l1Video); l1 += blocksToRead)
+            for(ulong l1 = currentSector - blocks - middleZone + l0Video; l1 < (l0Video + l1Video); l1 += blocksToRead)
             {
                 if(aborted)
+                {
+                    currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
                     break;
+                }

                 if(((l0Video + l1Video) - l1) < blocksToRead)
                     blocksToRead = (uint)((l0Video + l1Video) - l1);
@@ -379,6 +416,7 @@
                     mhddLog.Write(currentSector, cmdDuration);
                     ibgLog.Write(currentSector, currentSpeed * 1024);
                     dumpFile.Write(readBuffer);
+                    extents.Add(currentSector, blocksToRead, true);
                 }
                 else
                 {
@@ -389,11 +427,9 @@
                     // Write empty data
                     dumpFile.Write(new byte[blockSize * blocksToRead]);

-                    // TODO: Record error on mapfile
-
                     // TODO: Handle errors in video partition
                     //errored += blocksToRead;
-                    //unreadableSectors.Add(l1);
+                    //resume.BadBlocks.Add(l1);
                     DicConsole.DebugWriteLine("Dump-Media", "READ error:\n{0}", Decoders.SCSI.Sense.PrettifySense(senseBuf));
                     if(cmdDuration < 500)
                         mhddLog.Write(l1, 65535);
                     else
@@ -407,6 +443,7 @@
                 currentSpeed = ((double)blockSize * blocksToRead / (double)1048576) / (cmdDuration / (double)1000);
 #pragma warning restore IDE0004 // Remove Unnecessary Cast
                 currentSector += blocksToRead;
+                resume.NextBlock = currentSector;
             }

             sense = dev.KreonUnlockWxripper(out senseBuf, dev.Timeout, out duration);
@@ -430,11 +467,11 @@
 #pragma warning restore IDE0004 // Remove Unnecessary Cast

             #region Error handling
-            if(unreadableSectors.Count > 0 && !aborted)
+            if(resume.BadBlocks.Count > 0 && !aborted)
             {
                 List<ulong> tmpList = new List<ulong>();
-                foreach(ulong ur in unreadableSectors)
+                foreach(ulong ur in resume.BadBlocks)
                 {
                     for(ulong i = ur; i < ur + blocksToRead; i++)
                         tmpList.Add(i);
                 }
@@ -446,14 +483,17 @@
                 bool forward = true;
                 bool runningPersistent = false;

-                unreadableSectors = tmpList;
+                resume.BadBlocks = tmpList;
 repeatRetry:
-                ulong[] tmpArray = unreadableSectors.ToArray();
+                ulong[] tmpArray = resume.BadBlocks.ToArray();
                 foreach(ulong badSector in tmpArray)
                 {
                     if(aborted)
+                    {
+                        currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);
                         break;
+                    }

                     cmdDuration = 0;
@@ -464,19 +504,20 @@
                     if(!sense && !dev.Error)
                     {
-                        unreadableSectors.Remove(badSector);
+                        resume.BadBlocks.Remove(badSector);
+                        extents.Add(badSector);
                         dumpFile.WriteAt(readBuffer, badSector, blockSize);
                     }
                     else if(runningPersistent)
                         dumpFile.WriteAt(readBuffer, badSector, blockSize);
                 }

-                if(pass < retryPasses && !aborted && unreadableSectors.Count > 0)
+                if(pass < retryPasses && !aborted && resume.BadBlocks.Count > 0)
                 {
                     pass++;
                     forward = !forward;
-                    unreadableSectors.Sort();
-                    unreadableSectors.Reverse();
+                    resume.BadBlocks.Sort();
+                    resume.BadBlocks.Reverse();
                     goto repeatRetry;
                 }
@@ -592,6 +633,8 @@
                 DicConsole.WriteLine();
             }
             #endregion Error handling
+            resume.BadBlocks.Sort();
+            currentTry.Extents = Metadata.ExtentsConverter.ToMetadata(extents);

             dataChk = new Checksum();
             dumpFile.Seek(0, SeekOrigin.Begin);
@@ -757,20 +800,7 @@
             }

             sidecar.OpticalDisc[0].Checksums = dataChk.End().ToArray();
-            sidecar.OpticalDisc[0].DumpHardwareArray = new DumpHardwareType[1];
-            sidecar.OpticalDisc[0].DumpHardwareArray[0] = new DumpHardwareType
-            {
-                Extents = new ExtentType[1]
-            };
-            sidecar.OpticalDisc[0].DumpHardwareArray[0].Extents[0] = new ExtentType
-            {
-                Start = 0,
-                End = blocks - 1
-            };
-            sidecar.OpticalDisc[0].DumpHardwareArray[0].Manufacturer = dev.Manufacturer;
-            sidecar.OpticalDisc[0].DumpHardwareArray[0].Model = dev.Model;
-            sidecar.OpticalDisc[0].DumpHardwareArray[0].Revision = dev.Revision;
-            sidecar.OpticalDisc[0].DumpHardwareArray[0].Software = Version.GetSoftwareType(dev.PlatformID);
+            sidecar.OpticalDisc[0].DumpHardwareArray = resume.Tries.ToArray();
             sidecar.OpticalDisc[0].Image = new ImageType
             {
                 format = "Raw disk image (sector by sector copy)",
@@ -816,6 +846,20 @@
             Metadata.MediaType.MediaTypeToString(dskType, out string xmlDskTyp, out string xmlDskSubTyp);
             sidecar.OpticalDisc[0].DiscType = xmlDskTyp;
             sidecar.OpticalDisc[0].DiscSubType = xmlDskSubTyp;
+
+            if(!aborted)
+            {
+                DicConsole.WriteLine("Writing metadata sidecar");
+
+                FileStream xmlFs = new FileStream(outputPrefix + ".cicm.xml",
+                                                  FileMode.Create);
+
+                System.Xml.Serialization.XmlSerializer xmlSer = new System.Xml.Serialization.XmlSerializer(typeof(CICMMetadataType));
+                xmlSer.Serialize(xmlFs, sidecar);
+                xmlFs.Close();
+            }
+
+            Statistics.AddMedia(dskType, true);
         }
     }
 }
diff --git a/DiscImageChef.Core/DiscImageChef.Core.csproj b/DiscImageChef.Core/DiscImageChef.Core.csproj
index 5894337d..cebaa192 100644
--- a/DiscImageChef.Core/DiscImageChef.Core.csproj
+++ b/DiscImageChef.Core/DiscImageChef.Core.csproj
@@ -78,6 +78,7 @@
+
diff --git a/DiscImageChef.Metadata/MediaType.cs b/DiscImageChef.Metadata/MediaType.cs
index 01918c9a..d998805c 100644
--- a/DiscImageChef.Metadata/MediaType.cs
+++ b/DiscImageChef.Metadata/MediaType.cs
@@ -502,6 +502,10 @@ namespace DiscImageChef.Metadata
                     DiscType = "5.25\" floppy";
                     DiscSubType = "Atari double-density";
                     break;
+                case CommonTypes.MediaType.ATARI_35_DS_DD:
+                    DiscType = "3.5\" floppy";
+                    DiscSubType = "Atari ST double-density, double-sided, 10 sectors";
+                    break;
                 case CommonTypes.MediaType.CBM_1540:
                 case CommonTypes.MediaType.CBM_1540_Ext:
                     DiscType = "5.25\" floppy";
diff --git a/DiscImageChef.Metadata/Resume.cs b/DiscImageChef.Metadata/Resume.cs
index 7497a679..47c562b4 100644
--- a/DiscImageChef.Metadata/Resume.cs
+++ b/DiscImageChef.Metadata/Resume.cs
@@ -40,17 +40,17 @@ namespace DiscImageChef.Metadata
     [XmlRoot("DicResume", Namespace = "", IsNullable = false)]
     public class Resume
     {
-        [XmlElement(DataType = "date")]
+        [XmlElement(DataType = "dateTime")]
         public DateTime CreationDate;
-        [XmlElement(DataType = "date")]
+        [XmlElement(DataType = "dateTime")]
         public DateTime LastWriteDate;
         public bool Removable;
         public ulong LastBlock;
-        public ulong LastTriedBlock;
+        public ulong NextBlock;
         [XmlArrayItem("DumpTry")]
         public List<DumpHardwareType> Tries;
-        [XmlArrayItem("BadBlock")]
+        [XmlArrayItem("Block")]
         public List<ulong> BadBlocks;
     }
 }
diff --git a/DiscImageChef/ChangeLog b/DiscImageChef/ChangeLog
index 4d531d6b..d31b2857 100644
--- a/DiscImageChef/ChangeLog
+++ b/DiscImageChef/ChangeLog
@@ -1,3 +1,7 @@
+* Options.cs:
+* Commands/DumpMedia.cs:
+	Implemented resume mapfile support.
+
 * Commands/DumpMedia.cs:
 	Added resume parameter passing.

diff --git a/DiscImageChef/Commands/DumpMedia.cs b/DiscImageChef/Commands/DumpMedia.cs
index b97008be..c5109014 100644
--- a/DiscImageChef/Commands/DumpMedia.cs
+++ b/DiscImageChef/Commands/DumpMedia.cs
@@ -52,8 +52,6 @@ namespace DiscImageChef.Commands
 {
     public static class DumpMedia
     {
-        // TODO: Implement dump map
-
         public static void doDumpMedia(DumpMediaOptions options)
         {
             DicConsole.DebugWriteLine("Dump-Media command", "--debug={0}", options.Debug);
@@ -66,6 +64,7 @@
             DicConsole.DebugWriteLine("Dump-Media command", "--retry-passes={0}", options.RetryPasses);
             DicConsole.DebugWriteLine("Dump-Media command", "--persistent={0}", options.Persistent);
             DicConsole.DebugWriteLine("Dump-Media command", "--separate-subchannel={0}", options.SeparateSubchannel);
+            DicConsole.DebugWriteLine("Dump-Media command", "--resume={0}", options.Resume);

             if(!File.Exists(options.DevicePath))
             {
@@ -90,8 +89,8 @@
             Core.Statistics.AddDevice(dev);

             Resume resume = null;
-            XmlSerializer xs = new XmlSerializer(resume.GetType());
-            if(File.Exists(options.OutputPrefix + ".resume.xml"))
+            XmlSerializer xs = new XmlSerializer(typeof(Resume));
+            if(File.Exists(options.OutputPrefix + ".resume.xml") && options.Resume)
             {
                 try
                 {
@@ -106,6 +105,12 @@
                 }
             }

+            if(resume != null && resume.NextBlock > resume.LastBlock && resume.BadBlocks.Count == 0)
+            {
+                DicConsole.WriteLine("Media already dumped correctly, not continuing...");
+                return;
+            }
+
             switch(dev.Type)
             {
                 case DeviceType.ATA:
@@ -126,8 +131,11 @@
                     throw new NotSupportedException("Unknown device type.");
             }

-            if(resume != null)
+            if(resume != null && options.Resume)
             {
+                resume.LastWriteDate = DateTime.UtcNow;
+                resume.BadBlocks.Sort();
+
                 if(File.Exists(options.OutputPrefix + ".resume.xml"))
                     File.Delete(options.OutputPrefix + ".resume.xml");

diff --git a/DiscImageChef/Options.cs b/DiscImageChef/Options.cs
index c8ceba9c..96141508 100644
--- a/DiscImageChef/Options.cs
+++ b/DiscImageChef/Options.cs
@@ -302,6 +302,10 @@ namespace DiscImageChef
         [Option("separate-subchannel", Default = false,
             HelpText = "Save subchannel in a separate file. Only applicable to CD/DDCD/GD.")]
         public bool SeparateSubchannel { get; set; }
+
+        [Option('m', "resume", Default = true,
+            HelpText = "Create/use resume mapfile.")]
+        public bool Resume { get; set; }
     }

     [Verb("device-report", HelpText = "Tests the device capabilities and creates an XML report of them.")]
diff --git a/TODO b/TODO
index f45fe620..f6e53e7a 100644
--- a/TODO
+++ b/TODO
@@ -31,7 +31,6 @@ Image checksum:
 --- Optimize and multithread

 Device handling:
---- Add mapfile support for resuming dumps
 --- Add support for FreeBSD
 --- Add support for MMC/SD devices
 --- Add support for NVMe devices
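
Editor's note, not part of the patch: the resume mapfile this commit introduces is plain XML produced by XmlSerializer from the Resume class in DiscImageChef.Metadata/Resume.cs above. The sketch below shows how such a "<prefix>.resume.xml" file can be loaded and written back, mirroring what Commands/DumpMedia.cs now does; the Load/Save helper names and the outputPrefix parameter are illustrative only and are not part of the DiscImageChef API.

// Illustrative sketch only; mirrors the XmlSerializer usage added to
// DiscImageChef/Commands/DumpMedia.cs in this patch. Helper names are hypothetical.
using System;
using System.IO;
using System.Xml.Serialization;
using DiscImageChef.Metadata;

static class ResumeMapfileSketch
{
    // Load "<prefix>.resume.xml" if present; null means a fresh dump.
    public static Resume Load(string outputPrefix)
    {
        string path = outputPrefix + ".resume.xml";
        if(!File.Exists(path))
            return null;

        XmlSerializer xs = new XmlSerializer(typeof(Resume));
        using(StreamReader sr = new StreamReader(path))
            return (Resume)xs.Deserialize(sr);
    }

    // Persist the map after a pass: timestamp it, keep bad blocks sorted,
    // and replace the previous mapfile, as the command code does.
    public static void Save(string outputPrefix, Resume resume)
    {
        resume.LastWriteDate = DateTime.UtcNow;
        resume.BadBlocks.Sort();

        string path = outputPrefix + ".resume.xml";
        if(File.Exists(path))
            File.Delete(path);

        XmlSerializer xs = new XmlSerializer(typeof(Resume));
        using(StreamWriter sw = new StreamWriter(path))
            xs.Serialize(sw, resume);
    }
}

A dump is treated as already complete when resume.NextBlock has passed resume.LastBlock and resume.BadBlocks is empty, which is exactly the check doDumpMedia performs before refusing to dump the same media again.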
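Also not part of the patch: a condensed, hypothetical skeleton of the resume-aware read loop that the ATA, SBC and XGD paths above implement after calling ResumeSupport.Process. The read delegate is a stand-in for the device-specific readers and is not a DiscImageChef API; everything else follows the pattern visible in the hunks (start at NextBlock, record good runs as extents, collect bad blocks for retry passes, snapshot the extents into the current hardware try).

// Condensed, hypothetical sketch of the resume pattern added by this commit.
using System;
using DiscImageChef.Metadata;
using Extents;
using Schemas;

static class ResumeLoopSketch
{
    // 'read' is a hypothetical stand-in returning true when a block run was read correctly.
    public static void Dump(ulong blocks, uint blocksToRead, Resume resume,
                            DumpHardwareType currentTry, ExtentsULong extents,
                            Func<ulong, uint, bool> read)
    {
        // Start where the previous run stopped instead of at block 0.
        for(ulong i = resume.NextBlock; i < blocks; i += blocksToRead)
        {
            if((blocks - i) < blocksToRead)
                blocksToRead = (uint)(blocks - i);

            if(read(i, blocksToRead))
                extents.Add(i, blocksToRead, true);   // good run, remembered as an extent
            else
                for(ulong b = i; b < i + blocksToRead; b++)
                    resume.BadBlocks.Add(b);          // retried on later passes

            resume.NextBlock = i + blocksToRead;      // where a rerun will pick up
        }

        // Snapshot the good extents into the current hardware try for the sidecar.
        resume.BadBlocks.Sort();
        currentTry.Extents = ExtentsConverter.ToMetadata(extents);
    }
}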