diff --git a/SabreTools.Helper/Dats/Partials/DatFile.Splitters.cs b/SabreTools.Helper/Dats/Partials/DatFile.Splitters.cs
index ee4b1cb1..b4f57e2e 100644
--- a/SabreTools.Helper/Dats/Partials/DatFile.Splitters.cs
+++ b/SabreTools.Helper/Dats/Partials/DatFile.Splitters.cs
@@ -26,9 +26,10 @@ namespace SabreTools.Helper.Dats
 		/// <param name="basepath">Parent path for replacement</param>
 		/// <param name="extA">List of extensions to split on (first DAT)</param>
 		/// <param name="extB">List of extensions to split on (second DAT)</param>
+		/// <param name="maxDegreeOfParallelism">Integer representing the maximum amount of parallelization to be used</param>
 		/// <param name="logger">Logger object for console and file writing</param>
 		/// <returns>True if split succeeded, false otherwise</returns>
-		public bool SplitByExt(string outDir, string basepath, List<string> extA, List<string> extB, Logger logger)
+		public bool SplitByExt(string outDir, string basepath, List<string> extA, List<string> extB, int maxDegreeOfParallelism, Logger logger)
 		{
 			// Make sure all of the extensions have a dot at the beginning
 			List<string> newExtA = new List<string>();
@@ -115,8 +116,8 @@ namespace SabreTools.Helper.Dats
 			}
 
 			// Then write out both files
-			bool success = datdataA.WriteToFile(outDir, logger);
-			success &= datdataB.WriteToFile(outDir, logger);
+			bool success = datdataA.WriteToFile(outDir, maxDegreeOfParallelism, logger);
+			success &= datdataB.WriteToFile(outDir, maxDegreeOfParallelism, logger);
 
 			return success;
 		}
@@ -126,9 +127,10 @@ namespace SabreTools.Helper.Dats
 		/// </summary>
 		/// <param name="outDir">Name of the directory to write the DATs out to</param>
 		/// <param name="basepath">Parent path for replacement</param>
+		/// <param name="maxDegreeOfParallelism">Integer representing the maximum amount of parallelization to be used</param>
 		/// <param name="logger">Logger object for console and file writing</param>
 		/// <returns>True if split succeeded, false otherwise</returns>
-		public bool SplitByHash(string outDir, string basepath, Logger logger)
+		public bool SplitByHash(string outDir, string basepath, int maxDegreeOfParallelism, Logger logger)
 		{
 			// Sanitize the basepath to be more predictable
 			basepath = (basepath.EndsWith(Path.DirectorySeparatorChar.ToString()) ? basepath : basepath + Path.DirectorySeparatorChar);
@@ -299,10 +301,10 @@ namespace SabreTools.Helper.Dats
 			// Now, output all of the files to the output directory
 			logger.User("DAT information created, outputting new files");
 			bool success = true;
-			success &= nodump.WriteToFile(outDir, logger);
-			success &= sha1.WriteToFile(outDir, logger);
-			success &= md5.WriteToFile(outDir, logger);
-			success &= crc.WriteToFile(outDir, logger);
+			success &= nodump.WriteToFile(outDir, maxDegreeOfParallelism, logger);
+			success &= sha1.WriteToFile(outDir, maxDegreeOfParallelism, logger);
+			success &= md5.WriteToFile(outDir, maxDegreeOfParallelism, logger);
+			success &= crc.WriteToFile(outDir, maxDegreeOfParallelism, logger);
 
 			return success;
 		}
@@ -314,15 +316,16 @@ namespace SabreTools.Helper.Dats
 		/// <param name="basepath">Parent path for replacement</param>
 		/// <param name="shortname">True if short names should be used, false otherwise</param>
 		/// <param name="basedat">True if original filenames should be used as the base for output filename, false otherwise</param>
+		/// <param name="maxDegreeOfParallelism">Integer representing the maximum amount of parallelization to be used</param>
 		/// <param name="logger">Logger object for console and file writing</param>
 		/// <returns>True if split succeeded, false otherwise</returns>
-		public bool SplitByLevel(string outDir, string basepath, bool shortname, bool basedat, Logger logger)
+		public bool SplitByLevel(string outDir, string basepath, bool shortname, bool basedat, int maxDegreeOfParallelism, Logger logger)
 		{
 			// Sanitize the basepath to be more predictable
 			basepath = (basepath.EndsWith(Path.DirectorySeparatorChar.ToString()) ? basepath : basepath + Path.DirectorySeparatorChar);
 
 			// First, organize by games so that we can do the right thing
-			BucketBy(SortedBy.Game, false /* mergeroms */, logger, lower: false, norename: true);
+			BucketBy(SortedBy.Game, false /* mergeroms */, maxDegreeOfParallelism, logger, lower: false, norename: true);
 
 			// Create a temporary DAT to add things to
 			DatFile tempDat = new DatFile(this)
@@ -341,7 +344,7 @@ namespace SabreTools.Helper.Dats
 				if (tempDat.Name != null && tempDat.Name != Style.GetDirectoryName(key))
 				{
 					// Process and output the DAT
-					SplitByLevelHelper(tempDat, outDir, shortname, basedat, logger);
+					SplitByLevelHelper(tempDat, outDir, shortname, basedat, maxDegreeOfParallelism, logger);
 
 					// Reset the DAT for the next items
 					tempDat = new DatFile(this)
@@ -363,7 +366,7 @@ namespace SabreTools.Helper.Dats
 			}
 
 			// Then we write the last DAT out since it would be skipped otherwise
-			SplitByLevelHelper(tempDat, outDir, shortname, basedat, logger);
+			SplitByLevelHelper(tempDat, outDir, shortname, basedat, maxDegreeOfParallelism, logger);
 
 			return true;
 		}
@@ -394,8 +397,9 @@ namespace SabreTools.Helper.Dats
 		/// <param name="outDir">Directory to write out to</param>
 		/// <param name="shortname">True if short naming scheme should be used, false otherwise</param>
 		/// <param name="restore">True if original filenames should be used as the base for output filename, false otherwise</param>
+		/// <param name="maxDegreeOfParallelism">Integer representing the maximum amount of parallelization to be used</param>
 		/// <param name="logger">Logger object for file and console output</param>
-		private void SplitByLevelHelper(DatFile datFile, string outDir, bool shortname, bool restore, Logger logger)
+		private void SplitByLevelHelper(DatFile datFile, string outDir, bool shortname, bool restore, int maxDegreeOfParallelism, Logger logger)
 		{
 			// Get the name from the DAT to use separately
 			string name = datFile.Name;
@@ -420,7 +424,7 @@ namespace SabreTools.Helper.Dats
 			datFile.Type = null;
 
 			// Write out the temporary DAT to the proper directory
-			datFile.WriteToFile(path, logger);
+			datFile.WriteToFile(path, maxDegreeOfParallelism, logger);
 		}
 
 		/// <summary>
@@ -428,9 +432,10 @@ namespace SabreTools.Helper.Dats
 		/// </summary>
 		/// <param name="outDir">Name of the directory to write the DATs out to</param>
 		/// <param name="basepath">Parent path for replacement</param>
+		/// <param name="maxDegreeOfParallelism">Integer representing the maximum amount of parallelization to be used</param>
 		/// <param name="logger">Logger object for console and file writing</param>
 		/// <returns>True if split succeeded, false otherwise</returns>
-		public bool SplitByType(string outDir, string basepath, Logger logger)
+		public bool SplitByType(string outDir, string basepath, int maxDegreeOfParallelism, Logger logger)
 		{
 			// Sanitize the basepath to be more predictable
 			basepath = (basepath.EndsWith(Path.DirectorySeparatorChar.ToString()) ? basepath : basepath + Path.DirectorySeparatorChar);
@@ -539,9 +544,9 @@ namespace SabreTools.Helper.Dats
 			// Now, output all of the files to the output directory
 			logger.User("DAT information created, outputting new files");
 			bool success = true;
-			success &= romdat.WriteToFile(outDir, logger);
-			success &= diskdat.WriteToFile(outDir, logger);
-			success &= sampledat.WriteToFile(outDir, logger);
+			success &= romdat.WriteToFile(outDir, maxDegreeOfParallelism, logger);
+			success &= diskdat.WriteToFile(outDir, maxDegreeOfParallelism, logger);
+			success &= sampledat.WriteToFile(outDir, maxDegreeOfParallelism, logger);
 
 			return success;
 		}
diff --git a/SabreTools.Helper/Dats/Partials/DatFile.Statistics.cs b/SabreTools.Helper/Dats/Partials/DatFile.Statistics.cs
index da72d34b..d916a8e0 100644
--- a/SabreTools.Helper/Dats/Partials/DatFile.Statistics.cs
+++ b/SabreTools.Helper/Dats/Partials/DatFile.Statistics.cs
@@ -102,13 +102,14 @@ namespace SabreTools.Helper.Dats
 		/// </summary>
 		/// <param name="outputs">Dictionary representing the outputs</param>
 		/// <param name="statDatFormat">Set the statistics output format to use</param>
+		/// <param name="maxDegreeOfParallelism">Integer representing the maximum amount of parallelization to be used</param>
 		/// <param name="logger">Logger object for file and console writing</param>
 		/// <param name="recalculate">True if numbers should be recalculated for the DAT, false otherwise (default)</param>
 		/// <param name="game">Number of games to use, -1 means recalculate games (default)</param>
 		/// <param name="baddumpCol">True if baddumps should be included in output, false otherwise (default)</param>
 		/// <param name="nodumpCol">True if nodumps should be included in output, false otherwise (default)</param>
-		public void OutputStats(Dictionary outputs, StatDatFormat statDatFormat, Logger logger, bool recalculate = false,
-			long game = -1, bool baddumpCol = false, bool nodumpCol = false)
+		public void OutputStats(Dictionary outputs, StatDatFormat statDatFormat, int maxDegreeOfParallelism, Logger logger,
+			bool recalculate = false, long game = -1, bool baddumpCol = false, bool nodumpCol = false)
 		{
 			// If we're supposed to recalculate the statistics, do so
 			if (recalculate)
@@ -116,7 +117,7 @@ namespace SabreTools.Helper.Dats
 				RecalculateStats();
 			}
 
-			BucketBy(SortedBy.Game, false /* mergeroms */, logger, norename: true);
+			BucketBy(SortedBy.Game, false /* mergeroms */, maxDegreeOfParallelism, logger, norename: true);
 			if (TotalSize < 0)
 			{
 				TotalSize = Int64.MaxValue + TotalSize;
@@ -262,9 +263,10 @@ namespace SabreTools.Helper.Dats
 		/// <param name="baddumpCol">True if baddumps should be included in output, false otherwise</param>
 		/// <param name="nodumpCol">True if nodumps should be included in output, false otherwise</param>
 		/// <param name="statDatFormat">Set the statistics output format to use</param>
+		/// <param name="maxDegreeOfParallelism">Integer representing the maximum amount of parallelization to be used</param>
 		/// <param name="logger">Logger object for file and console output</param>
 		public static void OutputStats(List<string> inputs, string reportName, string outDir, bool single,
-			bool baddumpCol, bool nodumpCol, StatDatFormat statDatFormat, Logger logger)
+			bool baddumpCol, bool nodumpCol, StatDatFormat statDatFormat, int maxDegreeOfParallelism, Logger logger)
 		{
 			// If there's no output format, set the default
 			if (statDatFormat == 0x0)
@@ -362,7 +364,8 @@ namespace SabreTools.Helper.Dats
 					BaddumpCount = dirBaddump,
 					NodumpCount = dirNodump,
 				};
-				lastdirdat.OutputStats(outputs, statDatFormat, logger, game: dirGame, baddumpCol: baddumpCol, nodumpCol: nodumpCol);
+				lastdirdat.OutputStats(outputs, statDatFormat, maxDegreeOfParallelism, logger,
+					game: dirGame, baddumpCol: baddumpCol, nodumpCol: nodumpCol);
 
 				// Write the mid-footer, if any
 				OutputStatsWriteMidFooter(outputs, statDatFormat, baddumpCol, nodumpCol);
@@ -387,13 +390,14 @@ namespace SabreTools.Helper.Dats
 				List games = new List();
 				DatFile datdata = new DatFile();
 				datdata.Parse(filename.Item1, 0, 0, logger);
-				datdata.BucketBy(SortedBy.Game, false /* mergeroms */, logger, norename: true);
+				datdata.BucketBy(SortedBy.Game, false /* mergeroms */, maxDegreeOfParallelism, logger, norename: true);
 
 				// Output single DAT stats (if asked)
 				logger.User("Adding stats for file '" + filename.Item1 + "'\n", false);
 				if (single)
 				{
-					datdata.OutputStats(outputs, statDatFormat, logger, baddumpCol: baddumpCol, nodumpCol: nodumpCol);
+					datdata.OutputStats(outputs, statDatFormat, maxDegreeOfParallelism, logger,
+						baddumpCol: baddumpCol, nodumpCol: nodumpCol);
 				}
 
 				// Add single DAT stats to dir
@@ -442,7 +446,8 @@ namespace SabreTools.Helper.Dats
 					BaddumpCount = dirBaddump,
 					NodumpCount = dirNodump,
 				};
-				dirdat.OutputStats(outputs, statDatFormat, logger, game: dirGame, baddumpCol: baddumpCol, nodumpCol: nodumpCol);
+				dirdat.OutputStats(outputs, statDatFormat, maxDegreeOfParallelism, logger,
+					game: dirGame, baddumpCol: baddumpCol, nodumpCol: nodumpCol);
 			}
 
 			// Write the mid-footer, if any
@@ -476,7 +481,8 @@ namespace SabreTools.Helper.Dats
 				BaddumpCount = totalBaddump,
 				NodumpCount = totalNodump,
 			};
-			totaldata.OutputStats(outputs, statDatFormat, logger, game: totalGame, baddumpCol: baddumpCol, nodumpCol: nodumpCol);
+			totaldata.OutputStats(outputs, statDatFormat, maxDegreeOfParallelism, logger,
+				game: totalGame, baddumpCol: baddumpCol, nodumpCol: nodumpCol);
 
 			// Output footer if needed
 			OutputStatsWriteFooter(outputs, statDatFormat);
diff --git a/SabreTools.Helper/Dats/Partials/DatFile.Writers.cs b/SabreTools.Helper/Dats/Partials/DatFile.Writers.cs
index 2279f495..3807aa35 100644
--- a/SabreTools.Helper/Dats/Partials/DatFile.Writers.cs
+++ b/SabreTools.Helper/Dats/Partials/DatFile.Writers.cs
@@ -107,7 +108,8 @@ namespace SabreTools.Helper.Dats
 			// Output initial statistics, for kicks
 			if (stats)
 			{
-				OutputStats(new Dictionary(), StatDatFormat.None, logger, recalculate: (RomCount + DiskCount == 0), baddumpCol: true, nodumpCol: true);
+				OutputStats(new Dictionary(), StatDatFormat.None, maxDegreeOfParallelism, logger,
+					recalculate: (RomCount + DiskCount == 0), baddumpCol: true, nodumpCol: true);
 			}
 
 			// Bucket roms by game name and optionally dedupe
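For context on how a value like this is usually consumed downstream: the `maxDegreeOfParallelism` argument now threaded through `WriteToFile`, `BucketBy`, and `OutputStats` is the conventional input to a `ParallelOptions` instance, where `-1` leaves the degree of parallelism up to the scheduler. The sketch below is illustrative only and is not part of this diff; the `ParallelismSketch` class and `ProcessKeys` method are assumptions, not SabreTools APIs.

```csharp
using System.Collections.Generic;
using System.Threading.Tasks;

public static class ParallelismSketch
{
	// Hypothetical helper: the names here are assumptions, not code from this repository.
	public static void ProcessKeys(List<string> keys, int maxDegreeOfParallelism)
	{
		// -1 (the ParallelOptions default) lets the scheduler decide;
		// any positive value caps the number of concurrent workers.
		ParallelOptions options = new ParallelOptions
		{
			MaxDegreeOfParallelism = maxDegreeOfParallelism,
		};

		Parallel.ForEach(keys, options, key =>
		{
			// Per-key work would go here, e.g. writing out one split DAT.
		});
	}
}
```

A caller would pass the same integer it hands to the methods above, e.g. `ProcessKeys(keys, 4)` to cap the loop at four concurrent workers.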