[mlpack-svn] r15644 - mlpack/conf/jenkins-conf/benchmark/benchmark

fastlab-svn at coffeetalk-1.cc.gatech.edu
Wed Aug 21 11:19:20 EDT 2013


Author: marcus
Date: Wed Aug 21 11:19:19 2013
New Revision: 15644

Log:
Clean the benchmark scripts.

Modified:
   mlpack/conf/jenkins-conf/benchmark/benchmark/make_reports.py
   mlpack/conf/jenkins-conf/benchmark/benchmark/memory_benchmark.py
   mlpack/conf/jenkins-conf/benchmark/benchmark/run_benchmark.py
   mlpack/conf/jenkins-conf/benchmark/benchmark/test_config.py

Modified: mlpack/conf/jenkins-conf/benchmark/benchmark/make_reports.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/benchmark/make_reports.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/benchmark/make_reports.py	Wed Aug 21 11:19:19 2013
@@ -35,8 +35,8 @@
 def CreateTopLineChart(db):
   build, results = db.GetResultsSum("mlpack")
 
-  GenerateSingleLineChart(results, "reports/img/mlpack_top_" + str(build) + ".png", 
-      backgroundColor="#F3F3F3")
+  GenerateSingleLineChart(results, "reports/img/mlpack_top_" + str(build) + 
+      ".png", backgroundColor="#F3F3F3")
   return "img/mlpack_top_" + str(build) + ".png"
 
 '''
@@ -62,13 +62,14 @@
       if minData(timings) == time:
         time = "{0:.4f}".format(time) if isFloat(str(time)) else time
         time = str(time) + "s" if isFloat(time) else time 
-        timingTable += '<td><p class="text-success"><strong>' + time + '</strong></p></td>'
+        timingTable += '<td><p class="text-success"><strong>' + time 
+        timingTable += '</strong></p></td>'
       else:
         time = "{0:.4f}".format(time) if isFloat(str(time)) else time
         time = str(time) + "s" if isFloat(time) else time 
         timingTable += "<td>" + time + "</td>"
 
-    timingTable += "</tr>"    
+    timingTable += "</tr>"
 
   return (header, timingTable)
 
@@ -116,7 +117,7 @@
       content = Profiler.MassifMemoryUsageReport(str(result[5])).lstrip(" ")
       memoryValues["content"] = content
 
-      filename = "img/massif_" + os.path.basename(result[5]).split('.')[0] + ".png"    
+      filename = "img/massif_" + os.path.basename(result[5]).split('.')[0] + ".png"
       CreateMassifChart(result[5], "reports/" + filename)
       memoryValues["memoryChart"] = filename
 
@@ -155,7 +156,7 @@
   libraryIds  = db.GetLibraryIds()
   buildIds = []
   for libraryid in libraryIds:
-    buildIds.append((db.GetLatestBuildFromLibary(libraryid[0]), libraryid[1]))  
+    buildIds.append((db.GetLatestBuildFromLibary(libraryid[0]), libraryid[1]))
 
   # Iterate through all methods and create a new container for each method.
   for method in db.GetAllMethods():
@@ -179,7 +180,7 @@
         mlpackMemoryBuilId = db.GetLatestBuildFromLibary(mlpackMemoryId[0][0])
         if mlpackMemoryBuilId:
           memoryResults = db.GetMemoryResults(mlpackMemoryBuilId, mlpackMemoryId[0][0], method[0])
-          memoryContent = CreateMemoryContent(memoryResults)     
+          memoryContent = CreateMemoryContent(memoryResults)
 
       # Generate a "unique" name for the line chart.
       lineChartName = "img/line_" + chartHash + ".png"
@@ -217,7 +218,6 @@
         reportValues["progressPositive"] = "0%"
         reportValues["progressNegative"] = "100%"
 
-
       reportValues["barChart"] = barChartName
       reportValues["lineChart"] = lineChartName
       reportValues["numLibararies"] = str(len(methodLibararies))
@@ -319,10 +319,10 @@
   # Reports settings.
   database = "reports/benchmark.db"
 
-  # Create folder structure.
+  # Create the folder structure.
   CreateDirectoryStructure(["reports/img", "reports/etc"])
 
-  # Read Config.
+  # Read the config.
   config = Parser(configfile, verbose=False)
   streamData = config.StreamMerge()
 
@@ -351,8 +351,8 @@
     fid.write(template)
 
 if __name__ == '__main__':
-  parser = argparse.ArgumentParser(description="""Perform the benchmark with the
-      given config.""")
+  parser = argparse.ArgumentParser(description="""Perform the memory benchmark 
+      with the given config.""")
   parser.add_argument('-c','--config', help='Configuration file name.', 
       required=True)
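
For reference, the timing-table hunk above only re-wraps long lines; the cell
logic is unchanged. A minimal standalone sketch of that logic, with isFloat and
the input row simplified for illustration (these are assumptions, not the exact
helpers from make_reports.py):

def isFloat(value):
  # Return True if the value can be parsed as a float.
  try:
    float(value)
    return True
  except ValueError:
    return False

def FormatTimingRow(timings):
  # Build one HTML table row: the fastest timing gets the "text-success"
  # styling, non-numeric entries (e.g. "failure") pass through unchanged.
  numeric = [float(t) for t in timings if isFloat(str(t))]
  fastest = min(numeric) if numeric else None
  row = "<tr>"
  for time in timings:
    if isFloat(str(time)):
      cell = "{0:.4f}s".format(float(time))
      if float(time) == fastest:
        cell = '<p class="text-success"><strong>' + cell + '</strong></p>'
    else:
      cell = str(time)
    row += "<td>" + cell + "</td>"
  return row + "</tr>"

# Example: 0.3751 is the minimum, so its cell gets the text-success markup.
print(FormatTimingRow([0.42, 0.3751, "failure"]))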
 

Modified: mlpack/conf/jenkins-conf/benchmark/benchmark/memory_benchmark.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/benchmark/memory_benchmark.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/benchmark/memory_benchmark.py	Wed Aug 21 11:19:19 2013
@@ -71,10 +71,10 @@
   timeout = 23000
   database = "reports/benchmark.db"
 
-  # Create folder structure.
+  # Create the folder structure.
   CreateDirectoryStructure(["reports/img", "reports/etc"])
 
-  # Read Config.
+  # Read the config.
   config = Parser(configfile, verbose=False)
   streamData = config.StreamMerge()
 
@@ -126,7 +126,7 @@
 
             build[name] = (db.NewBuild(libaryId), libaryId)
 
-          # Load script.
+          # Load the script.
           try:
             module = Loader.ImportModuleFromPath(script)
             methodCall = getattr(module, method)
@@ -135,7 +135,7 @@
             Log.Fatal("Exception: " + str(e))
           else:
 
-            for dataset in datsets:  
+            for dataset in datsets:
               datasetName = NormalizeDatasetName(dataset)
 
               # Logging: Create a new dataset record for this dataset.
@@ -166,7 +166,7 @@
                 RemoveDataset(modifiedDataset[1])
                 continue
 
-              # Save results in the logfile if the user asked for.
+              # Save results in the database if the user asked for it.
               if err != -1 and log:
                 buildId, libaryId = build[name]
                 db.NewMemory(buildId, libaryId, methodId, datasetId, outputName)
@@ -188,4 +188,4 @@
 
   if args:
     log = True if args.log == "True" else False
-    Main(args.config, args.blocks, log)
\ No newline at end of file
+    Main(args.config, args.blocks, log)
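
The "Load the script" block above goes through the benchmark's Loader helper
and then looks up the method class by name. A minimal sketch of that
dynamic-loading pattern using only the standard library (importlib stands in
for Loader.ImportModuleFromPath here; the script path and class name in the
usage comment are hypothetical):

import importlib.util

def ImportModuleFromPath(path):
  # Load a Python source file as a module object without touching sys.path.
  spec = importlib.util.spec_from_file_location("benchmark_script", path)
  module = importlib.util.module_from_spec(spec)
  spec.loader.exec_module(module)
  return module

# Usage, mirroring memory_benchmark.py:
#   module = ImportModuleFromPath("methods/mlpack/allknn.py")
#   methodCall = getattr(module, "ALLKNN")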

Modified: mlpack/conf/jenkins-conf/benchmark/benchmark/run_benchmark.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/benchmark/run_benchmark.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/benchmark/run_benchmark.py	Wed Aug 21 11:19:19 2013
@@ -106,10 +106,10 @@
   timeout = 23000
   database = "reports/benchmark.db"
 
-  # Create folder structure.
-  CreateDirectoryStructure("reports/img", "reports/etc")
+  # Create the folder structure.
+  CreateDirectoryStructure(["reports/img", "reports/etc"])
 
-  # Read Config.
+  # Read the config.
   config = Parser(configfile, verbose=False)
   streamData = config.StreamMerge()
 
@@ -240,7 +240,7 @@
               else:
                 dataMatrix[row][col] = "{0:.6f}".format(sum(time) / trials)
 
-              # Save results in the logfile if the user asked for.
+              # Save results in the database if the user asked for it.
               if log:
                 # Get the variance.
                 var = 0
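
The first hunk above is a real fix rather than a reflow: CreateDirectoryStructure
takes a single list of paths, and the old call passed two separate strings. A
sketch of a helper with that shape (an assumption for illustration; the actual
implementation lives in the benchmark's utility code, not in this diff):

import os

def CreateDirectoryStructure(directories):
  # Create every directory in the list, skipping those that already exist.
  for directory in directories:
    if not os.path.exists(directory):
      os.makedirs(directory)

CreateDirectoryStructure(["reports/img", "reports/etc"])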

Modified: mlpack/conf/jenkins-conf/benchmark/benchmark/test_config.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/benchmark/test_config.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/benchmark/test_config.py	Wed Aug 21 11:19:19 2013
@@ -5,7 +5,6 @@
   Test the configuration file.
 '''
 
-
 import os
 import sys
 import inspect
@@ -32,4 +31,4 @@
 
   if args:
     config = Parser(args.config)
-    config.CheckConfig()
\ No newline at end of file
+    config.CheckConfig()
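
This hunk removes a stray blank line and adds the missing trailing newline. For
context, the entry point keeps the same shape as the other scripts; a rough,
self-contained sketch (the -c/--config flag is assumed to match the convention
used in make_reports.py, and Parser/CheckConfig come from the benchmark's own
util package, so they are left as comments here):

import argparse

if __name__ == '__main__':
  parser = argparse.ArgumentParser(description="Test the configuration file.")
  parser.add_argument('-c', '--config', help='Configuration file name.',
      required=True)
  args = parser.parse_args()

  # In test_config.py the parsed path is handed to the config Parser:
  #   config = Parser(args.config)
  #   config.CheckConfig()
  print("Checking configuration file:", args.config)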


