[mlpack-svn] r15523 - mlpack/conf/jenkins-conf/benchmark/methods/scikit

fastlab-svn at coffeetalk-1.cc.gatech.edu
Mon Jul 22 09:30:11 EDT 2013


Author: marcus
Date: Mon Jul 22 09:30:11 2013
New Revision: 15523

Log:
Clean up scikit scripts.

Modified:
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/allknn.py
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/gmm.py
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/ica.py
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/kernel_pca.py
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/kmeans.py
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/lars.py
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/linear_regression.py
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/nbc.py
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/nmf.py
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/pca.py
   mlpack/conf/jenkins-conf/benchmark/methods/scikit/sparse_coding.py

Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/allknn.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/allknn.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/allknn.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement All K-Nearest-Neighbors.
 
   @param options - Extra options for the method.
@@ -79,7 +73,7 @@
 
       if not leafSize:
         l = 20
-      elif leafSize.group(1) < 0:
+      elif int(leafSize.group(1)) < 0:
         Log.Fatal("Invalid leaf size: " + str(leafSize.group(1)) + ". Must be " +
             "greater than or equal to 0.")
         return -1

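For context, re.search() match groups are strings, so the old check compared a str against an int: Python 3 raises a TypeError there, and Python 2 silently orders every string after every number, so the validation never fired. A minimal sketch of the corrected pattern, using a hypothetical '-l' flag and option string (the actual flag parsing in allknn.py is not shown in this hunk):

  import re

  options = "-l -5"                               # hypothetical option string
  leafSize = re.search(r'-l (-?\d+)', options)    # hypothetical flag pattern
  # group(1) is a string such as "-5"; cast it before any numeric comparison.
  l = 20 if not leafSize else int(leafSize.group(1))
  if l < 0:
    raise ValueError("Invalid leaf size: must be greater than or equal to 0.")
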
Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/gmm.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/gmm.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/gmm.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement Gaussian Mixture Model.
 
   @param options - Extra options for the method.

Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/ica.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/ica.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/ica.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement independent component analysis.
 
   @param options - Extra options for the method.

Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/kernel_pca.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/kernel_pca.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/kernel_pca.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement Kernel Principal Components Analysis.
 
   @param options - Extra options for the method.
@@ -80,10 +74,7 @@
         model = KernelPCA(n_components=d, kernel="sigmoid")
       elif kernel.group(1) == "polynomial":
         degree = re.search('-D (\d+)', options)
-        if not degree:
-          degree = 1
-        else:
-          degree = int(degree.group(1))
+        degree = 1 if not degree else int(degree.group(1))
 
         model = KernelPCA(n_components=d, kernel="poly", degree=degree)
       else:

Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/kmeans.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/kmeans.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/kmeans.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement K-Means Clustering.
 
   @param options - Extra options for the method.
@@ -76,10 +70,7 @@
           + "equal to 1.")
       return -1
 
-    if not maxIterations:
-      m = 1000
-    else:
-      m = maxIterations.group(1)
+    m = 1000 if not maxIterations else int(maxIterations.group(1))
 
     # Create the KMeans object and perform K-Means clustering.
     with totalTimer:
@@ -87,8 +78,8 @@
         kmeans = KMeans(k=centroids.shape[1], init=centroids, n_init=1, 
             max_iter=m)
       elif seed:
-        kmeans = KMeans(n_clusters=int(clusters.group(1)), init='random', n_init=1, 
-            max_iter=m, random_state=int(seed.group(1)))
+        kmeans = KMeans(n_clusters=int(clusters.group(1)), init='random', 
+            n_init=1, max_iter=m, random_state=int(seed.group(1)))
       else:
         kmeans = KMeans(n_clusters=int(clusters.group(1)), n_init=1, max_iter=m)      
 

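For reference, a minimal standalone sketch of the scikit-learn estimator these branches construct; the data and parameter values below are made up, while n_clusters, init, n_init, max_iter, and random_state are the standard KMeans keywords:

  import numpy as np
  from sklearn.cluster import KMeans

  data = np.random.rand(100, 3)                   # hypothetical input data
  kmeans = KMeans(n_clusters=4, init='random', n_init=1,
                  max_iter=1000, random_state=42)
  labels = kmeans.fit_predict(data)               # cluster assignments
  centers = kmeans.cluster_centers_               # learned centroids
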
Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/lars.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/lars.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/lars.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement Least Angle Regression.
 
   @param options - Extra options for the method.
@@ -60,10 +54,7 @@
     with totalTimer:
       # Get all the parameters.
       lambda1 = re.search("-l (\d+)", options)
-      if not lambda1:
-        lambda1 = 0.0
-      else:
-        lambda1 = int(lambda1.group(1))
+      lambda1 = 0.0 if not lambda1 else int(lambda1.group(1))
 
       # Perform LARS.
       model = LassoLars(alpha=lambda1)
@@ -82,8 +73,8 @@
   def RunMethod(self, options):
     Log.Info("Perform LARS.", self.verbose)
 
-    if len(self.dataset) < 2:
-      Log.Fatal("The method need two datasets.")
+    if len(self.dataset) != 2:
+      Log.Fatal("This method requires two datasets.")
       return -1
 
     return self.LARSScikit(options)

Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/linear_regression.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/linear_regression.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/linear_regression.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement Linear Regression.
 
   @param options - Extra options for the method.

Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/nbc.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/nbc.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/nbc.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement Naive Bayes Classifier.
 
   @param options - Extra options for the method.
@@ -80,8 +74,8 @@
   def RunMethod(self, options):
     Log.Info("Perform NBC.", self.verbose)
 
-    if len(self.dataset) < 2:
-      Log.Fatal("The method need two datasets.")
+    if len(self.dataset) != 2:
+      Log.Fatal("This method requires two datasets.")
       return -1
 
     return self.NBCScikit(options)

Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/nmf.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/nmf.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/nmf.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement Non-negative Matrix Factorization.
 
   @param options - Extra options for the method.
@@ -74,15 +68,8 @@
           Log.Fatal("The rank of the factorization cannot be less than 1.")
           return -1
 
-      if not maxIterations:
-        m = 10000
-      else:
-        m = maxIterations.group(1)
-
-      if not minResidue:
-        e = 1e-05
-      else:
-        e = float(minResidue.group(1))
+      m = 10000 if not maxIterations else int(maxIterations.group(1))
+      e = 1e-05 if not minResidue else float(minResidue.group(1))
 
       if updateRule:
         u = updateRule.group(1)

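The two options are independent: the iteration count is an integer while the residue tolerance is a floating-point value, so each gets its own guard and its own conversion. A standalone sketch of that parsing, with hypothetical flag names and option string:

  import re

  options = "-m 500 -e 1e-07"                     # hypothetical option string
  maxIterations = re.search(r'-m (\d+)', options)
  minResidue = re.search(r'-e ([\d.eE+-]+)', options)

  m = 10000 if not maxIterations else int(maxIterations.group(1))
  e = 1e-05 if not minResidue else float(minResidue.group(1))
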
Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/pca.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/pca.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/pca.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement Principal Components Analysis.
 
   @param options - Extra options for the method.

Modified: mlpack/conf/jenkins-conf/benchmark/methods/scikit/sparse_coding.py
==============================================================================
--- mlpack/conf/jenkins-conf/benchmark/methods/scikit/sparse_coding.py	(original)
+++ mlpack/conf/jenkins-conf/benchmark/methods/scikit/sparse_coding.py	Mon Jul 22 09:30:11 2013
@@ -38,12 +38,6 @@
     self.dataset = dataset
 
   '''
-  Destructor to clean up at the end.
-  '''
-  def __del__(self):
-    pass
-
-  '''
   Use the scikit library to implement Sparse Coding.
 
   @param options - Extra options for the method.
@@ -79,7 +73,7 @@
     Log.Info("Perform Sparse Coding.", self.verbose)
 
     if len(self.dataset) != 2:
-      Log.Fatal("The method need two datasets.")
+      Log.Fatal("This method requires two datasets.")
       return -1
 
     return self.SparseCodingScikit(options)


