[mlpack-svn] r10389 - mlpack/trunk/src/mlpack/methods/nnsvm

fastlab-svn at coffeetalk-1.cc.gatech.edu
Thu Nov 24 02:14:00 EST 2011


Author: rcurtin
Date: 2011-11-24 02:14:00 -0500 (Thu, 24 Nov 2011)
New Revision: 10389

Modified:
   mlpack/trunk/src/mlpack/methods/nnsvm/nnsmo.hpp
   mlpack/trunk/src/mlpack/methods/nnsvm/nnsmo_impl.hpp
   mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm.hpp
   mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm_impl.hpp
   mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm_main.cpp
Log:
Fix formatting of NNSVM code (#153).


Modified: mlpack/trunk/src/mlpack/methods/nnsvm/nnsmo.hpp
===================================================================
--- mlpack/trunk/src/mlpack/methods/nnsvm/nnsmo.hpp	2011-11-24 07:02:29 UTC (rev 10388)
+++ mlpack/trunk/src/mlpack/methods/nnsvm/nnsmo.hpp	2011-11-24 07:14:00 UTC (rev 10389)
@@ -1,3 +1,8 @@
+/**
+ * @file nnsmo.hpp
+ *
+ * The non-negative SMO algorithm.
+ */
 #ifndef __MLPACK_METHODS_NNSVM_NNSMO_HPP
 #define __MLPACK_METHODS_NNSVM_NNSMO_HPP
 
@@ -14,141 +19,139 @@
 template<typename TKernel>
 class NNSMO
 {
+ public:
+  typedef TKernel Kernel;
 
-  public:
-    typedef TKernel Kernel;
+ private:
+  arma::mat kernel_cache_sign_;
+  Kernel kernel_;
+  size_t n_data_; // number of data samples
+  arma::mat dataset_; // alias for the data matrix
+  arma::vec alpha_; // the alphas, to be optimized
+  arma::vec error_; // the error cache
+  double thresh_; // negation of the intercept
+  double c_;
+  size_t budget_;
+  double sum_alpha_;
 
-  private:
-    arma::mat kernel_cache_sign_;
-    Kernel kernel_;
-    size_t n_data_; // number of data samples
-    arma::mat dataset_; // alias for the data matrix
-    arma::vec alpha_; // the alphas, to be optimized
-    arma::vec error_; // the error cache
-    double thresh_; // negation of the intercept
-    double c_;
-    size_t budget_;
-    double sum_alpha_;
+  size_t n_feature_; // number of data features
+  double w_square_sum_; // square sum of the weight vector
+  arma::vec VTA_; //
+  double eps_; // the tolerance of progress on alpha values
+  size_t max_iter_; // maximum number of iterations (termination criterion)
 
-    size_t n_feature_; // number of data features
-    double w_square_sum_; // square sum of the weight vector
-    arma::vec VTA_; //
-    double eps_; // the tolerace of progress on alpha values
-    size_t max_iter_; // the maximum iteration, termination criteria
+ public:
+  NNSMO() {}
+  ~NNSMO() {}
 
-  public:
-    NNSMO() {}
-    ~NNSMO() {}
+  /**
+   * Initializes an NNSMO problem.
+   *
+   * You must initialize separately the kernel.
+   */
+  void Init(const arma::mat& dataset_in, double c_in, size_t budget_in,
+            double eps_in, size_t max_iter_in)
+  {
+    c_ = c_in;
 
-    /**
-     * Initializes an NNSMO problem.
-     *
-     * You must initialize separately the kernel.
-     */
-    void Init(const arma::mat& dataset_in, double c_in, size_t budget_in,
-              double eps_in, size_t max_iter_in)
-    {
-      c_ = c_in;
+    dataset_ = dataset_in;
 
-      dataset_ = dataset_in;
+    n_data_ = dataset_.n_cols;
+    budget_ = std::min(budget_in, (size_t) n_data_);
 
-      n_data_ = dataset_.n_cols;
-      budget_ = std::min(budget_in, (size_t) n_data_);
+    alpha_.zeros(n_data_);
+    sum_alpha_ = 0;
 
-      alpha_.zeros(n_data_);
-      sum_alpha_ = 0;
+    error_.zeros(n_data_);
+    for(size_t i = 0; i < n_data_; i++)
+    {
+      error_[i] -= GetLabelSign_(i);
+    }
 
-      error_.zeros(n_data_);
-      for(size_t i = 0; i < n_data_; i++)
-      {
-        error_[i] -= GetLabelSign_(i);
-      }
+    thresh_ = 0;
 
-      thresh_ = 0;
+    n_feature_ = dataset_.n_rows - 1;
+    VTA_.zeros(n_feature_);
+    eps_ = eps_in;
+    max_iter_ = max_iter_in;
+  }
 
-      n_feature_ = dataset_.n_rows - 1;
-      VTA_.zeros(n_feature_);
-      eps_ = eps_in;
-      max_iter_ = max_iter_in;
-    }
+  void Train();
 
-    void Train();
+  double threshold() const
+  {
+    return thresh_;
+  }
 
-    double threshold() const
-    {
-      return thresh_;
-    }
+  void GetNNSVM(arma::mat& support_vectors, arma::vec& alpha, arma::vec& w)
+      const;
 
-    void GetNNSVM(arma::mat& support_vectors, arma::vec& alpha, arma::vec& w) const;
+ private:
+  size_t TrainIteration_(bool examine_all);
 
-  private:
-    size_t TrainIteration_(bool examine_all);
+  bool TryChange_(size_t j);
 
-    bool TryChange_(size_t j);
+  double CalculateDF_(size_t i, size_t j, double error_j);
 
-    double CalculateDF_(size_t i, size_t j, double error_j);
+  bool TakeStep_(size_t i, size_t j, double error_j);
 
-    bool TakeStep_(size_t i, size_t j, double error_j);
+  double FixAlpha_(double alpha) const
+  {
+    if (alpha < NNSMO_ZERO)
+      alpha = 0;
+    else if (alpha > c_ - NNSMO_ZERO)
+      alpha = c_;
 
-    double FixAlpha_(double alpha) const
-    {
-      if (alpha < NNSMO_ZERO)
-      {
-        alpha = 0;
-      }
-      else if (alpha > c_ - NNSMO_ZERO)
-      {
-        alpha = c_;
-      }
-      return alpha;
-    }
+    return alpha;
+  }
 
-    bool IsBound_(double alpha) const
-    {
-      return alpha <= 0 || alpha >= c_;
-    }
+  bool IsBound_(double alpha) const
+  {
+    return (alpha <= 0) || (alpha >= c_);
+  }
 
-   // labels: the last row of the data matrix, 0 or 1
-    int GetLabelSign_(size_t i) const
-    {
-      return (dataset_(dataset_.n_rows - 1, i) != 0) ? 1 : -1;
-    }
+  // labels: the last row of the data matrix, 0 or 1
+  int GetLabelSign_(size_t i) const
+  {
+    return (dataset_(dataset_.n_rows - 1, i) != 0) ? 1 : -1;
+  }
 
-    void GetVector_(size_t i, arma::vec& v) const
-    {
-      v = arma::vec((double*) dataset_.colptr(i), dataset_.n_rows - 1, false, true); // manual ugly constructor
-    }
+  void GetVector_(size_t i, arma::vec& v) const
+  {
+    v = arma::vec((double*) dataset_.colptr(i), dataset_.n_rows - 1, false,
+        true); // manual ugly constructor
+  }
 
-    double Error_(size_t i) const
-    {
-      return error_[i];
-    }
+  double Error_(size_t i) const
+  {
+    return error_[i];
+  }
 
-    double Evaluate_(size_t i) const;
+  double Evaluate_(size_t i) const;
 
-    double EvalKernel_(size_t i, size_t j) const
-    {
-      return kernel_cache_sign_(i, j) * (GetLabelSign_(i) * GetLabelSign_(j));
-    }
+  double EvalKernel_(size_t i, size_t j) const
+  {
+    return kernel_cache_sign_(i, j) * (GetLabelSign_(i) * GetLabelSign_(j));
+  }
 
-    void CalcKernels_()
+  void CalcKernels_()
+  {
+    kernel_cache_sign_.set_size(n_data_, n_data_);
+    fprintf(stderr, "Kernel Start\n");
+    for (size_t i = 0; i < n_data_; i++)
     {
-      kernel_cache_sign_.set_size(n_data_, n_data_);
-      fprintf(stderr, "Kernel Start\n");
-      for (size_t i = 0; i < n_data_; i++)
+      for (size_t j = 0; j < n_data_; j++)
       {
-        for (size_t j = 0; j < n_data_; j++)
-        {
-          arma::vec v_i;
-          GetVector_(i, v_i);
-          arma::vec v_j;
-          GetVector_(j, v_j);
-          double k = kernel_.Evaluate(v_i, v_j);
-          kernel_cache_sign_(j, i) = k * GetLabelSign_(i) * GetLabelSign_(j);
-        }
+        arma::vec v_i;
+        GetVector_(i, v_i);
+        arma::vec v_j;
+        GetVector_(j, v_j);
+        double k = kernel_.Evaluate(v_i, v_j);
+        kernel_cache_sign_(j, i) = k * GetLabelSign_(i) * GetLabelSign_(j);
       }
-      fprintf(stderr, "Kernel Stop\n");
     }
+    fprintf(stderr, "Kernel Stop\n");
+  }
 };
 
 }; // namespace nnsvm
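
For reference, here is a minimal sketch of how the NNSMO interface declared
above might be driven. It is illustrative only (not taken from the mlpack
sources): it assumes the class lives in namespace mlpack::nnsvm, that the
stateless LinearKernel is used (so the default-constructed kernel member
suffices), and that the data matrix carries the 0/1 labels in its last row,
as the comments in nnsmo.hpp describe.

#include <mlpack/core.hpp>
#include <mlpack/core/kernels/linear_kernel.hpp>

#include "nnsmo.hpp"

using namespace mlpack;
using namespace mlpack::nnsvm;

// Hypothetical driver for NNSMO<LinearKernel>.
void TrainNNSMOSketch(const arma::mat& labeledData)
{
  // labeledData: (n_features + 1) x n_points, labels (0 or 1) in the last row.
  NNSMO<kernel::LinearKernel> smo;

  // c = 10, budget = number of points, eps = 1e-6, max_iter = 1000 mirror the
  // defaults that nnsvm_impl.hpp passes in.
  smo.Init(labeledData, 10.0, labeledData.n_cols, 1.0e-6, 1000);
  smo.Train();

  // Retrieve the support vectors, their coefficients, and the non-negative
  // weight vector of the trained model.
  arma::mat supportVectors;
  arma::vec supportAlpha;
  arma::vec w;
  smo.GetNNSVM(supportVectors, supportAlpha, w);

  Log::Info << "threshold (negated intercept): " << smo.threshold()
      << std::endl;
}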

Modified: mlpack/trunk/src/mlpack/methods/nnsvm/nnsmo_impl.hpp
===================================================================
--- mlpack/trunk/src/mlpack/methods/nnsvm/nnsmo_impl.hpp	2011-11-24 07:02:29 UTC (rev 10388)
+++ mlpack/trunk/src/mlpack/methods/nnsvm/nnsmo_impl.hpp	2011-11-24 07:14:00 UTC (rev 10389)
@@ -1,3 +1,8 @@
+/**
+ * @file nnsmo_impl.hpp
+ *
+ * Implementation of the non-negative SMO algorithm.
+ */
 #ifndef __MLPACK_METHODS_NNSVM_NNSMO_IMPL_HPP
 #define __MLPACK_METHODS_NNSVM_NNSMO_IMPL_HPP
 
@@ -6,7 +11,9 @@
 
 // Return the support vectors, the support alpha vector, and the weight vector of the trained NNSVM
 template<typename TKernel>
-void NNSMO<TKernel>::GetNNSVM(arma::mat& support_vectors, arma::vec& support_alpha, arma::vec& w) const
+void NNSMO<TKernel>::GetNNSVM(arma::mat& support_vectors,
+                              arma::vec& support_alpha,
+                              arma::vec& w) const
 {
   size_t n_support = 0;
   size_t i_support = 0;
@@ -37,7 +44,7 @@
   }
 
   w.set_size(n_feature_);
-  for(size_t s = 0; s < n_feature_; s++)
+  for (size_t s = 0; s < n_feature_; s++)
     w[s] = math::ClampNonNegative(VTA_[s]);
 }
 
@@ -60,29 +67,26 @@
     num_changed = TrainIteration_(examine_all);
 
     if (examine_all)
-    {
       examine_all = false;
-    }
     else if (num_changed == 0)
-    {
       examine_all = true;
-    }
 
-	//if exceed the maximum number of iterations, finished
+    // If we exceed the maximum number of iterations, we are finished.
     if (++n_iter == max_iter_)
     {
       fprintf(stderr, "Max iterations Reached! \n");
       break;
     }
 
-	//for every max(n_data_, 1000) iterations, show progress
+    //for every max(n_data_, 1000) iterations, show progress
     if (n_iter % counter == 0 )
       fprintf(stderr, ".");
   }
 
   //compute the final objective value
   double obj = sum_alpha_ - w_square_sum_/2;
-  fprintf(stderr, "iter=%zu, %zu, %f, %f, %f, obj=%f \n", n_iter, num_changed, thresh_, sum_alpha_, w_square_sum_, obj);
+  fprintf(stderr, "iter=%zu, %zu, %f, %f, %f, obj=%f \n", n_iter, num_changed,
+      thresh_, sum_alpha_, w_square_sum_, obj);
 }
 
 //NNSMO training iteration
@@ -94,16 +98,16 @@
   for (size_t i = 0; i < n_data_; i++)
   {
     if ((examine_all || !IsBound_(alpha_[i])) && TryChange_(i))
-    {
       num_changed++;
-    }
   }
+
   return num_changed;
 }
 
 // try to find the working set
-//	outer loop: alpha_j, KKT violation
-//	inner loop: alpha_i, maximum objective value increase with respective to alpha_i, j
+//  outer loop: alpha_j, KKT violation
+//  inner loop: alpha_i, maximum objective value increase with respect to
+//  alpha_i, j
 template<typename TKernel>
 bool NNSMO<TKernel>::TryChange_(size_t j)
 {
@@ -113,9 +117,7 @@
 
   if (!((rj < -NNSMO_TOLERANCE && alpha_[j] < c_)
       || (rj > NNSMO_TOLERANCE && alpha_[j] > 0)))
-  {
     return false; // nothing to change
-  }
 
   // first try the one we suspect to have the largest yield
   size_t i = -1;
@@ -129,20 +131,21 @@
       i = k;
     }
   }
+
   if (i != (size_t) -1 && TakeStep_(i, j, error_j))
   {
     return true;
   }
 
-
   return false;
 }
 
-//compute the increase of objective value with respect to updating of alpha_i, alpha_j
+// Compute the increase of objective value with respect to updating of alpha_i,
+// alpha_j
 template<typename TKernel>
 double NNSMO<TKernel>::CalculateDF_(size_t i, size_t j, double error_j)
 {
-  //1. check i,j
+  // 1. check i,j
   if (i == j)
   {
     return -1;
@@ -157,7 +160,7 @@
   double error_i = Error_(i);
   double r;
 
-  //2. compute L, H of alpha_j
+  // 2. compute L, H of alpha_j
   if (s < 0)
   {
     mlpack::Log::Assert(s == -1);
@@ -167,6 +170,7 @@
   {
     r = alpha_j + alpha_i - c_; // target values are equal
   }
+
   l = math::ClampNonNegative(r);
   u = c_ + math::ClampNonPositive(r);
 
@@ -176,7 +180,7 @@
     return -1;
   }
 
-  //3. compute eta using cached kernel values
+  // 3. compute eta using cached kernel values
   double kii = EvalKernel_(i, i);
   double kij = EvalKernel_(i, j);
   double kjj = EvalKernel_(j, j);
@@ -196,12 +200,12 @@
   double delta_alpha_j = alpha_j - alpha_[j];
 
   // check if there is progress
-  if (fabs(delta_alpha_j) < eps_*(alpha_j + alpha_[j] + eps_))
+  if (fabs(delta_alpha_j) < eps_ * (alpha_j + alpha_[j] + eps_))
   {
     return -1;
   }
 
-  //4. compute increase of objective value
+  // 4. compute increase of objective value
   arma::vec w(n_feature_);
   for (size_t s = 0; s < n_feature_; s++)
   {
@@ -210,13 +214,14 @@
   }
   w_square_sum_ = dot(w, w);
   double delta_f = w_square_sum_ / 2;
-  if(yi != yj)
+  if (yi != yj)
     delta_f += 2* delta_alpha_j;
 
   return delta_f;
 }
 
-// update alpha_i, alpha_j, as well as the VTA_, negation of intercept: thresh_ and the error cache: error_
+// Update alpha_i, alpha_j, as well as the VTA_, negation of intercept: thresh_
+// and the error cache: error_
 template<typename TKernel>
 bool NNSMO<TKernel>::TakeStep_(size_t i, size_t j, double error_j)
 {
@@ -255,7 +260,7 @@
     return false;
   }
 
-  //3. compute eta using cached kernel values
+  // 3. compute eta using cached kernel values
   double kii = EvalKernel_(i, i);
   double kij = EvalKernel_(i, j);
   double kjj = EvalKernel_(j, j);
@@ -296,7 +301,7 @@
   double delta_alpha_i = alpha_i - alpha_[i];
   delta_alpha_j = alpha_j - alpha_[j];
 
-  //4. update VTA_, w_square_sum_
+  // 4. update VTA_, w_square_sum_
   arma::vec w(n_feature_);
   for (size_t s = 0; s < n_feature_; s++)
   {
@@ -330,9 +335,7 @@
 
   // update error cache using the new threshold
   for (size_t k = 0; k < n_data_ ; k++)
-  {
     error_[k] -= thresh_;
-  }
 
   return true;
 }
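
A side note on the bound computation in CalculateDF_() and TakeStep_() above:
the math::ClampNonNegative / math::ClampNonPositive calls reduce to the usual
SMO box constraints on the updated alpha_j. A standalone restatement in plain
C++ (illustrative only; the function name is made up):

#include <algorithm>

// Lower/upper bounds on the updated alpha_j for a working pair (i, j).
// labelProduct plays the role of `s` above, i.e. the product of the two
// label signs (+1 if the labels agree, -1 if they differ).
void AlphaBounds(double alphaI, double alphaJ, double c, int labelProduct,
                 double& lower, double& upper)
{
  const double r = (labelProduct < 0) ? (alphaJ - alphaI)      // labels differ
                                      : (alphaJ + alphaI - c); // labels agree
  lower = std::max(0.0, r);      // math::ClampNonNegative(r)
  upper = c + std::min(0.0, r);  // c_ + math::ClampNonPositive(r)
}

That is, for differing labels the bounds are max(0, alpha_j - alpha_i) and
min(C, C + alpha_j - alpha_i); for equal labels they are
max(0, alpha_i + alpha_j - C) and min(C, alpha_i + alpha_j), as in standard
SMO.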

Modified: mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm.hpp
===================================================================
--- mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm.hpp	2011-11-24 07:02:29 UTC (rev 10388)
+++ mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm.hpp	2011-11-24 07:14:00 UTC (rev 10389)
@@ -1,10 +1,10 @@
 /**
- * @file nnsvm.h
+ * @file nnsvm.hpp
  *
  * This header file contains functions for performing NNSVM training.
  * The NNSMO algorithm is employed.
  *
- * @see nnsmo.h
+ * @see nnsmo.hpp
  */
 #ifndef __MLPACK_METHODS_NNSVM_NNSVM_HPP
 #define __MLPACK_METHODS_NNSVM_NNSVM_HPP
@@ -36,11 +36,11 @@
 template<typename TKernel>
 class NNSVM
 {
-  public:
-    typedef TKernel Kernel;
+ public:
+  typedef TKernel Kernel;
 
-  private:
-    struct nnsvm_model model_;
+ private:
+  struct nnsvm_model model_;
 
   struct NNSVM_PARAMETERS
   {
@@ -55,20 +55,32 @@
   arma::mat support_vectors_;
   size_t num_features_;
 
-  public:
-    void Init(const arma::mat& dataset, size_t n_classes);
-    void Init(const arma::mat& dataset, size_t n_classes, size_t c, size_t b, double eps, size_t max_iter);
-    void InitTrain(const arma::mat& dataset, size_t n_classes);
-    void InitTrain(const arma::mat& dataset, size_t n_classes, size_t c, size_t b, double eps, size_t max_iter);
-    void SaveModel(std::string modelfilename);
-    void LoadModel(arma::mat& testset, std::string modelfilename);
-    size_t Classify(const arma::vec& vector);
-    void BatchClassify(arma::mat& testset, std::string testlabelfilename);
-    void LoadModelBatchClassify(arma::mat& testset, std::string modelfilename, std::string testlabelfilename);
-    double getThreshold() { return model_.thresh_; }
-    size_t getSupportVectorCount() { return model_.num_sv_; }
-    const arma::vec getSupportVectorCoefficients() { return model_.sv_coef_; }
-    const arma::vec getWeightVector() { return model_.w_; }
+ public:
+  void Init(const arma::mat& dataset, size_t n_classes);
+  void Init(const arma::mat& dataset,
+            size_t n_classes,
+            size_t c,
+            size_t b,
+            double eps,
+            size_t max_iter);
+  void InitTrain(const arma::mat& dataset, size_t n_classes);
+  void InitTrain(const arma::mat& dataset,
+                 size_t n_classes,
+                 size_t c,
+                 size_t b,
+                 double eps,
+                 size_t max_iter);
+  void SaveModel(std::string modelfilename);
+  void LoadModel(arma::mat& testset, std::string modelfilename);
+  size_t Classify(const arma::vec& vector);
+  void BatchClassify(arma::mat& testset, std::string testlabelfilename);
+  void LoadModelBatchClassify(arma::mat& testset,
+                              std::string modelfilename,
+                              std::string testlabelfilename);
+  double getThreshold() { return model_.thresh_; }
+  size_t getSupportVectorCount() { return model_.num_sv_; }
+  const arma::vec getSupportVectorCoefficients() { return model_.sv_coef_; }
+  const arma::vec getWeightVector() { return model_.w_; }
 };
 
 }; // namespace nnsvm
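
For reference, a rough sketch of the train-then-classify flow exposed by the
NNSVM interface above, along the lines of what nnsvm_main.cpp does. This is
illustrative only: it assumes the nnsvm command-line parameters have been
registered and parsed (Init() reads "nnsvm/b" through CLI), and the function
and file names below are placeholders.

#include <mlpack/core.hpp>
#include <mlpack/core/kernels/linear_kernel.hpp>

#include "nnsvm.hpp"

using namespace mlpack;
using namespace mlpack::nnsvm;

// Hypothetical train-and-classify helper.
void TrainAndClassifySketch(const arma::mat& trainSet, const arma::vec& point)
{
  // trainSet holds the labels in its last row; point has one entry per
  // feature, i.e. trainSet.n_rows - 1 entries.
  NNSVM<kernel::LinearKernel> nnsvm;

  // Binary classification with the built-in defaults
  // (c = 10, b = n_rows, eps = 1e-6, max_iter = 1000).
  nnsvm.InitTrain(trainSet, 2);

  nnsvm.SaveModel("nnsvm_model");             // plain-text model file

  const size_t label = nnsvm.Classify(point); // 0 or 1
  Log::Info << "predicted label: " << label << std::endl;
}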

Modified: mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm_impl.hpp
===================================================================
--- mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm_impl.hpp	2011-11-24 07:02:29 UTC (rev 10388)
+++ mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm_impl.hpp	2011-11-24 07:14:00 UTC (rev 10389)
@@ -1,3 +1,8 @@
+/**
+ * @file nnsvm_impl.hpp
+ *
+ * Implementation of the NNSVM class.
+ */
 #ifndef __MLPACK_METHODS_NNSVM_NNSVM_IMPL_HPP
 #define __MLPACK_METHODS_NNSVM_NNSVM_IMPL_HPP
 
@@ -16,13 +21,14 @@
 {
   Init(dataset, n_classes, 10, dataset.n_rows, 1.0e-6, 1000);
 }
+
 template<typename TKernel>
 void NNSVM<TKernel>::Init(const arma::mat& dataset, size_t n_classes, size_t c, size_t b, double eps, size_t max_iter)
 {
   // c; default:10
   param_.c_ = c;
   // budget parameter, controls # of support vectors; default: # of data samples
-  if(!mlpack::CLI::HasParam("nnsvm/b"))
+  if (!mlpack::CLI::HasParam("nnsvm/b"))
     mlpack::CLI::GetParam<double>("nnsvm/b") = dataset.n_rows;
 
   param_.b_ = b;
@@ -30,34 +36,40 @@
   param_.eps_ = eps;
   //max iterations: max_iter, default: 1000
   param_.max_iter_ = max_iter;
-  fprintf(stderr, "c=%f, eps=%g, max_iter=%zu \n", param_.c_, param_.eps_, param_.max_iter_);
+  fprintf(stderr, "c=%f, eps=%g, max_iter=%zu \n", param_.c_, param_.eps_,
+      param_.max_iter_);
 }
 
 /**
-* Initialization(data dependent) and training for NNSVM Classifier
-*
-* @param: labeled training set
-* @param: number of classes (different labels) in the training set
-* @param: module name
-*/
+ * Initialization (data-dependent) and training for the NNSVM classifier
+ *
+ * @param: labeled training set
+ * @param: number of classes (different labels) in the training set
+ * @param: module name
+ */
 template<typename TKernel>
-void NNSVM<TKernel>::InitTrain(
-    const arma::mat& dataset, size_t n_classes)
+void NNSVM<TKernel>::InitTrain(const arma::mat& dataset, size_t n_classes)
 {
   InitTrain(dataset, n_classes, 10, dataset.n_rows, 1.0e-6, 1000);
 }
+
 template<typename TKernel>
-void NNSVM<TKernel>::InitTrain(
-    const arma::mat& dataset, size_t n_classes, size_t c, size_t b, double eps, size_t max_iter)
+void NNSVM<TKernel>::InitTrain(const arma::mat& dataset,
+                               size_t n_classes,
+                               size_t c,
+                               size_t b,
+                               double eps,
+                               size_t max_iter)
 {
   std::cerr << "made it to " << __LINE__ << " in "__FILE__"\n";
   Init(dataset, n_classes, c, b, eps, max_iter);
-  /* # of features = # of rows in data matrix - 1, as last row is for labels*/
+  /* # of features = # of rows in data matrix - 1, as last row is for labels */
   num_features_ = dataset.n_rows - 1;
   Log::Assert(n_classes == 2, "SVM is only a binary classifier");
   CLI::GetParam<std::string>("kernel_type") = typeid(TKernel).name();
 
-  /* Initialize parameters c_, budget_, eps_, max_iter_, VTA_, alpha_, error_, thresh_ */
+  /* Initialize parameters c_, budget_, eps_, max_iter_, VTA_, alpha_, error_,
+   * thresh_ */
   NNSMO<Kernel> nnsmo;
   nnsmo.Init(dataset, param_.c_, param_.b_, param_.eps_, param_.max_iter_);
 
@@ -68,7 +80,8 @@
 
   /* Get the trained bi-class model */
   nnsmo.GetNNSVM(support_vectors_, model_.sv_coef_, model_.w_);
-  std::cerr << "the NUMBER of elements in sv_coef_ is " << model_.sv_coef_.n_elem << "\n";
+  std::cerr << "the NUMBER of elements in sv_coef_ is "
+      << model_.sv_coef_.n_elem << "\n";
   mlpack::Log::Assert(model_.sv_coef_.n_elem != 0);
   model_.num_sv_ = support_vectors_.n_cols;
   model_.thresh_ = nnsmo.threshold();
@@ -79,14 +92,14 @@
 }
 
 /**
-* Save the NNSVM model to a text file
-*
-* @param: name of the model file
-*/
+ * Save the NNSVM model to a text file
+ *
+ * @param: name of the model file
+ */
 template<typename TKernel>
 void NNSVM<TKernel>::SaveModel(std::string modelfilename)
 {
-  // TODO: Why do we do this? 
+  // TODO: Why do we do this?
   FILE *fp = fopen(modelfilename.c_str(), "w");
   if (fp == NULL)
   {
@@ -96,18 +109,18 @@
 
   fprintf(fp, "svm_type svm_c\n"); // TODO: svm-mu, svm-regression...
   // save kernel parameters
- // param_.kernel_.SaveParam(fp);
+  // param_.kernel_.SaveParam(fp);
   fprintf(fp, "total_num_sv %zu\n", model_.num_sv_);
   fprintf(fp, "threshold %g\n", model_.thresh_);
   fprintf(fp, "weights");
   size_t len = model_.w_.n_elem;
-  for(size_t s = 0; s < len; s++)
+  for (size_t s = 0; s < len; s++)
     fprintf(fp, " %f", model_.w_[s]);
   fprintf(fp, "\nsvs\n");
-  for(size_t i=0; i < model_.num_sv_; i++)
+  for (size_t i = 0; i < model_.num_sv_; i++)
   {
      fprintf(fp, "%f ", model_.sv_coef_[i]);
-     for(size_t s=0; s < num_features_; s++)
+     for(size_t s = 0; s < num_features_; s++)
      {
        fprintf(fp, "%f ", support_vectors_(s, i));
      }
@@ -117,10 +130,10 @@
 }
 
 /**
-* Load NNSVM model file
-*
-* @param: name of the model file
-*/
+ * Load NNSVM model file
+ *
+ * @param: name of the model file
+ */
 // TODO: use XML
 template<typename TKernel>
 void NNSVM<TKernel>::LoadModel(arma::mat& testset, std::string modelfilename)
@@ -143,10 +156,10 @@
   while (1)
   {
     fscanf(fp, "%80s", cmd);
-    if(strcmp(cmd,"svm_type") == 0)
+    if (strcmp(cmd,"svm_type") == 0)
     {
       fscanf(fp, "%80s", cmd);
-      if(strcmp(cmd, "svm_c") == 0)
+      if (strcmp(cmd, "svm_c") == 0)
       {
         fprintf(stderr, "SVM_C\n");
       }
@@ -161,7 +174,7 @@
     }
     else if (strcmp(cmd, "weights")==0)
     {
-      for (size_t s= 0; s < num_features_; s++)
+      for (size_t s = 0; s < num_features_; s++)
       {
         fscanf(fp, "%lf", &temp_f);
         model_.w_[s] = temp_f;
@@ -194,47 +207,43 @@
 }
 
 /**
-* NNSVM classification for one testing vector
-*
-* @param: testing vector
-*
-* @return: a label (integer)
-*/
-
+ * NNSVM classification for one testing vector
+ *
+ * @param: testing vector
+ *
+ * @return: a label (integer)
+ */
 template<typename TKernel>
 size_t NNSVM<TKernel>::Classify(const arma::vec& datum)
 {
   double summation = dot(model_.w_, datum);
 
   return (summation - model_.thresh_ > 0.0) ? 1 : 0;
-
-  return 0;
 }
 
 /**
-* Online batch classification for multiple testing vectors. No need to load model file,
-* since models are already in RAM.
-*
-* Note: for test set, if no true test labels provided, just put some dummy labels
-* (e.g. all -1) in the last row of testset
-*
-* @param: testing set
-* @param: file name of the testing data
-*/
+ * Online batch classification for multiple testing vectors. No need to load
+ * model file, since models are already in RAM.
+ *
+ * Note: for the test set, if no true test labels are provided, just put some
+ * dummy labels (e.g. all -1) in the last row of testset.
+ *
+ * @param: testing set
+ * @param: file name of the testing data
+ */
 template<typename TKernel>
-void NNSVM<TKernel>::BatchClassify(arma::mat& testset, std::string testlablefilename)
+void NNSVM<TKernel>::BatchClassify(arma::mat& testset,
+                                   std::string testlablefilename)
 {
   FILE *fp = fopen(testlablefilename.c_str(), "w");
   if (fp == NULL)
-  {
     mlpack::Log::Fatal << "Cannot save test labels to file!" << std::endl;
-    return;
-  }
+
   num_features_ = testset.n_cols - 1;
   for (size_t i = 0; i < testset.n_rows; i++)
   {
     arma::vec testvec(num_features_);
-    for(size_t j = 0; j < num_features_; j++)
+    for (size_t j = 0; j < num_features_; j++)
     {
       testvec[j] = testset(j, i);
     }
@@ -245,12 +254,13 @@
 }
 
 /**
-* Load models from a file, and perform offline batch classification for multiple testing vectors
-*
-* @param: testing set
-* @param: name of the model file
-* @param: name of the file to store classified labels
-*/
+ * Load models from a file, and perform offline batch classification for
+ * multiple testing vectors
+ *
+ * @param: testing set
+ * @param: name of the model file
+ * @param: name of the file to store classified labels
+ */
 template<typename TKernel>
 void NNSVM<TKernel>::LoadModelBatchClassify(arma::mat& testset, std::string modelfilename, std::string testlabelfilename)
 {
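
Pieced together from the fprintf() calls in SaveModel() above, the plain-text
model file that LoadModel() parses back looks roughly like the sketch below.
The numbers are invented placeholders; two features and two support vectors
are assumed, with each support vector on its own line, coefficient first, and
the exact whitespace following the format strings in the code.

svm_type svm_c
total_num_sv 2
threshold 0.137
weights 0.500000 1.250000
svs
0.750000 0.100000 0.200000
0.250000 0.300000 0.400000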

Modified: mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm_main.cpp
===================================================================
--- mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm_main.cpp	2011-11-24 07:02:29 UTC (rev 10388)
+++ mlpack/trunk/src/mlpack/methods/nnsvm/nnsvm_main.cpp	2011-11-24 07:14:00 UTC (rev 10389)
@@ -1,18 +1,19 @@
 /**
- * @file nnsvm.cc
+ * @file nnsvm_main.cpp
  *
- * This file contains main routines for performing
- * NNSVM (Non-Negativity constrained SVM) training
- * NNSMO algorithm is employed.
+ * This file contains main routines for performing NNSVM (Non-Negativity
+ * constrained SVM) training; the NNSMO algorithm is employed.
  *
  * It currently supports the "train", "train_test", and "test" modes with the "linear" kernel
  * Example:
- *  nnsvm --mode=train --train_data=toy1.csv --kernel=linear --c=10.0 --eps=0.000001 --max_iter=1000
- *  nnsvm --mode=train_test --train_data=toy1.csv --test_data=toy2.csv --kernel=linear --c=10.0 --max_iter=1000
+ *  nnsvm --mode=train --train_data=toy1.csv --kernel=linear --c=10.0
+ *  --eps=0.000001 --max_iter=1000
+ *  nnsvm --mode=train_test --train_data=toy1.csv --test_data=toy2.csv
+ *  --kernel=linear --c=10.0 --max_iter=1000
  *  nnsvm --mode=test --train_data=toy2.csv --kernel=linear
  *
- * @see nnsvm.h
- * @see nnsmo.h
+ * @see nnsvm.hpp
+ * @see nnsmo.hpp
  */
 #include <iostream>
 #include "nnsvm.hpp"
@@ -20,19 +21,21 @@
 #include <mlpack/core/kernels/linear_kernel.hpp>
 
 PARAM_STRING_REQ("mode", "operating mode: train, train_test, or test", "nnsvm");
-PARAM_STRING_REQ("kernel", "kernel type: linear (currently supported)", "nnsvm");
-PARAM_STRING_REQ("train_data", "name of the file containing the training data", "nnsvm");
+PARAM_STRING_REQ("kernel", "kernel type: linear (currently supported)",
+    "nnsvm");
+PARAM_STRING_REQ("train_data", "name of the file containing the training data",
+    "nnsvm");
 
 using namespace mlpack;
 using namespace mlpack::kernel;
 using namespace mlpack::nnsvm;
 
 /**
-* NNSVM training - Main function
-*
-* @param: argc
-* @param: argv
-*/
+ * NNSVM training - Main function
+ *
+ * @param: argc
+ * @param: argv
+ */
 int main(int argc, char *argv[])
 {
   CLI::ParseCommandLine(argc, argv);
@@ -62,9 +65,11 @@
           CLI::GetParam<int>("nnsvm/max_iter"));
 
       Timers::StartTimer("nnsvm/nnsvm_train");
-      Log::Debug << "nnsvm_train_time" << CLI::GetParam<timeval>("nnsvm/nnsvm_train").tv_usec / 1e6 << std::endl;
+      Log::Debug << "nnsvm_train_time"
+          << CLI::GetParam<timeval>("nnsvm/nnsvm_train").tv_usec / 1e6
+          << std::endl;
       /* training and testing, thus no need to load model from file */
-      if (mode=="train_test")
+      if (mode == "train_test")
       {
         Log::Debug << "Non-Negativity SVM Classifying... " << std::endl;
         /* Load testing data */
@@ -77,7 +82,7 @@
       }
     }
   }
-  /* Testing(offline) Mode, need loading model file and testing data */
+  /* Testing (offline) mode: we need to load the model file and testing data */
   else if (mode == "test")
   {
     Log::Debug << "Non-Negativity Constrained SVM Classifying... " << std::endl;
@@ -90,7 +95,7 @@
     {
       NNSVM<LinearKernel> nnsvm;
       nnsvm.Init(testset, 2);
-      nnsvm.LoadModelBatchClassify(testset, "nnsvm_model", "testlabels"); // TODO:param_req
+      nnsvm.LoadModelBatchClassify(testset, "nnsvm_model", "testlabels");
     }
   }
   return 0;



